Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: v1.36 ... fuse-auto_ (1246 commits)
Commits in this range: 87d64e7fb4 … 503cd84919 (1246 commits).
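For reference, the same range can be inspected locally with plain git. This is a minimal sketch, not part of the page above: it assumes you have a clone that carries both refs (the upstream GitHub repository has the v1.36 tag, but the fuse-auto_ branch may only exist on this mirror and might need to be fetched from wherever it lives).

```sh
# Clone the repository; tags such as v1.36 and the remote branches
# come along with the default fetch.
git clone https://github.com/rclone/rclone.git
cd rclone

# Commits reachable from fuse-auto_ but not from v1.36: roughly what
# the "Compare commits" view lists, one line per commit.
git log --oneline v1.36..origin/fuse-auto_

# Count them; this should correspond to the commit count shown above.
git rev-list --count v1.36..origin/fuse-auto_

# Summarise the file-level differences between the two points.
git diff --stat v1.36 origin/fuse-auto_
```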
.appveyor.yml (new file, 46 lines)

@@ -0,0 +1,46 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\ncw\rclone

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
.circleci/config.yml (new file, 34 lines)

@@ -0,0 +1,34 @@
version: 2

jobs:

  build:
    machine: true

    working_directory: ~/.go_workspace/src/github.com/ncw/rclone

    steps:
      - checkout

      - run:
          name: Cross-compile rclone
          command: |
            docker pull billziss/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=billziss/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .

      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist

      - store_artifacts:
          path: /tmp/rclone.dist
.gitignore (vendored, 2 additions)

@@ -3,3 +3,5 @@ _junk/
rclone
build
docs/public
rclone.iml
.idea
.gometalinter.json (new file, 14 lines)

@@ -0,0 +1,14 @@
{
  "Enable": [
    "deadcode",
    "errcheck",
    "goimports",
    "golint",
    "ineffassign",
    "structcheck",
    "varcheck",
    "vet"
  ],
  "EnableGC": true,
  "Vendor": true
}
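`.gometalinter.json` is the configuration file for the (now archived) gometalinter aggregator; the keys above mirror its command-line options (the set of linters to enable, garbage collection between linters, and vendor-directory awareness). A small sketch of how such a config is typically used, assuming gometalinter is installed; exact flags varied across its releases:

```sh
# Install the aggregator and the individual linters it drives
# (historical tooling; the project has since been archived).
go get -u github.com/alecthomas/gometalinter
gometalinter --install

# Lint every package in the repository using the checked-in config.
gometalinter --config=.gometalinter.json ./...
```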
.travis.yml (40 changes)

@@ -1,14 +1,17 @@
language: go
sudo: false
osx_image: xcode7.3
sudo: required
dist: trusty
os:
- linux
go:
- 1.5.4
- 1.6.4
- 1.7.4
- 1.8
- 1.7.6
- 1.8.7
- 1.9.3
- "1.10.1"
- tip
before_install:
- if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
install:
- git fetch --unshallow --tags
- make vars
@@ -16,19 +19,32 @@ install:
script:
- make check
- make quicktest
- make compile_all
env:
  matrix:
    secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
  global:
    - GOTAGS=cmount
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
matrix:
  allow_failures:
    - go: tip
  include:
    - os: osx
      go: 1.8
      go: "1.10.1"
      env: GOTAGS=""
deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    branch: master
    go: 1.8
    condition: "`uname` == 'Linux'"
    all_branches: true
    go: "1.10.1"
    condition: $TRAVIS_PULL_REQUEST == false
CONTRIBUTING.md (184 changes)

@@ -10,7 +10,7 @@ of filing an issue.

When filing an issue, please include the following information if
possible as well as a description of the problem. Make sure you test
with the [latest beta of rclone](http://beta.rclone.org/):
with the [latest beta of rclone](https://beta.rclone.org/):

* Rclone version (eg output from `rclone -V`)
* Which OS you are using and how many bits (eg Windows 7, 64 bit)
@@ -100,21 +100,74 @@ need to make a remote called `TestDrive`.
You can then run the unit tests in the drive directory. These tests
are skipped if `TestDrive:` isn't defined.

    cd drive
    cd backend/drive
    go test -v

You can then run the integration tests which tests all of rclone's
operations. Normally these get run against the local filing system,
but they can be run against any of the remotes.

    cd ../fs
    cd fs/sync
    go test -v -remote TestDrive:
    go test -v -remote TestDrive: -subdir

If you want to run all the integration tests against all the remotes,
then run in that directory
    cd fs/operations
    go test -v -remote TestDrive:

    go run test_all.go
If you want to run all the integration tests against all the remotes,
then change into the project root and run

    make test

This command is run daily on the the integration test server. You can
find the results at https://pub.rclone.org/integration-tests/

## Code Organisation ##

Rclone code is organised into a small number of top level directories
with modules beneath.

* backend - the rclone backends for interfacing to cloud providers -
  * all - import this to load all the cloud providers
  * ...providers
* bin - scripts for use while building or maintaining rclone
* cmd - the rclone commands
  * all - import this to load all the commands
  * ...commands
* docs - the documentation and website
  * content - adjust these docs only - everything else is autogenerated
* fs - main rclone definitions - minimal amount of code
  * accounting - bandwidth limiting and statistics
  * asyncreader - an io.Reader which reads ahead
  * config - manage the config file and flags
  * driveletter - detect if a name is a drive letter
  * filter - implements include/exclude filtering
  * fserrors - rclone specific error handling
  * fshttp - http handling for rclone
  * fspath - path handling for rclone
  * hash - defines rclones hash types and functions
  * list - list a remote
  * log - logging facilities
  * march - iterates directories in lock step
  * object - in memory Fs objects
  * operations - primitives for sync, eg Copy, Move
  * sync - sync directories
  * walk - walk a directory
* fstest - provides integration test framework
  * fstests - integration tests for the backends
  * mockdir - mocks an fs.Directory
  * mockobject - mocks an fs.Object
  * test_all - Runs integration tests for everything
* graphics - the images used in the website etc
* lib - libraries used by the backend
  * atexit - register functions to run when rclone exits
  * dircache - directory ID to name caching
  * oauthutil - helpers for using oauth
  * pacer - retries with backoff and paces operations
  * readers - a selection of useful io.Readers
  * rest - a thin abstraction over net/http for REST
* vendor - 3rd party code managed by the dep tool
* vfs - Virtual FileSystem layer for implementing rclone mount and similar

## Writing Documentation ##

@@ -139,15 +192,93 @@ Documentation for rclone sub commands is with their code, eg
There are separate instructions for making a release in the RELEASE.md
file.

## Updating the vendor dirctory ##
## Commit messages ##

Do these commands to update the entire build directory to the latest
version of all the dependencies. This should be done early in the
release cycle. Individual dependencies can be added with `godep get`.
Please make the first line of your commit message a summary of the
change, and prefix it with the directory of the change followed by a
colon. The changelog gets made by looking at just these first lines
so make it good!

If you have more to say about the commit, then enter a blank line and
carry on the description. Remember to say why the change was needed -
the commit itself shows what was changed.

If the change fixes an issue then write `Fixes #1234` in the commit
message. This can be on the subject line if it will fit. If you
don't want to close the associated issue just put `#1234` and the
change will get linked into the issue.

Here is an example of a short commit message:

```
drive: add team drive support - fixes #885
```

And here is an example of a longer one:

```
mount: fix hang on errored upload

In certain circumstances if an upload failed then the mount could hang
indefinitely. This was fixed by closing the read pipe after the Put
completed. This will cause the write side to return a pipe closed
error fixing the hang.

Fixes #1498
```

## Adding a dependency ##

rclone uses the [dep](https://github.com/golang/dep) tool to manage
its dependencies. All code that rclone needs for building is stored
in the `vendor` directory for perfectly reproducable builds.

The `vendor` directory is entirely managed by the `dep` tool.

To add a new dependency, run `dep ensure` and `dep` will pull in the
new dependency to the `vendor` directory and update the `Gopkg.lock`
file.

You can add constraints on that package in the `Gopkg.toml` file (see
the `dep` documentation), but don't unless you really need to.

Please check in the changes generated by `dep` including the `vendor`
directory and `Godep.toml` and `Godep.lock` in a single commit
separate from any other code changes. Watch out for new files in
`vendor`.

## Updating a dependency ##

If you need to update a dependency then run

    dep ensure -update github.com/pkg/errors

Check in in a single commit as above.

## Updating all the dependencies ##

In order to update all the dependencies then run `make update`. This
just runs `dep ensure -update`. Check in the changes in a single
commit as above.

This should be done early in the release cycle to pick up new versions
of packages in time for them to get some testing.

## Updating a backend ##

If you update a backend then please run the unit tests and the
integration tests for that backend.

Assuming the backend is called `remote`, make create a config entry
called `TestRemote` for the tests to use.

Now `cd remote` and run `go test -v` to run the unit tests.

Then `cd fs` and run `go test -v -remote TestRemote:` to run the
integration tests.

The next section goes into more detail about the tests.

* make build_dep
* make update

## Writing a new backend ##

Choose a name. The docs here will use `remote` as an example.
@@ -162,25 +293,35 @@ Research

Getting going

* Create `remote/remote.go` (copy this from a similar fs)
* Add your fs to the imports in `fs/all/all.go`
* Create `backend/remote/remote.go` (copy this from a similar remote)
  * box is a good one to start from if you have a directory based remote
  * b2 is a good one to start from if you have a bucket based remote
* Add your remote to the imports in `backend/all/all.go`
* HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
* Try to implement as many optional methods as possible as it makes the remote more usable.

Unit tests

* Create a config entry called `TestRemote` for the unit tests to use
* Add your fs to the end of `fstest/fstests/gen_tests.go`
* generate `remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
* Create a `backend/remote/remote_test.go` - copy and adjust your example remote
* Make sure all tests pass with `go test -v`

Integration tests

* Add your fs to `fs/test_all.go`
* Add your fs to `fstest/test_all/test_all.go`
* Make sure integration tests pass with
  * `cd fs`
  * `go test -v -remote TestRemote:` and
  * `cd fs/operations`
  * `go test -v -remote TestRemote:`
  * `cd fs/sync`
  * `go test -v -remote TestRemote:`
* If you are making a bucket based remote, then check with this also
  * `go test -v -remote TestRemote: -subdir`
* And if your remote defines `ListR` this also
  * `go test -v -remote TestRemote: -fast-list`

Add your fs to the docs
See the [testing](#testing) section for more information on integration tests.

Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.

* `README.md` - main Github page
* `docs/content/remote.md` - main docs page
@@ -189,3 +330,4 @@ Add your fs to the docs
* `docs/content/about.md` - front page of rclone.org
* `docs/layouts/chrome/navbar.html` - add it to the website navigation
* `bin/make_manual.py` - add the page to the `docs` constant
* `cmd/cmd.go` - the main help for rclone
Godeps/Godeps.json (generated, 513 deletions)

@@ -1,513 +0,0 @@
|
||||
{
|
||||
"ImportPath": "github.com/ncw/rclone",
|
||||
"GoVersion": "go1.7",
|
||||
"GodepVersion": "v75",
|
||||
"Packages": [
|
||||
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "bazil.org/fuse",
|
||||
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||
},
|
||||
{
|
||||
"ImportPath": "bazil.org/fuse/fs",
|
||||
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||
},
|
||||
{
|
||||
"ImportPath": "bazil.org/fuse/fuseutil",
|
||||
"Rev": "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
|
||||
},
|
||||
{
|
||||
"ImportPath": "cloud.google.com/go/compute/metadata",
|
||||
"Comment": "v0.6.0-68-g0b87d14",
|
||||
"Rev": "0b87d14d90086b53a97dfbd66f3000f7f112b494"
|
||||
},
|
||||
{
|
||||
"ImportPath": "cloud.google.com/go/internal",
|
||||
"Comment": "v0.6.0-68-g0b87d14",
|
||||
"Rev": "0b87d14d90086b53a97dfbd66f3000f7f112b494"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/Unknwon/goconfig",
|
||||
"Rev": "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/VividCortex/ewma",
|
||||
"Comment": "v1.0-20-gc595cd8",
|
||||
"Rev": "c595cd886c223c6c28fc9ae2727a61b5e4693d85"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/awserr",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/client",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/defaults",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/request",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/session",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3iface",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/s3/s3manager",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/aws/aws-sdk-go/service/sts",
|
||||
"Comment": "v1.6.24-1-g2d3b3bc",
|
||||
"Rev": "2d3b3bc3aae6a09a9b194aa6eb71326fcbe2e918"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
|
||||
"Comment": "v1.0.6",
|
||||
"Rev": "a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Comment": "v1.1.0",
|
||||
"Rev": "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/go-ini/ini",
|
||||
"Comment": "v1.24.0-2-gee900ca",
|
||||
"Rev": "ee900ca565931451fe4e4409bcbd4316331cec1c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "8ee79997227bf9b34611aee7946ae64735e6fd93"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/go-querystring/query",
|
||||
"Rev": "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/googleapis/gax-go",
|
||||
"Rev": "da06d194a00e19ce00d9011a13931c3f6f6887c7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/inconshreveable/mousetrap",
|
||||
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/jmespath/go-jmespath",
|
||||
"Comment": "0.2.2-14-gbd40a43",
|
||||
"Rev": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kr/fs",
|
||||
"Rev": "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ncw/go-acd",
|
||||
"Rev": "7954f1fad2bda6a7836999003e4481d6e32edc1e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ncw/swift",
|
||||
"Rev": "6c1b1510538e1f00d49a558b7b9b87d71bc454d6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pkg/errors",
|
||||
"Comment": "v0.8.0-2-g248dadf",
|
||||
"Rev": "248dadf4e9068a0b3e79f02ed0a610d935de5302"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pkg/sftp",
|
||||
"Rev": "ff7e52ffd762466ebd2c4e710d5436dccc539f54"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pmezard/go-difflib/difflib",
|
||||
"Comment": "v1.0.0",
|
||||
"Rev": "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/rfjakob/eme",
|
||||
"Comment": "v1.0-15-gfd00240",
|
||||
"Rev": "fd00240838d2e0fe6b2c58bf5b27db843d828ad5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/russross/blackfriday",
|
||||
"Comment": "v1.4-40-g5f33e7b",
|
||||
"Rev": "5f33e7b7878355cd2b7e6b8eefc48a5472c69f70"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shurcooL/sanitized_anchor_name",
|
||||
"Rev": "1dba4b3954bc059efc3991ec364f9f9a35f597d2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/skratchdot/open-golang/open",
|
||||
"Rev": "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/cobra",
|
||||
"Rev": "b5d8e8f46a2f829f755b6e33b454e25c61c935e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/cobra/doc",
|
||||
"Rev": "b5d8e8f46a2f829f755b6e33b454e25c61c935e1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/pflag",
|
||||
"Rev": "9ff6c6923cfffbcd502984b8e0c80539a94968b7"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stacktic/dropbox",
|
||||
"Rev": "58f839b21094d5e0af7caf613599830589233d20"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/assert",
|
||||
"Comment": "v1.1.4-27-g4d4bfba",
|
||||
"Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/stretchr/testify/require",
|
||||
"Comment": "v1.1.4-27-g4d4bfba",
|
||||
"Rev": "4d4bfba8f1d1027c4fdbe371823030df51419987"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/tsenart/tb",
|
||||
"Rev": "19f4c3d79d2bd67d0911b2e310b999eeea4454c1"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/curve25519",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ed25519",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ed25519/internal/edwards25519",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/nacl/secretbox",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/pbkdf2",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/poly1305",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/salsa20/salsa",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/scrypt",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ssh",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ssh/agent",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/ssh/terminal",
|
||||
"Rev": "453249f01cfeb54c3d549ddb75ff152ca243f9d8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context/ctxhttp",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/http2",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/http2/hpack",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/idna",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/internal/timeseries",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/lex/httplex",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/trace",
|
||||
"Rev": "b4690f45fa1cafc47b1c280c2e75116efe40cc13"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2",
|
||||
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/google",
|
||||
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/internal",
|
||||
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/jws",
|
||||
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/oauth2/jwt",
|
||||
"Rev": "b9780ec78894ab900c062d58ee3076cd9b2a4501"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/unix",
|
||||
"Rev": "075e574b89e4c2d22f2286a7e2b919519c6f3547"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/transform",
|
||||
"Rev": "85c29909967d7f171f821e7a42e7b7af76fb9598"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/text/unicode/norm",
|
||||
"Rev": "85c29909967d7f171f821e7a42e7b7af76fb9598"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/drive/v2",
|
||||
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/gensupport",
|
||||
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/googleapi",
|
||||
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/googleapi/internal/uritemplates",
|
||||
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/api/storage/v1",
|
||||
"Rev": "bc20c61134e1d25265dd60049f5735381e79b631"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/app_identity",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/base",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/datastore",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/log",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/modules",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/appengine/internal/remote_api",
|
||||
"Comment": "v1.0.0-28-g2e4a801",
|
||||
"Rev": "2e4a801b39fc199db615bfca7d0b9f8cd9580599"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/codes",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/credentials",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/grpclog",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/internal",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/metadata",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/naming",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/peer",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/stats",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/tap",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "google.golang.org/grpc/transport",
|
||||
"Comment": "v1.0.5-52-gd0c32ee",
|
||||
"Rev": "d0c32ee6a441117d49856d6120ca9552af413ee0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
||||
"Rev": "a3f3340b5840cee44f372bddb5880fcbc419b46a"
|
||||
}
|
||||
]
|
||||
}
|
||||
Gopkg.lock (generated, new file, 490 lines)
@@ -0,0 +1,490 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "bazil.org/fuse"
|
||||
packages = [
|
||||
".",
|
||||
"fs",
|
||||
"fuseutil"
|
||||
]
|
||||
revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
|
||||
|
||||
[[projects]]
|
||||
name = "cloud.google.com/go"
|
||||
packages = ["compute/metadata"]
|
||||
revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
|
||||
version = "v0.23.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = [
|
||||
"storage",
|
||||
"version"
|
||||
]
|
||||
revision = "fbe7db0e3f9793ba3e5704efbab84f51436c136e"
|
||||
version = "v18.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = [
|
||||
"autorest",
|
||||
"autorest/adal",
|
||||
"autorest/azure",
|
||||
"autorest/date"
|
||||
]
|
||||
revision = "1f7cd6cfe0adea687ad44a512dfe76140f804318"
|
||||
version = "v10.12.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/Unknwon/goconfig"
|
||||
packages = ["."]
|
||||
revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/VividCortex/ewma"
|
||||
packages = ["."]
|
||||
revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/a8m/tree"
|
||||
packages = ["."]
|
||||
revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/abbot/go-http-auth"
|
||||
packages = ["."]
|
||||
revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
|
||||
version = "v0.4.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
"aws/awserr",
|
||||
"aws/awsutil",
|
||||
"aws/client",
|
||||
"aws/client/metadata",
|
||||
"aws/corehandlers",
|
||||
"aws/credentials",
|
||||
"aws/credentials/ec2rolecreds",
|
||||
"aws/credentials/endpointcreds",
|
||||
"aws/credentials/stscreds",
|
||||
"aws/csm",
|
||||
"aws/defaults",
|
||||
"aws/ec2metadata",
|
||||
"aws/endpoints",
|
||||
"aws/request",
|
||||
"aws/session",
|
||||
"aws/signer/v4",
|
||||
"internal/sdkio",
|
||||
"internal/sdkrand",
|
||||
"internal/shareddefaults",
|
||||
"private/protocol",
|
||||
"private/protocol/eventstream",
|
||||
"private/protocol/eventstream/eventstreamapi",
|
||||
"private/protocol/query",
|
||||
"private/protocol/query/queryutil",
|
||||
"private/protocol/rest",
|
||||
"private/protocol/restxml",
|
||||
"private/protocol/xml/xmlutil",
|
||||
"service/s3",
|
||||
"service/s3/s3iface",
|
||||
"service/s3/s3manager",
|
||||
"service/sts"
|
||||
]
|
||||
revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
|
||||
version = "v1.14.8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/billziss-gh/cgofuse"
|
||||
packages = ["fuse"]
|
||||
revision = "ea66f9809c71af94522d494d3d617545662ea59d"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/coreos/bbolt"
|
||||
packages = ["."]
|
||||
revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/cpuguy83/go-md2man"
|
||||
packages = ["md2man"]
|
||||
revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
|
||||
version = "v1.0.8"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/djherbis/times"
|
||||
packages = ["."]
|
||||
revision = "95292e44976d1217cf3611dc7c8d9466877d3ed5"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/dropbox/dropbox-sdk-go-unofficial"
|
||||
packages = [
|
||||
"dropbox",
|
||||
"dropbox/async",
|
||||
"dropbox/common",
|
||||
"dropbox/file_properties",
|
||||
"dropbox/files",
|
||||
"dropbox/seen_state",
|
||||
"dropbox/sharing",
|
||||
"dropbox/team_common",
|
||||
"dropbox/team_policies",
|
||||
"dropbox/users",
|
||||
"dropbox/users_common"
|
||||
]
|
||||
revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
|
||||
version = "v4.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
|
||||
version = "v1.37.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto"]
|
||||
revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/go-querystring"
|
||||
packages = ["query"]
|
||||
revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/inconshreveable/mousetrap"
|
||||
packages = ["."]
|
||||
revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
version = "v1.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/jlaffaye/ftp"
|
||||
packages = ["."]
|
||||
revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "0b12d6b5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kardianos/osext"
|
||||
packages = ["."]
|
||||
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/marstr/guid"
|
||||
packages = ["."]
|
||||
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-runewidth"
|
||||
packages = ["."]
|
||||
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
|
||||
version = "v0.0.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/ncw/go-acd"
|
||||
packages = ["."]
|
||||
revision = "887eb06ab6a255fbf5744b5812788e884078620a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
|
||||
version = "v1.0.39"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/nsf/termbox-go"
|
||||
packages = ["."]
|
||||
revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/okzk/sdnotify"
|
||||
packages = ["."]
|
||||
revision = "ed8ca104421a21947710335006107540e3ecb335"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/patrickmn/go-cache"
|
||||
packages = ["."]
|
||||
revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
|
||||
version = "v2.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pengsrc/go-shared"
|
||||
packages = [
|
||||
"buffer",
|
||||
"check",
|
||||
"convert",
|
||||
"log",
|
||||
"reopen"
|
||||
]
|
||||
revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
revision = "57673e38ea946592a59c26592b7e6fbda646975b"
|
||||
version = "1.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pmezard/go-difflib"
|
||||
packages = ["difflib"]
|
||||
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/rfjakob/eme"
|
||||
packages = ["."]
|
||||
revision = "01668ae55fe0b79a483095689043cce3e80260db"
|
||||
version = "v1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/russross/blackfriday"
|
||||
packages = ["."]
|
||||
revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
|
||||
version = "v1.5.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
packages = ["."]
|
||||
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/sevlyar/go-daemon"
|
||||
packages = ["."]
|
||||
revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/skratchdot/open-golang"
|
||||
packages = ["open"]
|
||||
revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/cobra"
|
||||
packages = [
|
||||
".",
|
||||
"doc"
|
||||
]
|
||||
revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
|
||||
version = "v0.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/stretchr/testify"
|
||||
packages = [
|
||||
"assert",
|
||||
"require"
|
||||
]
|
||||
revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
|
||||
version = "v1.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/t3rm1n4l/go-mega"
|
||||
packages = ["."]
|
||||
revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/xanzy/ssh-agent"
|
||||
packages = ["."]
|
||||
revision = "ba9c9e33906f58169366275e3450db66139a31a9"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/yunify/qingstor-sdk-go"
|
||||
packages = [
|
||||
".",
|
||||
"config",
|
||||
"logger",
|
||||
"request",
|
||||
"request/builder",
|
||||
"request/data",
|
||||
"request/errors",
|
||||
"request/signer",
|
||||
"request/unpacker",
|
||||
"service",
|
||||
"utils"
|
||||
]
|
||||
revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
|
||||
version = "v2.2.14"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = [
|
||||
"bcrypt",
|
||||
"blowfish",
|
||||
"curve25519",
|
||||
"ed25519",
|
||||
"ed25519/internal/edwards25519",
|
||||
"internal/chacha20",
|
||||
"internal/subtle",
|
||||
"nacl/secretbox",
|
||||
"pbkdf2",
|
||||
"poly1305",
|
||||
"salsa20/salsa",
|
||||
"scrypt",
|
||||
"ssh",
|
||||
"ssh/agent",
|
||||
"ssh/terminal"
|
||||
]
|
||||
revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"html",
|
||||
"html/atom",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"publicsuffix",
|
||||
"webdav",
|
||||
"webdav/internal/xml",
|
||||
"websocket"
|
||||
]
|
||||
revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"google",
|
||||
"internal",
|
||||
"jws",
|
||||
"jwt"
|
||||
]
|
||||
revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/api"
|
||||
packages = [
|
||||
"drive/v3",
|
||||
"gensupport",
|
||||
"googleapi",
|
||||
"googleapi/internal/uritemplates",
|
||||
"storage/v1"
|
||||
]
|
||||
revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
".",
|
||||
"internal",
|
||||
"internal/app_identity",
|
||||
"internal/base",
|
||||
"internal/datastore",
|
||||
"internal/log",
|
||||
"internal/modules",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"log",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
|
||||
version = "v1.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
|
||||
version = "v2.2.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "c1378c5fc821e27711155958ff64b3c74b56818ba4733dbfe0c86d518c32880e"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
Gopkg.toml (new file, 11 lines)
@@ -0,0 +1,11 @@
# pin this to master to pull in the macOS changes
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/sevlyar/go-daemon"

# pin this to master to pull in the fix for linux/mips
# can likely remove for 1.43
[[override]]
branch = "master"
name = "github.com/coreos/bbolt"
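These overrides only take effect when the vendor tree is regenerated. A minimal sketch of that step, assuming `dep` is installed as in the Makefile's `update` target later in this change:

```bash
# re-resolve and re-vendor dependencies, honouring the [[override]] pins above
go get -u github.com/golang/dep/cmd/dep
dep ensure -update -v
```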
@@ -1,17 +1,43 @@
|
||||
When filing an issue, please include the following information if possible as well as a description of the problem. Make sure you test with the latest beta of rclone.
|
||||
<!--
|
||||
|
||||
http://beta.rclone.org/
|
||||
http://rclone.org/downloads/
|
||||
Hi!
|
||||
|
||||
If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead of filing an issue.
|
||||
We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
|
||||
|
||||
> What is your rclone version (eg output from `rclone -V`)
|
||||
If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
|
||||
|
||||
> Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
||||
https://forum.rclone.org/
|
||||
|
||||
> Which cloud storage system are you using? (eg Google Drive)
|
||||
instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
|
||||
|
||||
> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
||||
If you think you might have found a bug, please can you try to replicate it with the latest beta?
|
||||
|
||||
> A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
||||
https://beta.rclone.org/
|
||||
|
||||
If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
|
||||
|
||||
If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
|
||||
|
||||
Thanks
|
||||
|
||||
The Rclone Developers
|
||||
|
||||
-->
|
||||
|
||||
#### What is the problem you are having with rclone?
|
||||
|
||||
|
||||
#### What is your rclone version (eg output from `rclone -V`)
|
||||
|
||||
|
||||
#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
|
||||
|
||||
|
||||
#### Which cloud storage system are you using? (eg Google Drive)
|
||||
|
||||
|
||||
#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
|
||||
|
||||
|
||||
#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
|
||||
|
||||
|
||||
MAINTAINERS.md (new file, 87 lines)
@@ -0,0 +1,87 @@
# Maintainers guide for rclone #

Current active maintainers of rclone are

* Nick Craig-Wood @ncw
* Stefan Breunig @breunigs
* Ishuah Kariuki @ishuah
* Remus Bunduc @remusb - cache subsystem maintainer
* Fabian Möller @B4dM4n

**This is a work in progress draft**

This is a guide for how to be an rclone maintainer. This is mostly a writeup of what I (@ncw) attempt to do.

## Triaging Tickets ##

When a ticket comes in it should be triaged. This means it should be classified by adding labels and placed into a milestone. Quite a lot of tickets need a bit of back and forth to determine whether it is a valid ticket, so tickets may remain without labels or a milestone for a while.

Rclone uses the labels like this:

* `bug` - a definite verified bug
* `can't reproduce` - a problem which we can't reproduce
* `doc fix` - a bug in the documentation - if users need help understanding the docs add this label
* `duplicate` - normally close these and ask the user to subscribe to the original
* `enhancement: new remote` - a new rclone backend
* `enhancement` - a new feature
* `FUSE` - to do with the `rclone mount` command
* `good first issue` - mark these if you find a small self contained issue - these get shown to new visitors to the project
* `help wanted` - mark these if you find a self contained issue - these get shown to new visitors to the project
* `IMPORTANT` - note to maintainers not to forget to fix this for the release
* `maintenance` - internal enhancement, code re-organisation etc
* `Needs Go 1.XX` - waiting for that version of Go to be released
* `question` - not a `bug` or `enhancement` - direct to the forum for next time
* `Remote: XXX` - which rclone backend this affects
* `thinking` - not decided on the course of action yet

If it turns out to be a bug or an enhancement it should be tagged as such, with the appropriate other tags. Don't forget the "good first issue" tag to give new contributors something easy to do to get going.

When a ticket is tagged it should be added to a milestone, either the next release, the one after, Soon or Help Wanted. Bugs can be added to the "Known Bugs" milestone if they aren't planned to be fixed or need to wait for something (eg the next Go release).

The milestones have these meanings:

* v1.XX - stuff we would like to fit into this release
* v1.XX+1 - stuff we are leaving until the next release
* Soon - stuff we think is a good idea - waiting to be scheduled to a release
* Help wanted - blue sky stuff that might get moved up, or someone could help with
* Known bugs - bugs waiting on external factors or which we aren't going to fix for the moment

Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.

## Closing Tickets ##

Close tickets as soon as you can - make sure they are tagged with a release. Post a link to a beta with the fix in it in the ticket, asking for feedback.

## Pull requests ##

Try to process pull requests promptly!

Merging pull requests on GitHub itself works quite well nowadays, so you can squash and rebase or rebase pull requests. rclone doesn't use merge commits. Use the squash and rebase option if you need to edit the commit message.

After merging the commit, in your local master branch, do `git pull`, then run `bin/update-authors.py` to update the authors file, then `git push`.
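A minimal sketch of that post-merge step, assuming you are in the repository root and the merge has already happened on GitHub (the commit message below is only illustrative):

```bash
# bring the merged commit into the local master branch
git checkout master
git pull

# regenerate the authors file and push the result
./bin/update-authors.py
git commit -a -m "Add new author"   # only needed if the script changed anything
git push
```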
Sometimes pull requests need to be left open for a while - this is especially true of contributions of new backends, which take a long time to get right.

## Merges ##

If you are merging a branch locally then do `git merge --ff-only branch-name` to avoid a merge commit. You'll need to rebase the branch if it doesn't merge cleanly.

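A minimal sketch of that local merge flow; `branch-name` is just a placeholder:

```bash
# fast-forward merge; this fails rather than creating a merge commit
git checkout master
git pull
git merge --ff-only branch-name

# if it fails, rebase the branch onto master first, then retry
git checkout branch-name
git rebase master
git checkout master
git merge --ff-only branch-name
```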
## Release cycle ##

Rclone aims for a 6-8 week release cycle. Sometimes release cycles take longer if there is something big to merge that didn't stabilize properly or for personal reasons.

High impact regressions should be fixed before the next release.

Near the start of the release cycle the dependencies should be updated with `make update` to give time for bugs to surface.

Towards the end of the release cycle try not to merge anything too big so that things can settle down.

Follow the instructions in RELEASE.md for making the release. Note that the testing part is the most time consuming, often needing several rounds of test and fix depending on exactly how many new features rclone has gained.

## Mailing list ##

There is now an invite-only mailing list for rclone developers, `rclone-dev`, on Google Groups.

## TODO ##

I should probably make a dev@rclone.org address to register with cloud providers.
MANUAL.html (7469 lines): file diff suppressed because it is too large
MANUAL.txt (9969 lines): file diff suppressed because it is too large
Makefile (160 changed lines)
@@ -1,66 +1,100 @@
|
||||
SHELL = /bin/bash
|
||||
TAG := $(shell echo `git describe --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
|
||||
SHELL = bash
|
||||
BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
|
||||
TAG_BRANCH := -$(BRANCH)
|
||||
BRANCH_PATH := branch/
|
||||
ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
|
||||
TAG_BRANCH :=
|
||||
BRANCH_PATH :=
|
||||
endif
|
||||
TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
|
||||
LAST_TAG := $(shell git describe --tags --abbrev=0)
|
||||
NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
|
||||
GO_VERSION := $(shell go version)
|
||||
GO_FILES := $(shell go list ./... | grep -v /vendor/ )
|
||||
GO_LATEST := $(findstring go1.8,$(GO_VERSION))
|
||||
BETA_URL := http://beta.rclone.org/$(TAG)/
|
||||
# Only needed for Go 1.5
|
||||
export GO15VENDOREXPERIMENT=1
|
||||
# Run full tests if go >= go1.9
|
||||
FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
|
||||
BETA_PATH := $(BRANCH_PATH)$(TAG)
|
||||
BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
|
||||
BETA_UPLOAD_ROOT := memstore:beta-rclone-org
|
||||
BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
|
||||
# Pass in GOTAGS=xyz on the make command line to set build tags
|
||||
ifdef GOTAGS
|
||||
BUILDTAGS=-tags "$(GOTAGS)"
|
||||
endif
|
||||
|
||||
.PHONY: rclone
|
||||
.PHONY: rclone vars version
|
||||
|
||||
rclone:
|
||||
touch fs/version.go
|
||||
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)"
|
||||
go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
|
||||
cp -av `go env GOPATH`/bin/rclone .
|
||||
|
||||
vars:
|
||||
@echo SHELL="'$(SHELL)'"
|
||||
@echo BRANCH="'$(BRANCH)'"
|
||||
@echo TAG="'$(TAG)'"
|
||||
@echo LAST_TAG="'$(LAST_TAG)'"
|
||||
@echo NEW_TAG="'$(NEW_TAG)'"
|
||||
@echo GO_VERSION="'$(GO_VERSION)'"
|
||||
@echo GO_LATEST="'$(GO_LATEST)'"
|
||||
@echo FULL_TESTS="'$(FULL_TESTS)'"
|
||||
@echo BETA_URL="'$(BETA_URL)'"
|
||||
|
||||
version:
|
||||
@echo '$(TAG)'
|
||||
|
||||
# Full suite of integration tests
|
||||
test: rclone
|
||||
go test $(GO_FILES)
|
||||
cd fs && go run test_all.go
|
||||
go install github.com/ncw/rclone/fstest/test_all
|
||||
-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
|
||||
-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
|
||||
@echo "Written logs in test.log and fs/test_all.log"
|
||||
|
||||
# Quick test
|
||||
quicktest:
|
||||
RCLONE_CONFIG="/notfound" go test $(GO_FILES)
|
||||
RCLONE_CONFIG="/notfound" go test -cpu=2 -race $(GO_FILES)
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
|
||||
ifdef FULL_TESTS
|
||||
RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
|
||||
endif
|
||||
|
||||
# Do source code quality checks
|
||||
check: rclone
|
||||
ifdef GO_LATEST
|
||||
go tool vet -printfuncs Debugf,Infof,Logf,Errorf . 2>&1 | grep -E -v vendor/ ; test $$? -eq 1
|
||||
errcheck $(GO_FILES)
|
||||
ifdef FULL_TESTS
|
||||
go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
|
||||
errcheck $(BUILDTAGS) ./...
|
||||
find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
|
||||
go list ./... | grep -v /vendor/ | xargs -i golint {} | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
|
||||
go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
|
||||
else
|
||||
@echo Skipping tests as not on Go stable
|
||||
@echo Skipping source quality tests as version of go too old
|
||||
endif
|
||||
|
||||
gometalinter_install:
|
||||
go get -u github.com/alecthomas/gometalinter
|
||||
gometalinter --install --update
|
||||
|
||||
# We aren't using gometalinter as the default linter yet because
|
||||
# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
|
||||
# 2. can't get -printfuncs working with the vet linter
|
||||
gometalinter:
|
||||
gometalinter ./...
|
||||
|
||||
# Get the build dependencies
|
||||
build_dep:
|
||||
ifdef GO_LATEST
|
||||
ifdef FULL_TESTS
|
||||
go get -u github.com/kisielk/errcheck
|
||||
go get -u golang.org/x/tools/cmd/goimports
|
||||
go get -u github.com/golang/lint/golint
|
||||
go get -u github.com/inconshreveable/mousetrap
|
||||
go get -u github.com/tools/godep
|
||||
endif
|
||||
|
||||
# Get the release dependencies
|
||||
release_dep:
|
||||
go get -u github.com/goreleaser/nfpm/...
|
||||
go get -u github.com/aktau/github-release
|
||||
|
||||
# Update dependencies
|
||||
update:
|
||||
rm -rf Godeps vendor
|
||||
go get -t -u -f -v ./...
|
||||
godep save ./...
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
dep ensure -update -v
|
||||
|
||||
doc: rclone.1 MANUAL.html MANUAL.txt
|
||||
|
||||
@@ -79,6 +113,9 @@ MANUAL.txt: MANUAL.md
|
||||
commanddocs: rclone
|
||||
rclone gendocs docs/content/commands/
|
||||
|
||||
rcdocs: rclone
|
||||
bin/make_rc_docs.sh
|
||||
|
||||
install: rclone
|
||||
install -d ${DESTDIR}/usr/bin
|
||||
install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
|
||||
@@ -87,7 +124,7 @@ clean:
|
||||
go clean ./...
|
||||
find . -name \*~ | xargs -r rm -f
|
||||
rm -rf build docs/public
|
||||
rm -f rclone rclonetest/rclonetest
|
||||
rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
|
||||
|
||||
website:
|
||||
cd docs && hugo
|
||||
@@ -95,27 +132,75 @@ website:
|
||||
upload_website: website
|
||||
rclone -v sync docs/public memstore:www-rclone-org
|
||||
|
||||
tarball:
|
||||
git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
|
||||
|
||||
sign_upload:
|
||||
cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
|
||||
cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
|
||||
cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
|
||||
|
||||
check_sign:
|
||||
cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
|
||||
cd build && gpg --verify SHA1SUMS && gpg --decrypt SHA1SUMS | sha1sum -c
|
||||
cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
|
||||
|
||||
upload:
|
||||
rclone -v copy build/ memstore:downloads-rclone-org
|
||||
rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
|
||||
rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
|
||||
|
||||
upload_github:
|
||||
./bin/upload-github $(TAG)
|
||||
|
||||
cross: doc
|
||||
go run bin/cross-compile.go -release current $(TAG)
|
||||
go run bin/cross-compile.go -release current $(BUILDTAGS) $(TAG)
|
||||
|
||||
beta:
|
||||
go run bin/cross-compile.go $(TAG)β
|
||||
go run bin/cross-compile.go $(BUILDTAGS) $(TAG)β
|
||||
rclone -v copy build/ memstore:pub-rclone-org/$(TAG)β
|
||||
@echo Beta release ready at http://pub.rclone.org/$(TAG)%CE%B2/
|
||||
@echo Beta release ready at https://pub.rclone.org/$(TAG)%CE%B2/
|
||||
|
||||
log_since_last_release:
|
||||
git log $(LAST_TAG)..
|
||||
|
||||
compile_all:
|
||||
ifdef FULL_TESTS
|
||||
go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
|
||||
else
|
||||
@echo Skipping compile all as version of go too old
|
||||
endif
|
||||
|
||||
appveyor_upload:
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||
ifndef BRANCH_PATH
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||
endif
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
BUILD_FLAGS := -exclude "^(windows|darwin)/"
|
||||
ifeq ($(TRAVIS_OS_NAME),osx)
|
||||
BUILD_FLAGS := -include "^darwin/" -cgo
|
||||
endif
|
||||
|
||||
travis_beta:
|
||||
ifeq ($(TRAVIS_OS_NAME),linux)
|
||||
go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
|
||||
endif
|
||||
git log $(LAST_TAG).. > /tmp/git-log.txt
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' build/ memstore:beta-rclone-org
|
||||
go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
|
||||
rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
|
||||
ifndef BRANCH_PATH
|
||||
rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
|
||||
endif
|
||||
@echo Beta release ready at $(BETA_URL)
|
||||
|
||||
# Fetch the windows builds from appveyor
|
||||
fetch_windows:
|
||||
rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
|
||||
-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
|
||||
-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
|
||||
md5sum build/rclone-*-windows-*.zip | sort
|
||||
|
||||
serve: website
|
||||
cd docs && hugo server -v -w
|
||||
|
||||
@@ -123,8 +208,8 @@ tag: doc
|
||||
@echo "Old tag is $(LAST_TAG)"
|
||||
@echo "New tag is $(NEW_TAG)"
|
||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
|
||||
perl -lpe 's/VERSION/${NEW_TAG}/g; s/DATE/'`date -I`'/g;' docs/content/downloads.md.in > docs/content/downloads.md
|
||||
git tag $(NEW_TAG)
|
||||
echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
|
||||
git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
|
||||
@echo "Edit the new changelog in docs/content/changelog.md"
|
||||
@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
|
||||
@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
|
||||
@@ -133,9 +218,12 @@ tag: doc
|
||||
@echo "And finally run make retag before make cross etc"
|
||||
|
||||
retag:
|
||||
git tag -f $(LAST_TAG)
|
||||
git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
|
||||
|
||||
startdev:
|
||||
echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
|
||||
git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
|
||||
|
||||
gen_tests:
|
||||
cd fstest/fstests && go generate
|
||||
winzip:
|
||||
zip -9 rclone-$(TAG).zip rclone.exe
|
||||
|
||||
|
||||
README.md (43 changed lines)
@@ -1,29 +1,40 @@
|
||||
[](http://rclone.org/)
|
||||
[](https://rclone.org/)
|
||||
|
||||
[Website](http://rclone.org) |
|
||||
[Documentation](http://rclone.org/docs/) |
|
||||
[Website](https://rclone.org) |
|
||||
[Documentation](https://rclone.org/docs/) |
|
||||
[Contributing](CONTRIBUTING.md) |
|
||||
[Changelog](http://rclone.org/changelog/) |
|
||||
[Installation](http://rclone.org/install/) |
|
||||
[Changelog](https://rclone.org/changelog/) |
|
||||
[Installation](https://rclone.org/install/) |
|
||||
[Forum](https://forum.rclone.org/)
|
||||
[G+](https://google.com/+RcloneOrg)
|
||||
|
||||
|
||||
[](https://travis-ci.org/ncw/rclone) [](https://ci.appveyor.com/project/ncw/rclone) [](https://godoc.org/github.com/ncw/rclone)
|
||||
[](https://travis-ci.org/ncw/rclone)
|
||||
[](https://ci.appveyor.com/project/ncw/rclone)
|
||||
[](https://circleci.com/gh/ncw/rclone/tree/master)
|
||||
[](https://godoc.org/github.com/ncw/rclone)
|
||||
|
||||
Rclone is a command line program to sync files and directories to and from
|
||||
|
||||
* Google Drive
|
||||
* Amazon S3
|
||||
* Openstack Swift / Rackspace cloud files / Memset Memstore
|
||||
* Dropbox
|
||||
* Google Cloud Storage
|
||||
* Amazon Drive
|
||||
* Microsoft One Drive
|
||||
* Hubic
|
||||
* Amazon S3 / Dreamhost / Ceph / Minio / Wasabi
|
||||
* Backblaze B2
|
||||
* Yandex Disk
|
||||
* Box
|
||||
* Dropbox
|
||||
* FTP
|
||||
* Google Cloud Storage
|
||||
* Google Drive
|
||||
* HTTP
|
||||
* Hubic
|
||||
* Mega
|
||||
* Microsoft Azure Blob Storage
|
||||
* Microsoft OneDrive
|
||||
* OpenDrive
|
||||
* Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
|
||||
* pCloud
|
||||
* QingStor
|
||||
* SFTP
|
||||
* Webdav / Owncloud / Nextcloud
|
||||
* Yandex Disk
|
||||
* The local filesystem
|
||||
|
||||
Features
|
||||
@@ -41,7 +52,7 @@ Features
|
||||
See the home page for installation, usage, documentation, changelog
|
||||
and configuration walkthroughs.
|
||||
|
||||
* http://rclone.org/
|
||||
* https://rclone.org/
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
RELEASE.md (21 changed lines)
@@ -6,29 +6,36 @@ Making a release
|
||||
* git status - make sure everything is checked in
|
||||
* Check travis & appveyor builds are green
|
||||
* make check
|
||||
* make test
|
||||
* make test # see integration test server or run locally
|
||||
* make tag
|
||||
* edit docs/content/changelog.md
|
||||
* make doc
|
||||
* git status - to check for new man pages - git add them
|
||||
* # Update version number in snapcraft.yml
|
||||
* git commit -a -v -m "Version v1.XX"
|
||||
* make retag
|
||||
* make release_dep
|
||||
* # Set the GOPATH for a current stable go compiler
|
||||
* make cross
|
||||
* make upload
|
||||
* make upload_website
|
||||
* git checkout docs/content/commands # to undo date changes in commands
|
||||
* git push --tags origin master
|
||||
* git push --tags origin master:stable # update the stable branch for packager.io
|
||||
* # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
|
||||
* make fetch_windows
|
||||
* make tarball
|
||||
* make sign_upload
|
||||
* make check_sign
|
||||
* make upload
|
||||
* make upload_website
|
||||
* make upload_github
|
||||
* make startdev
|
||||
* # announce with forum post, twitter post, G+ post
|
||||
|
||||
Early in the next release cycle update the vendored dependencies
|
||||
* Review any pinned packages in Gopkg.toml and remove if possible
|
||||
* make update
|
||||
* git status
|
||||
* git add new files
|
||||
* carry forward any patches to vendor stuff
|
||||
* git commit -a -v
|
||||
|
||||
## Make version number go to -DEV and check in
|
||||
|
||||
Make the version number be just in a file?
|
||||
Make the version number be just in a file?
|
||||
|
||||
@@ -1,63 +0,0 @@
|
||||
// Test AmazonCloudDrive filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package amazonclouddrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/amazonclouddrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
appveyor.yml (20 lines, deleted)
@@ -1,20 +0,0 @@
|
||||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
clone_folder: c:\gopath\src\github.com\ncw\rclone
|
||||
|
||||
environment:
|
||||
GOPATH: c:\gopath
|
||||
|
||||
install:
|
||||
- echo %PATH%
|
||||
- echo %GOPATH%
|
||||
- go version
|
||||
- go env
|
||||
- go install
|
||||
|
||||
build_script:
|
||||
- rmdir vendor\bazil.org\fuse /s /q
|
||||
- go test -cpu=2 ./...
|
||||
- go test -cpu=2 -short -race ./...
|
||||
b2/api/types.go (301 lines, deleted)
@@ -1,301 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Error describes a B2 error response
|
||||
type Error struct {
|
||||
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
|
||||
Code string `json:"code"` // A single-identifier code that identifies the error.
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
return e.Status == 403 // 403 errors shouldn't be retried
|
||||
}
|
||||
|
||||
var _ fs.Fataler = (*Error)(nil)
|
||||
|
||||
// Account describes a B2 account
|
||||
type Account struct {
|
||||
ID string `json:"accountId"` // The identifier for the account.
|
||||
}
|
||||
|
||||
// Bucket describes a B2 bucket
|
||||
type Bucket struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
|
||||
// fits in a 64 bit integer such as the type "long" in the programming
|
||||
// language Java. It is intended to be compatible with Java's time
|
||||
// long. For example, it can be passed directly into the java call
|
||||
// Date.setTime(long time).
|
||||
type Timestamp time.Time
|
||||
|
||||
// MarshalJSON turns a Timestamp into JSON (in UTC)
|
||||
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
|
||||
timestamp := (*time.Time)(t).UTC().UnixNano()
|
||||
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Timestamp
|
||||
func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||
timestamp, err := strconv.ParseInt(string(data), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
|
||||
return nil
|
||||
}
|
||||
|
||||
const versionFormat = "-v2006-01-02-150405.000"
|
||||
|
||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||
func (t Timestamp) AddVersion(remote string) string {
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
s := (time.Time)(t).Format(versionFormat)
|
||||
// Replace the '.' with a '-'
|
||||
s = strings.Replace(s, ".", "-", -1)
|
||||
return base + s + ext
|
||||
}
|
||||
|
||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||
//
|
||||
// It returns the new file name and a timestamp, or the old filename
|
||||
// and a zero timestamp.
|
||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
newRemote = remote
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
if len(base) < len(versionFormat) {
|
||||
return
|
||||
}
|
||||
versionStart := len(base) - len(versionFormat)
|
||||
// Check it ends in -xxx
|
||||
if base[len(base)-4] != '-' {
|
||||
return
|
||||
}
|
||||
// Replace with .xxx for parsing
|
||||
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
|
||||
// IsZero returns true if the timestamp is uninitialised
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return (time.Time)(t).IsZero()
|
||||
}
|
||||
|
||||
// Equal compares two timestamps
|
||||
//
|
||||
// If either is zero then it returns false
|
||||
func (t Timestamp) Equal(s Timestamp) bool {
|
||||
if (time.Time)(t).IsZero() {
|
||||
return false
|
||||
}
|
||||
if (time.Time)(s).IsZero() {
|
||||
return false
|
||||
}
|
||||
return (time.Time)(t).Equal((time.Time)(s))
|
||||
}
|
||||
|
||||
// File is info about a file
|
||||
type File struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
Size int64 `json:"size"` // The number of bytes in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
|
||||
type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
|
||||
}
|
||||
|
||||
// ListBucketsResponse is as returned from the b2_list_buckets call
|
||||
type ListBucketsResponse struct {
|
||||
Buckets []Bucket `json:"buckets"`
|
||||
}
|
||||
|
||||
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesRequest struct {
|
||||
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
|
||||
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this one will be returned.
|
||||
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
|
||||
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
|
||||
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
|
||||
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
|
||||
}
|
||||
|
||||
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesResponse struct {
|
||||
Files []File `json:"files"` // An array of objects, each one describing one file.
|
||||
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
|
||||
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
|
||||
}
|
||||
|
||||
// GetUploadURLRequest is passed to b2_get_upload_url
|
||||
type GetUploadURLRequest struct {
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
|
||||
}
|
||||
|
||||
// GetUploadURLResponse is received from b2_get_upload_url
|
||||
type GetUploadURLResponse struct {
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
||||
}
|
||||
|
||||
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
||||
type FileInfo struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
AccountID string `json:"accountId"` // Your account ID.
|
||||
BucketID string `json:"bucketId"` // The bucket that the file is in.
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// CreateBucketRequest is used to create a bucket
|
||||
type CreateBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// DeleteBucketRequest is used to create a bucket
|
||||
type DeleteBucketRequest struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
}
|
||||
|
||||
// DeleteFileRequest is used to delete a file version
|
||||
type DeleteFileRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
}
|
||||
|
||||
// HideFileRequest is used to delete a file
|
||||
type HideFileRequest struct {
|
||||
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
|
||||
Name string `json:"fileName"` // The name of the file to hide.
|
||||
}
|
||||
|
||||
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
|
||||
type GetFileInfoRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
}
|
||||
|
||||
// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
|
||||
//
|
||||
// If the original source of the file being uploaded has a last
|
||||
// modified time concept, Backblaze recommends using
|
||||
// src_last_modified_millis as the name, and a string holding the base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970
|
||||
// UTC. This fits in a 64 bit integer such as the type "long" in the
|
||||
// programming language Java. It is intended to be compatible with
|
||||
// Java's time long. For example, it can be passed directly into the
|
||||
// Java call Date.setTime(long time).
|
||||
//
|
||||
// If the caller knows the SHA1 of the entire large file being
|
||||
// uploaded, Backblaze recommends using large_file_sha1 as the name,
|
||||
// and a 40 byte hex string representing the SHA1.
|
||||
//
|
||||
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
||||
type StartLargeFileRequest struct {
|
||||
BucketID string `json:"bucketId"` //The ID of the bucket that the file will go in.
|
||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||
}
|
||||
|
||||
// StartLargeFileResponse is the response to StartLargeFileRequest
|
||||
type StartLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
||||
type GetUploadPartURLRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLResponse is received from b2_get_upload_url
|
||||
type GetUploadPartURLResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
|
||||
}
|
||||
|
||||
// UploadPartResponse is the response to b2_upload_part
|
||||
type UploadPartResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
}
|
||||
|
||||
// FinishLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
|
||||
//
|
||||
// Large files do not have a SHA1 checksum. The value will always be "none".
|
||||
type FinishLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of part 1 is the first string in the array, at index 0.
|
||||
}
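// Illustrative sketch, not part of the original source: the partSha1Array
// ordering described above - part N's SHA1 is stored at index N-1.
func exampleFinishLargeFileRequest(fileID string, partSHA1s []string) FinishLargeFileRequest {
	return FinishLargeFileRequest{
		ID:    fileID,
		SHA1s: partSHA1s, // partSHA1s[0] is the SHA1 of part 1, partSHA1s[1] of part 2, and so on
	}
}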
|
||||
|
||||
// CancelLargeFileRequest is passed to b2_cancel_large_file
|
||||
//
|
||||
// The response is a CancelLargeFileResponse
|
||||
type CancelLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// CancelLargeFileResponse is the response to CancelLargeFileRequest
|
||||
type CancelLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
}
|
||||
@@ -1,87 +0,0 @@
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyT api.Timestamp
|
||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||
)
|
||||
|
||||
func TestTimestampMarshalJSON(t *testing.T) {
|
||||
resB, err := t0.MarshalJSON()
|
||||
res := string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "3661123", res)
|
||||
|
||||
resB, err = t1.MarshalJSON()
|
||||
res = string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "981173106123", res)
|
||||
}
|
||||
|
||||
func TestTimestampUnmarshalJSON(t *testing.T) {
|
||||
var tActual api.Timestamp
|
||||
err := tActual.UnmarshalJSON([]byte("981173106123"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||
}
|
||||
|
||||
func TestTimestampAddVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
t api.Timestamp
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
||||
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
||||
{t1, "", "-v2001-02-03-040506-123"},
|
||||
} {
|
||||
actual := test.t.AddVersion(test.in)
|
||||
assert.Equal(t, test.expected, actual, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampRemoveVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedT api.Timestamp
|
||||
expectedRemote string
|
||||
}{
|
||||
{"potato.txt", emptyT, "potato.txt"},
|
||||
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
||||
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
||||
{"-v2001-02-03-040506-123", t1, ""},
|
||||
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
||||
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
||||
} {
|
||||
actualT, actualRemote := api.RemoveVersion(test.in)
|
||||
assert.Equal(t, test.expectedT, actualT, test.in)
|
||||
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampIsZero(t *testing.T) {
|
||||
assert.True(t, emptyT.IsZero())
|
||||
assert.False(t, t0.IsZero())
|
||||
assert.False(t, t1.IsZero())
|
||||
}
|
||||
|
||||
func TestTimestampEqual(t *testing.T) {
|
||||
assert.False(t, emptyT.Equal(emptyT))
|
||||
assert.False(t, t0.Equal(emptyT))
|
||||
assert.False(t, emptyT.Equal(t0))
|
||||
assert.False(t, t0.Equal(t1))
|
||||
assert.False(t, t1.Equal(t0))
|
||||
assert.True(t, t0.Equal(t0))
|
||||
assert.True(t, t1.Equal(t1))
|
||||
}
|
||||
@@ -1,63 +0,0 @@
|
||||
// Test B2 filesystem interface
|
||||
//
|
||||
// Automatically generated - DO NOT EDIT
|
||||
// Regenerate with: make gen_tests
|
||||
package b2_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/b2"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
func TestSetup(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*b2.Object)(nil))
|
||||
fstests.RemoteName = "TestB2:"
|
||||
}
|
||||
|
||||
// Generic tests for the Fs
|
||||
func TestInit(t *testing.T) { fstests.TestInit(t) }
|
||||
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
|
||||
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
|
||||
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
|
||||
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
|
||||
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
|
||||
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
|
||||
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
|
||||
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
|
||||
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
|
||||
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
|
||||
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
|
||||
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
|
||||
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
|
||||
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
|
||||
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
|
||||
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
|
||||
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
|
||||
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
|
||||
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
|
||||
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
|
||||
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
|
||||
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
|
||||
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
|
||||
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
|
||||
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
|
||||
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
|
||||
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
|
||||
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
|
||||
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
|
||||
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
|
||||
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
|
||||
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
|
||||
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
|
||||
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
|
||||
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
|
||||
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
|
||||
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
|
||||
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
|
||||
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
|
||||
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
|
||||
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
|
||||
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
|
||||
b2/upload.go (302 lines)
@@ -1,302 +0,0 @@
|
||||
// Upload large files for b2
|
||||
//
|
||||
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// largeUpload is used to control the upload of large files which need chunking
|
||||
type largeUpload struct {
|
||||
f *Fs // parent Fs
|
||||
o *Object // object being uploaded
|
||||
in io.Reader // read the data from here
|
||||
id string // ID of the file being uploaded
|
||||
size int64 // total size
|
||||
parts int64 // calculated number of parts
|
||||
sha1s []string // slice of SHA1s for each part
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||
}
|
||||
|
||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
|
||||
remote := o.remote
|
||||
size := src.Size()
|
||||
parts := size / int64(chunkSize)
|
||||
if size%int64(chunkSize) != 0 {
|
||||
parts++
|
||||
}
|
||||
if parts > maxParts {
|
||||
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
|
||||
}
|
||||
modTime := src.ModTime()
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_start_large_file",
|
||||
}
|
||||
bucketID, err := f.getBucketID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var request = api.StartLargeFileRequest{
|
||||
BucketID: bucketID,
|
||||
Name: o.fs.root + remote,
|
||||
ContentType: fs.MimeType(src),
|
||||
Info: map[string]string{
|
||||
timeKey: timeString(modTime),
|
||||
},
|
||||
}
|
||||
// Set the SHA1 if known
|
||||
if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
}
|
||||
var response api.StartLargeFileResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(&opts, &request, &response)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
up = &largeUpload{
|
||||
f: f,
|
||||
o: o,
|
||||
in: in,
|
||||
id: response.ID,
|
||||
size: size,
|
||||
parts: parts,
|
||||
sha1s: make([]string, parts),
|
||||
}
|
||||
return up, nil
|
||||
}
|
||||
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
|
||||
//
|
||||
// This should be returned with returnUploadURL when finished
|
||||
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
|
||||
up.uploadMu.Lock()
|
||||
defer up.uploadMu.Unlock()
|
||||
if len(up.uploads) == 0 {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_get_upload_part_url",
|
||||
}
|
||||
var request = api.GetUploadPartURLRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||
}
|
||||
} else {
|
||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||
}
|
||||
return upload, nil
|
||||
}
|
||||
|
||||
// returnUploadURL returns the UploadURL to the cache
|
||||
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
||||
if upload == nil {
|
||||
return
|
||||
}
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = append(up.uploads, upload)
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// clearUploadURL clears the current UploadURL and the AuthorizationToken
|
||||
func (up *largeUpload) clearUploadURL() {
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = nil
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Transfer a chunk
|
||||
func (up *largeUpload) transferChunk(part int64, body []byte) error {
|
||||
calculatedSHA1 := fmt.Sprintf("%x", sha1.Sum(body))
|
||||
up.sha1s[part-1] = calculatedSHA1
|
||||
size := int64(len(body))
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
|
||||
|
||||
// Get upload URL
|
||||
upload, err := up.getUploadURL()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// Authorization
|
||||
//
|
||||
// An upload authorization token, from b2_get_upload_part_url.
|
||||
//
|
||||
// X-Bz-Part-Number
|
||||
//
|
||||
// A number from 1 to 10000. The parts uploaded for one file
|
||||
// must have contiguous numbers, starting with 1.
|
||||
//
|
||||
// Content-Length
|
||||
//
|
||||
// The number of bytes in the file being uploaded. Note that
|
||||
// this header is required; you cannot leave it out and just
|
||||
// use chunked encoding. The minimum size of every part but
|
||||
// the last one is 100MB.
|
||||
//
|
||||
// X-Bz-Content-Sha1
|
||||
//
|
||||
// The SHA1 checksum of this part of the file. B2 will
|
||||
// check this when the part is uploaded, to make sure that the
|
||||
// data arrived correctly. The same SHA1 checksum must be
|
||||
// passed to b2_finish_large_file.
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Absolute: true,
|
||||
Path: upload.UploadURL,
|
||||
Body: fs.AccountPart(up.o, bytes.NewBuffer(body)),
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": upload.AuthorizationToken,
|
||||
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
|
||||
sha1Header: calculatedSHA1,
|
||||
},
|
||||
ContentLength: &size,
|
||||
}
|
||||
|
||||
var response api.UploadPartResponse
|
||||
|
||||
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
|
||||
retry, err := up.f.shouldRetry(resp, err)
|
||||
// On retryable error clear PartUploadURL
|
||||
if retry {
|
||||
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
|
||||
upload = nil
|
||||
}
|
||||
up.returnUploadURL(upload)
|
||||
return retry, err
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
|
||||
} else {
|
||||
fs.Debugf(up.o, "Done sending chunk %d", part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// finish closes off the large upload
|
||||
func (up *largeUpload) finish() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_finish_large_file",
|
||||
}
|
||||
var request = api.FinishLargeFileRequest{
|
||||
ID: up.id,
|
||||
SHA1s: up.sha1s,
|
||||
}
|
||||
var response api.FileInfo
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// cancel aborts the large upload
|
||||
func (up *largeUpload) cancel() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_cancel_large_file",
|
||||
}
|
||||
var request = api.CancelLargeFileRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
var response api.CancelLargeFileResponse
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Upload uploads the chunks from the input
|
||||
func (up *largeUpload) Upload() error {
|
||||
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
|
||||
remaining := up.size
|
||||
errs := make(chan error, 1)
|
||||
var wg sync.WaitGroup
|
||||
var err error
|
||||
fs.AccountByPart(up.o) // Cancel whole file accounting before reading
|
||||
outer:
|
||||
for part := int64(1); part <= up.parts; part++ {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
break outer
|
||||
default:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
}
|
||||
|
||||
// Get a block of memory
|
||||
buf := up.f.getUploadBlock()[:reqSize]
|
||||
|
||||
// Read the chunk
|
||||
_, err = io.ReadFull(up.in, buf)
|
||||
if err != nil {
|
||||
up.f.putUploadBlock(buf)
|
||||
break outer
|
||||
}
|
||||
|
||||
// Transfer the chunk
|
||||
wg.Add(1)
|
||||
go func(part int64, buf []byte) {
|
||||
defer wg.Done()
|
||||
defer up.f.putUploadBlock(buf)
|
||||
err := up.transferChunk(part, buf)
|
||||
if err != nil {
|
||||
select {
|
||||
case errs <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}(part, buf)
|
||||
|
||||
remaining -= reqSize
|
||||
}
|
||||
wg.Wait()
|
||||
if err == nil {
|
||||
select {
|
||||
case err = <-errs:
|
||||
default:
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
|
||||
cancelErr := up.cancel()
|
||||
if cancelErr != nil {
|
||||
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
// Check any errors
|
||||
fs.Debugf(up.o, "Finishing large file upload")
|
||||
return up.finish()
|
||||
}
|
||||
backend/alias/alias.go (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
package alias
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "alias",
|
||||
Description: "Alias for a existing remote",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path.
|
||||
//
|
||||
// The returned Fs is the actual Fs, referenced by remote in the config
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
remote := config.FileGet(name, "remote")
|
||||
if remote == "" {
|
||||
return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
|
||||
}
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
|
||||
}
|
||||
fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
root = filepath.ToSlash(root)
|
||||
return fsInfo.NewFs(configName, path.Join(fsPath, root))
|
||||
}
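// Usage sketch, not part of the original source, using a hypothetical remote
// name. Given a config entry such as
//
//	[mydocs]
//	type = alias
//	remote = s3:bucket/documents
//
// opening "mydocs:2018" resolves, via the path.Join above, to the same
// backend as "s3:bucket/documents/2018".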
|
||||
backend/alias/alias_internal_test.go (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
package alias
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
_ "github.com/ncw/rclone/backend/local" // pull in test backend
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
remoteName = "TestAlias"
|
||||
)
|
||||
|
||||
func prepare(t *testing.T, root string) {
|
||||
config.LoadConfig()
|
||||
|
||||
// Configure the remote
|
||||
config.FileSet(remoteName, "type", "alias")
|
||||
config.FileSet(remoteName, "remote", root)
|
||||
}
|
||||
|
||||
func TestNewFS(t *testing.T) {
|
||||
type testEntry struct {
|
||||
remote string
|
||||
size int64
|
||||
isDir bool
|
||||
}
|
||||
for testi, test := range []struct {
|
||||
remoteRoot string
|
||||
fsRoot string
|
||||
fsList string
|
||||
wantOK bool
|
||||
entries []testEntry
|
||||
}{
|
||||
{"", "", "", true, []testEntry{
|
||||
{"four", -1, true},
|
||||
{"one%.txt", 6, false},
|
||||
{"three", -1, true},
|
||||
{"two.html", 7, false},
|
||||
}},
|
||||
{"", "four", "", true, []testEntry{
|
||||
{"five", -1, true},
|
||||
{"under four.txt", 9, false},
|
||||
}},
|
||||
{"", "", "four", true, []testEntry{
|
||||
{"four/five", -1, true},
|
||||
{"four/under four.txt", 9, false},
|
||||
}},
|
||||
{"four", "..", "", true, []testEntry{
|
||||
{"four", -1, true},
|
||||
{"one%.txt", 6, false},
|
||||
{"three", -1, true},
|
||||
{"two.html", 7, false},
|
||||
}},
|
||||
{"four", "../three", "", true, []testEntry{
|
||||
{"underthree.txt", 9, false},
|
||||
}},
|
||||
} {
|
||||
what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)
|
||||
|
||||
remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
|
||||
require.NoError(t, err, what)
|
||||
prepare(t, remoteRoot)
|
||||
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
|
||||
require.NoError(t, err, what)
|
||||
gotEntries, err := f.List(test.fsList)
|
||||
require.NoError(t, err, what)
|
||||
|
||||
sort.Sort(gotEntries)
|
||||
|
||||
require.Equal(t, len(test.entries), len(gotEntries), what)
|
||||
for i, gotEntry := range gotEntries {
|
||||
what := fmt.Sprintf("%s, entry=%d", what, i)
|
||||
wantEntry := test.entries[i]
|
||||
|
||||
require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
|
||||
require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
|
||||
_, isDir := gotEntry.(fs.Directory)
|
||||
require.Equal(t, wantEntry.isDir, isDir, what)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewFSNoRemote(t *testing.T) {
|
||||
prepare(t, "")
|
||||
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
|
||||
|
||||
require.Error(t, err)
|
||||
require.Nil(t, f)
|
||||
}
|
||||
|
||||
func TestNewFSInvalidRemote(t *testing.T) {
|
||||
prepare(t, "not_existing_test_remote:")
|
||||
f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))
|
||||
|
||||
require.Error(t, err)
|
||||
require.Nil(t, f)
|
||||
}
|
||||
backend/alias/test/files/four/five/underfive.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
apple
|
||||
backend/alias/test/files/four/under four.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
beetroot
|
||||
backend/alias/test/files/one%.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
hello
|
||||
backend/alias/test/files/three/underthree.txt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
rutabaga
|
||||
backend/alias/test/files/two.html (new file, 1 line)
@@ -0,0 +1 @@
|
||||
potato
|
||||
backend/all/all.go (new file, 29 lines)
@@ -0,0 +1,29 @@
|
||||
package all
|
||||
|
||||
import (
|
||||
// Active file systems
|
||||
_ "github.com/ncw/rclone/backend/alias"
|
||||
_ "github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
_ "github.com/ncw/rclone/backend/azureblob"
|
||||
_ "github.com/ncw/rclone/backend/b2"
|
||||
_ "github.com/ncw/rclone/backend/box"
|
||||
_ "github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
_ "github.com/ncw/rclone/backend/dropbox"
|
||||
_ "github.com/ncw/rclone/backend/ftp"
|
||||
_ "github.com/ncw/rclone/backend/googlecloudstorage"
|
||||
_ "github.com/ncw/rclone/backend/http"
|
||||
_ "github.com/ncw/rclone/backend/hubic"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
_ "github.com/ncw/rclone/backend/mega"
|
||||
_ "github.com/ncw/rclone/backend/onedrive"
|
||||
_ "github.com/ncw/rclone/backend/opendrive"
|
||||
_ "github.com/ncw/rclone/backend/pcloud"
|
||||
_ "github.com/ncw/rclone/backend/qingstor"
|
||||
_ "github.com/ncw/rclone/backend/s3"
|
||||
_ "github.com/ncw/rclone/backend/sftp"
|
||||
_ "github.com/ncw/rclone/backend/swift"
|
||||
_ "github.com/ncw/rclone/backend/webdav"
|
||||
_ "github.com/ncw/rclone/backend/yandex"
|
||||
)
|
||||
@@ -3,7 +3,6 @@
|
||||
package amazonclouddrive
|
||||
|
||||
/*
|
||||
|
||||
FIXME make searching for directory in id and file in id more efficient
|
||||
- use the name: search parameter - remember the escaping rules
|
||||
- use Folder GetNode and GetFile
|
||||
@@ -19,37 +18,38 @@ import (
|
||||
"log"
|
||||
"net/http"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/go-acd"
|
||||
"github.com/ncw/rclone/dircache"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/oauthutil"
|
||||
"github.com/ncw/rclone/pacer"
|
||||
"github.com/ncw/rclone/rest"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/dircache"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "amzn1.application-oa2-client.6bf18d2d1f5b485c94c8988bb03ad0e7"
|
||||
rcloneEncryptedClientSecret = "ZP12wYlGw198FtmqfOxyNAGXU3fwVcQdmt--ba1d00wJnUs0LOzvVyXVDbqhbcUqnr5Vd1QejwWmiv1Ep7UJG1kUQeuBP5n9goXWd5MrAf0"
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
assetKind = "ASSET"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50000 << 20 // Display warning for files larger than this size
|
||||
folderKind = "FOLDER"
|
||||
fileKind = "FILE"
|
||||
statusAvailable = "AVAILABLE"
|
||||
timeFormat = time.RFC3339 // 2014-03-07T22:31:12.173Z
|
||||
minSleep = 20 * time.Millisecond
|
||||
warnFileSize = 50000 << 20 // Display warning for files larger than this size
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
|
||||
uploadWaitPerGB = fs.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
|
||||
uploadWaitPerGB = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
|
||||
// Description of how to auth for this app
|
||||
acdConfig = &oauth2.Config{
|
||||
Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
|
||||
@@ -57,8 +57,8 @@ var (
|
||||
AuthURL: "https://www.amazon.com/ap/oa",
|
||||
TokenURL: "https://api.amazon.com/auth/o2/token",
|
||||
},
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
|
||||
ClientID: "",
|
||||
ClientSecret: "",
|
||||
RedirectURL: oauthutil.RedirectURL,
|
||||
}
|
||||
)
|
||||
@@ -76,14 +76,20 @@ func init() {
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: fs.ConfigClientID,
|
||||
Help: "Amazon Application Client Id - leave blank normally.",
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Amazon Application Client Id - required.",
|
||||
}, {
|
||||
Name: fs.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret - leave blank normally.",
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Amazon Application Client Secret - required.",
|
||||
}, {
|
||||
Name: config.ConfigAuthURL,
|
||||
Help: "Auth server URL - leave blank to use Amazon's.",
|
||||
}, {
|
||||
Name: config.ConfigTokenURL,
|
||||
Help: "Token server url - leave blank to use Amazon's.",
|
||||
}},
|
||||
})
|
||||
fs.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
|
||||
flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
|
||||
}
|
||||
|
||||
// Fs represents a remote acd server
|
||||
@@ -130,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match an acd path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an acd 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -168,13 +171,37 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// If query parameters contain X-Amz-Algorithm remove Authorization header
|
||||
//
|
||||
// This happens when ACD redirects to S3 for the download. The oauth
|
||||
// transport puts in an Authorization header which we need to remove
|
||||
// otherwise we get this message from AWS
|
||||
//
|
||||
// Only one auth mechanism allowed; only the X-Amz-Algorithm query
|
||||
// parameter, Signature query string parameter or the Authorization
|
||||
// header should be specified
|
||||
func filterRequest(req *http.Request) {
|
||||
if req.URL.Query().Get("X-Amz-Algorithm") != "" {
|
||||
fs.Debugf(nil, "Removing Authorization: header after redirect to S3")
|
||||
req.Header.Del("Authorization")
|
||||
}
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
oAuthClient, ts, err := oauthutil.NewClient(name, acdConfig)
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
SetRequestFilter(f func(req *http.Request))
|
||||
}); ok {
|
||||
do.SetRequestFilter(filterRequest)
|
||||
} else {
|
||||
fs.Debugf(name+":", "Couldn't add request filter - large file downloads will fail")
|
||||
}
|
||||
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, acdConfig, baseClient)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Amazon Drive: %v", err)
|
||||
}
|
||||
@@ -185,9 +212,19 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
root: root,
|
||||
c: c,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
||||
noAuthClient: fs.Config.Client(),
|
||||
noAuthClient: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{CaseInsensitive: true, ReadMimeType: true}).Fill(f)
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
ReadMimeType: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.getRootInfo()
|
||||
return err
|
||||
})
|
||||
|
||||
// Update endpoints
|
||||
var resp *http.Response
|
||||
@@ -206,12 +243,6 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
}
|
||||
f.trueRootID = *rootInfo.Id
|
||||
|
||||
// Renew the token in the background
|
||||
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
|
||||
_, err := f.getRootInfo()
|
||||
return err
|
||||
})
|
||||
|
||||
f.dirCache = dircache.New(root, f.trueRootID, f)
|
||||
|
||||
// Find the current root
|
||||
@@ -395,45 +426,54 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
|
||||
return
|
||||
}
|
||||
|
||||
// ListDir reads the directory specified by the job into out, returning any more jobs
|
||||
func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.ListDirJob, err error) {
|
||||
fs.Debugf(f, "Reading %q", job.Path)
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
err = f.dirCache.FindRoot(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
directoryID, err := f.dirCache.FindDir(dir, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
maxTries := fs.Config.LowLevelRetries
|
||||
var iErr error
|
||||
for tries := 1; tries <= maxTries; tries++ {
|
||||
_, err = f.listAll(job.DirID, "", false, false, func(node *acd.Node) bool {
|
||||
remote := job.Path + *node.Name
|
||||
entries = nil
|
||||
_, err = f.listAll(directoryID, "", false, false, func(node *acd.Node) bool {
|
||||
remote := path.Join(dir, *node.Name)
|
||||
switch *node.Kind {
|
||||
case folderKind:
|
||||
if out.IncludeDirectory(remote) {
|
||||
dir := &fs.Dir{
|
||||
Name: remote,
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
dir.When, _ = time.Parse(timeFormat, *node.ModifiedDate) // FIXME
|
||||
if out.AddDir(dir) {
|
||||
return true
|
||||
}
|
||||
if job.Depth > 0 {
|
||||
jobs = append(jobs, dircache.ListDirJob{DirID: *node.Id, Path: remote + "/", Depth: job.Depth - 1})
|
||||
}
|
||||
}
|
||||
// cache the directory ID for later lookups
|
||||
f.dirCache.Put(remote, *node.Id)
|
||||
when, _ := time.Parse(timeFormat, *node.ModifiedDate) // FIXME
|
||||
d := fs.NewDir(remote, when).SetID(*node.Id)
|
||||
entries = append(entries, d)
|
||||
case fileKind:
|
||||
o, err := f.newObjectWithInfo(remote, node)
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return true
|
||||
}
|
||||
if out.Add(o) {
|
||||
iErr = err
|
||||
return true
|
||||
}
|
||||
entries = append(entries, o)
|
||||
default:
|
||||
// ignore ASSET etc
|
||||
}
|
||||
return false
|
||||
})
|
||||
if fs.IsRetryError(err) {
|
||||
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", job.Path, err, tries, maxTries)
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
}
|
||||
if fserrors.IsRetryError(err) {
|
||||
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
@@ -441,13 +481,7 @@ func (f *Fs) ListDir(out fs.ListOpts, job dircache.ListDirJob) (jobs []dircache.
|
||||
}
|
||||
break
|
||||
}
|
||||
fs.Debugf(f, "Finished reading %q", job.Path)
|
||||
return jobs, err
|
||||
}
|
||||
|
||||
// List walks the path returning files and directories into out
|
||||
func (f *Fs) List(out fs.ListOpts, dir string) {
|
||||
f.dirCache.List(f, out, dir)
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// checkUpload checks to see if an error occurred after the file was
|
||||
@@ -534,7 +568,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
size := src.Size()
|
||||
// Temporary Object under construction
|
||||
@@ -546,7 +580,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
|
||||
err := o.readMetaData()
|
||||
switch err {
|
||||
case nil:
|
||||
return o, o.Update(in, src)
|
||||
return o, o.Update(in, src, options...)
|
||||
case fs.ErrorObjectNotFound:
|
||||
// Not found so create it
|
||||
default:
|
||||
@@ -841,8 +875,8 @@ func (f *Fs) Precision() time.Duration {
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashMD5)
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
@@ -898,9 +932,9 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
if t != fs.HashMD5 {
|
||||
return "", fs.ErrHashUnsupported
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
|
||||
return *o.info.ContentProperties.Md5, nil
|
||||
@@ -983,7 +1017,7 @@ func (o *Object) Storable() bool {
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
bigObject := o.Size() >= int64(tempLinkThreshold)
|
||||
if bigObject {
|
||||
fs.Debugf(o, "Dowloading large object via tempLink")
|
||||
fs.Debugf(o, "Downloading large object via tempLink")
|
||||
}
|
||||
file := acd.File{Node: o.info}
|
||||
var resp *http.Response
|
||||
@@ -1002,7 +1036,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) error {
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
file := acd.File{Node: o.info}
|
||||
var info *acd.File
|
||||
var resp *http.Response
|
||||
@@ -1167,6 +1201,128 @@ func (o *Object) MimeType() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
|
||||
// If the implementation uses polling, it should adhere to the given interval.
|
||||
//
|
||||
// Automatically restarts itself in case of unexpected behaviour of the remote.
|
||||
//
|
||||
// Close the returned channel to stop being notified.
|
||||
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
checkpoint := config.FileGet(f.name, "checkpoint")
|
||||
|
||||
quit := make(chan bool)
|
||||
go func() {
|
||||
for {
|
||||
checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
|
||||
if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
|
||||
fs.Debugf(f, "Unable to save checkpoint: %v", err)
|
||||
}
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
case <-time.After(pollInterval):
|
||||
}
|
||||
}
|
||||
}()
|
||||
return quit
|
||||
}
|
||||
|
||||
func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
|
||||
var err error
|
||||
var resp *http.Response
|
||||
var reachedEnd bool
|
||||
var csCount int
|
||||
var nodeCount int
|
||||
|
||||
fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
|
||||
err = f.pacer.CallNoRetry(func() (bool, error) {
|
||||
resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
|
||||
Checkpoint: checkpoint,
|
||||
IncludePurged: true,
|
||||
}, func(changeSet *acd.ChangeSet, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
type entryType struct {
|
||||
path string
|
||||
entryType fs.EntryType
|
||||
}
|
||||
var pathsToClear []entryType
|
||||
csCount++
|
||||
nodeCount += len(changeSet.Nodes)
|
||||
if changeSet.End {
|
||||
reachedEnd = true
|
||||
}
|
||||
if changeSet.Checkpoint != "" {
|
||||
checkpoint = changeSet.Checkpoint
|
||||
}
|
||||
for _, node := range changeSet.Nodes {
|
||||
if path, ok := f.dirCache.GetInv(*node.Id); ok {
|
||||
if node.IsFile() {
|
||||
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
|
||||
} else {
|
||||
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if node.IsFile() {
|
||||
// translate the parent dir of this object
|
||||
if len(node.Parents) > 0 {
|
||||
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
|
||||
// and append the drive file name to compute the full file name
|
||||
if len(path) > 0 {
|
||||
path = path + "/" + *node.Name
|
||||
} else {
|
||||
path = *node.Name
|
||||
}
|
||||
// this will now clear the actual file too
|
||||
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
|
||||
}
|
||||
} else { // a true root object that is changed
|
||||
pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
visitedPaths := make(map[string]bool)
|
||||
for _, entry := range pathsToClear {
|
||||
if _, ok := visitedPaths[entry.path]; ok {
|
||||
continue
|
||||
}
|
||||
visitedPaths[entry.path] = true
|
||||
notifyFunc(entry.path, entry.entryType)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
return false, err
|
||||
})
|
||||
fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)
|
||||
|
||||
if err != nil && err != io.ErrUnexpectedEOF {
|
||||
fs.Debugf(f, "Failed to get Changes: %v", err)
|
||||
return checkpoint
|
||||
}
|
||||
|
||||
if reachedEnd {
|
||||
reachedEnd = false
|
||||
fs.Debugf(f, "All changes were processed. Waiting for more.")
|
||||
} else if checkpoint == "" {
|
||||
fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
|
||||
}
|
||||
return checkpoint
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
if o.info.Id == nil {
|
||||
return ""
|
||||
}
|
||||
return *o.info.Id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1175,6 +1331,8 @@ var (
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
backend/amazonclouddrive/amazonclouddrive_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
|
||||
// Test AmazonCloudDrive filesystem interface
|
||||
|
||||
// +build acd
|
||||
|
||||
package amazonclouddrive_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/amazonclouddrive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
|
||||
fstests.RemoteName = "TestAmazonCloudDrive:"
|
||||
fstests.Run(t)
|
||||
}
|
||||
backend/azureblob/azureblob.go (new file, 1152 lines)
File diff suppressed because it is too large
backend/azureblob/azureblob_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
|
||||
// Test AzureBlob filesystem interface
|
||||
package azureblob_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/azureblob"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestAzureBlob:",
|
||||
NilObject: (*azureblob.Object)(nil),
|
||||
})
|
||||
}
|
||||
backend/b2/api/types.go (new file, 301 lines)
@@ -0,0 +1,301 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
)
|
||||
|
||||
// Error describes a B2 error response
|
||||
type Error struct {
|
||||
Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
|
||||
Code string `json:"code"` // A single-identifier code that identifies the error.
|
||||
Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
|
||||
}
|
||||
|
||||
// Error satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
|
||||
}
|
||||
|
||||
// Fatal satisfies the Fatal interface
|
||||
//
|
||||
// It indicates which errors should be treated as fatal
|
||||
func (e *Error) Fatal() bool {
|
||||
return e.Status == 403 // 403 errors shouldn't be retried
|
||||
}
|
||||
|
||||
var _ fserrors.Fataler = (*Error)(nil)
|
||||
|
||||
// Account describes a B2 account
|
||||
type Account struct {
|
||||
ID string `json:"accountId"` // The identifier for the account.
|
||||
}
|
||||
|
||||
// Bucket describes a B2 bucket
|
||||
type Bucket struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// Timestamp is a UTC time when this file was uploaded. It is a base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
|
||||
// fits in a 64 bit integer such as the type "long" in the programming
|
||||
// language Java. It is intended to be compatible with Java's time
|
||||
// long. For example, it can be passed directly into the java call
|
||||
// Date.setTime(long time).
|
||||
type Timestamp time.Time
|
||||
|
||||
// MarshalJSON turns a Timestamp into JSON (in UTC)
|
||||
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
|
||||
timestamp := (*time.Time)(t).UTC().UnixNano()
|
||||
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Timestamp
|
||||
func (t *Timestamp) UnmarshalJSON(data []byte) error {
|
||||
timestamp, err := strconv.ParseInt(string(data), 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
|
||||
return nil
|
||||
}
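// Illustrative sketch, not part of the original source: a Timestamp
// round-trips through JSON as base 10 milliseconds since the Unix epoch
// (the same value used in the unit tests).
func exampleTimestampRoundTrip() (Timestamp, error) {
	t := Timestamp(time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC))
	data, err := t.MarshalJSON() // data == []byte("981173106123")
	if err != nil {
		return Timestamp{}, err
	}
	var out Timestamp
	err = out.UnmarshalJSON(data) // out equals t to millisecond precision
	return out, err
}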
|
||||
|
||||
const versionFormat = "-v2006-01-02-150405.000"
|
||||
|
||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||
func (t Timestamp) AddVersion(remote string) string {
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
s := (time.Time)(t).Format(versionFormat)
|
||||
// Replace the '.' with a '-'
|
||||
s = strings.Replace(s, ".", "-", -1)
|
||||
return base + s + ext
|
||||
}
|
||||
|
||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||
//
|
||||
// It returns the new file name and a timestamp, or the old filename
|
||||
// and a zero timestamp.
|
||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
newRemote = remote
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
if len(base) < len(versionFormat) {
|
||||
return
|
||||
}
|
||||
versionStart := len(base) - len(versionFormat)
|
||||
// Check it ends in -xxx
|
||||
if base[len(base)-4] != '-' {
|
||||
return
|
||||
}
|
||||
// Replace with .xxx for parsing
|
||||
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
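// Illustrative sketch, not part of the original source: adding and then
// stripping a version suffix, mirroring the cases in the unit tests.
func exampleVersioning() {
	t := Timestamp(time.Date(2001, 2, 3, 4, 5, 6, 123000000, time.UTC))
	versioned := t.AddVersion("potato.txt") // "potato-v2001-02-03-040506-123.txt"
	t2, plain := RemoveVersion(versioned)   // t2 equals t to millisecond precision, plain == "potato.txt"
	_, _ = t2, plain
}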
|
||||
|
||||
// IsZero returns true if the timestamp is uninitialised
|
||||
func (t Timestamp) IsZero() bool {
|
||||
return (time.Time)(t).IsZero()
|
||||
}
|
||||
|
||||
// Equal compares two timestamps
|
||||
//
|
||||
// If either timestamp is zero then it returns false
|
||||
func (t Timestamp) Equal(s Timestamp) bool {
|
||||
if (time.Time)(t).IsZero() {
|
||||
return false
|
||||
}
|
||||
if (time.Time)(s).IsZero() {
|
||||
return false
|
||||
}
|
||||
return (time.Time)(t).Equal((time.Time)(s))
|
||||
}
|
||||
|
||||
// File is info about a file
|
||||
type File struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
Size int64 `json:"size"` // The number of bytes in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// AuthorizeAccountResponse is as returned from the b2_authorize_account call
|
||||
type AuthorizeAccountResponse struct {
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
|
||||
APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
|
||||
DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
|
||||
}
|
||||
|
||||
// ListBucketsResponse is as returned from the b2_list_buckets call
|
||||
type ListBucketsResponse struct {
|
||||
Buckets []Bucket `json:"buckets"`
|
||||
}
|
||||
|
||||
// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesRequest struct {
|
||||
BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
|
||||
StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
|
||||
MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
|
||||
StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
|
||||
Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
|
||||
Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
|
||||
}
|
||||
|
||||
// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
|
||||
type ListFileNamesResponse struct {
|
||||
Files []File `json:"files"` // An array of objects, each one describing one file.
|
||||
NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
|
||||
NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
|
||||
}
|
||||
|
||||
// GetUploadURLRequest is passed to b2_get_upload_url
|
||||
type GetUploadURLRequest struct {
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
|
||||
}
|
||||
|
||||
// GetUploadURLResponse is received from b2_get_upload_url
|
||||
type GetUploadURLResponse struct {
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
|
||||
}
|
||||
|
||||
// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
|
||||
type FileInfo struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
|
||||
AccountID string `json:"accountId"` // Your account ID.
|
||||
BucketID string `json:"bucketId"` // The bucket that the file is in.
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
}
|
||||
|
||||
// CreateBucketRequest is used to create a bucket
|
||||
type CreateBucketRequest struct {
|
||||
AccountID string `json:"accountId"`
|
||||
Name string `json:"bucketName"`
|
||||
Type string `json:"bucketType"`
|
||||
}
|
||||
|
||||
// DeleteBucketRequest is used to delete a bucket
|
||||
type DeleteBucketRequest struct {
|
||||
ID string `json:"bucketId"`
|
||||
AccountID string `json:"accountId"`
|
||||
}
|
||||
|
||||
// DeleteFileRequest is used to delete a file version
|
||||
type DeleteFileRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
}
|
||||
|
||||
// HideFileRequest is used to hide a file
|
||||
type HideFileRequest struct {
|
||||
BucketID string `json:"bucketId"` // The bucket containing the file to hide.
|
||||
Name string `json:"fileName"` // The name of the file to hide.
|
||||
}
|
||||
|
||||
// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
|
||||
type GetFileInfoRequest struct {
|
||||
ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
|
||||
}
|
||||
|
||||
// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
|
||||
//
|
||||
// If the original source of the file being uploaded has a last
|
||||
// modified time concept, Backblaze recommends using
|
||||
// src_last_modified_millis as the name, and a string holding the base
|
||||
// 10 number of milliseconds since midnight, January 1, 1970
|
||||
// UTC. This fits in a 64 bit integer such as the type "long" in the
|
||||
// programming language Java. It is intended to be compatible with
|
||||
// Java's time long. For example, it can be passed directly into the
|
||||
// Java call Date.setTime(long time).
|
||||
//
|
||||
// If the caller knows the SHA1 of the entire large file being
|
||||
// uploaded, Backblaze recommends using large_file_sha1 as the name,
|
||||
// and a 40 byte hex string representing the SHA1.
|
||||
//
|
||||
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
|
||||
type StartLargeFileRequest struct {
|
||||
BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
|
||||
Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
|
||||
ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
|
||||
Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
|
||||
}
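
// exampleStartLargeFileRequest is an illustrative sketch only and is not part
// of the original change. It shows how the recommended fileInfo keys from the
// comment above (src_last_modified_millis and large_file_sha1) would be filled
// in. The bucket ID, file name and SHA1 value here are placeholders.
func exampleStartLargeFileRequest() StartLargeFileRequest {
	return StartLargeFileRequest{
		BucketID:    "4a48fe8875c6214145260818", // placeholder bucket ID
		Name:        "photos/holiday.jpg",
		ContentType: "b2/x-auto", // let B2 choose the stored Content-Type
		Info: map[string]string{
			"src_last_modified_millis": "1452802803026",
			"large_file_sha1":          "a3195dc1e7b46a2ff5da4b3c179175b75671e80d",
		},
	}
}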
|
||||
|
||||
// StartLargeFileResponse is the response to StartLargeFileRequest
|
||||
type StartLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
|
||||
Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
ContentType string `json:"contentType"` // The MIME type of the file.
|
||||
Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
|
||||
UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLRequest is passed to b2_get_upload_part_url
|
||||
type GetUploadPartURLRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// GetUploadPartURLResponse is received from b2_get_upload_url
|
||||
type GetUploadPartURLResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
|
||||
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
|
||||
}
|
||||
|
||||
// UploadPartResponse is the response to b2_upload_part
|
||||
type UploadPartResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
|
||||
Size int64 `json:"contentLength"` // The number of bytes stored in the file.
|
||||
SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
|
||||
}
|
||||
|
||||
// FinishLargeFileRequest is passed to b2_finish_large_file
|
||||
//
|
||||
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
|
||||
//
|
||||
// Large files do not have a SHA1 checksum. The value will always be "none".
|
||||
type FinishLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of the part 1 is the first string in the array, at index 0.
|
||||
}
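
// exampleFinishLargeFileRequest is an illustrative sketch only and is not part
// of the original change. It shows the ordering rule from the comment above:
// part numbers start at 1, so the SHA1 of part 1 is the first element of the
// slice (index 0). fileID and partSHA1s are assumed inputs.
func exampleFinishLargeFileRequest(fileID string, partSHA1s []string) FinishLargeFileRequest {
	return FinishLargeFileRequest{
		ID:    fileID,
		SHA1s: partSHA1s, // partSHA1s[0] is the SHA1 of part 1, partSHA1s[1] of part 2, ...
	}
}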
|
||||
|
||||
// CancelLargeFileRequest is passed to b2_cancel_large_file
|
||||
//
|
||||
// The response is a CancelLargeFileResponse
|
||||
type CancelLargeFileRequest struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
}
|
||||
|
||||
// CancelLargeFileResponse is the response to CancelLargeFileRequest
|
||||
type CancelLargeFileResponse struct {
|
||||
ID string `json:"fileId"` // The unique identifier of the file being uploaded.
|
||||
Name string `json:"fileName"` // The name of this file.
|
||||
AccountID string `json:"accountId"` // The identifier for the account.
|
||||
BucketID string `json:"bucketId"` // The unique ID of the bucket.
|
||||
}
|
||||
87
backend/b2/api/types_test.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package api_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2/api"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
emptyT api.Timestamp
|
||||
t0 = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123456789Z"))
|
||||
t0r = api.Timestamp(fstest.Time("1970-01-01T01:01:01.123000000Z"))
|
||||
t1 = api.Timestamp(fstest.Time("2001-02-03T04:05:06.123000000Z"))
|
||||
)
|
||||
|
||||
func TestTimestampMarshalJSON(t *testing.T) {
|
||||
resB, err := t0.MarshalJSON()
|
||||
res := string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "3661123", res)
|
||||
|
||||
resB, err = t1.MarshalJSON()
|
||||
res = string(resB)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "981173106123", res)
|
||||
}
|
||||
|
||||
func TestTimestampUnmarshalJSON(t *testing.T) {
|
||||
var tActual api.Timestamp
|
||||
err := tActual.UnmarshalJSON([]byte("981173106123"))
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, (time.Time)(t1), (time.Time)(tActual))
|
||||
}
|
||||
|
||||
func TestTimestampAddVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
t api.Timestamp
|
||||
in string
|
||||
expected string
|
||||
}{
|
||||
{t0, "potato.txt", "potato-v1970-01-01-010101-123.txt"},
|
||||
{t1, "potato", "potato-v2001-02-03-040506-123"},
|
||||
{t1, "", "-v2001-02-03-040506-123"},
|
||||
} {
|
||||
actual := test.t.AddVersion(test.in)
|
||||
assert.Equal(t, test.expected, actual, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampRemoveVersion(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expectedT api.Timestamp
|
||||
expectedRemote string
|
||||
}{
|
||||
{"potato.txt", emptyT, "potato.txt"},
|
||||
{"potato-v1970-01-01-010101-123.txt", t0r, "potato.txt"},
|
||||
{"potato-v2001-02-03-040506-123", t1, "potato"},
|
||||
{"-v2001-02-03-040506-123", t1, ""},
|
||||
{"potato-v2A01-02-03-040506-123", emptyT, "potato-v2A01-02-03-040506-123"},
|
||||
{"potato-v2001-02-03-040506=123", emptyT, "potato-v2001-02-03-040506=123"},
|
||||
} {
|
||||
actualT, actualRemote := api.RemoveVersion(test.in)
|
||||
assert.Equal(t, test.expectedT, actualT, test.in)
|
||||
assert.Equal(t, test.expectedRemote, actualRemote, test.in)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTimestampIsZero(t *testing.T) {
|
||||
assert.True(t, emptyT.IsZero())
|
||||
assert.False(t, t0.IsZero())
|
||||
assert.False(t, t1.IsZero())
|
||||
}
|
||||
|
||||
func TestTimestampEqual(t *testing.T) {
|
||||
assert.False(t, emptyT.Equal(emptyT))
|
||||
assert.False(t, t0.Equal(emptyT))
|
||||
assert.False(t, emptyT.Equal(t0))
|
||||
assert.False(t, t0.Equal(t1))
|
||||
assert.False(t, t1.Equal(t0))
|
||||
assert.True(t, t0.Equal(t0))
|
||||
assert.True(t, t1.Equal(t1))
|
||||
}
|
||||
@@ -5,14 +5,13 @@ package b2
|
||||
// checking SHA1s?
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"hash"
|
||||
gohash "hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strconv"
|
||||
@@ -20,10 +19,17 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/b2/api"
|
||||
"github.com/ncw/rclone/backend/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/pacer"
|
||||
"github.com/ncw/rclone/rest"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
@@ -46,11 +52,12 @@ const (
|
||||
|
||||
// Globals
|
||||
var (
|
||||
minChunkSize = fs.SizeSuffix(100E6)
|
||||
minChunkSize = fs.SizeSuffix(5E6)
|
||||
chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
|
||||
uploadCutoff = fs.SizeSuffix(200E6)
|
||||
b2TestMode = fs.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
|
||||
b2Versions = fs.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
|
||||
b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
|
||||
b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
|
||||
b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
|
||||
errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
|
||||
)
|
||||
|
||||
@@ -72,8 +79,8 @@ func init() {
|
||||
},
|
||||
},
|
||||
})
|
||||
fs.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
fs.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
|
||||
flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
|
||||
flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
|
||||
}
|
||||
|
||||
// Fs represents a remote b2 server
|
||||
@@ -86,6 +93,8 @@ type Fs struct {
|
||||
endpoint string // name of the starting api endpoint
|
||||
srv *rest.Client // the connection to the b2 server
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
bucketIDMutex sync.Mutex // mutex to protect _bucketID
|
||||
_bucketID string // the ID of the bucket we are working on
|
||||
info api.AuthorizeAccountResponse // result of authorize call
|
||||
@@ -184,7 +193,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// shouldRetry returns a boolean as to whether this resp and err
|
||||
@@ -225,7 +234,7 @@ func errorHandler(resp *http.Response) error {
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
if uploadCutoff < chunkSize {
|
||||
return nil, errors.Errorf("b2: upload cutoff must be less than chunk size %v - was %v", chunkSize, uploadCutoff)
|
||||
return nil, errors.Errorf("b2: upload cutoff (%v) must be greater than or equal to chunk size (%v)", uploadCutoff, chunkSize)
|
||||
}
|
||||
if chunkSize < minChunkSize {
|
||||
return nil, errors.Errorf("b2: chunk size can't be less than %v - was %v", minChunkSize, chunkSize)
|
||||
@@ -234,15 +243,15 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
account := fs.ConfigFileGet(name, "account")
|
||||
account := config.FileGet(name, "account")
|
||||
if account == "" {
|
||||
return nil, errors.New("account not found")
|
||||
}
|
||||
key := fs.ConfigFileGet(name, "key")
|
||||
key := config.FileGet(name, "key")
|
||||
if key == "" {
|
||||
return nil, errors.New("key not found")
|
||||
}
|
||||
endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
|
||||
endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
bucket: bucket,
|
||||
@@ -250,11 +259,15 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
account: account,
|
||||
key: key,
|
||||
endpoint: endpoint,
|
||||
srv: rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
|
||||
srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
|
||||
bufferTokens: make(chan []byte, fs.Config.Transfers),
|
||||
}
|
||||
f.features = (&fs.Features{ReadMimeType: true, WriteMimeType: true}).Fill(f)
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
// Set the test flag if required
|
||||
if *b2TestMode != "" {
|
||||
testMode := strings.TrimSpace(*b2TestMode)
|
||||
@@ -301,9 +314,9 @@ func (f *Fs) authorizeAccount() error {
|
||||
f.authMu.Lock()
|
||||
defer f.authMu.Unlock()
|
||||
opts := rest.Opts{
|
||||
Absolute: true,
|
||||
Method: "GET",
|
||||
Path: f.endpoint + "/b2api/v1/b2_authorize_account",
|
||||
Path: "/b2api/v1/b2_authorize_account",
|
||||
RootURL: f.endpoint,
|
||||
UserName: f.account,
|
||||
Password: f.key,
|
||||
ExtraHeaders: map[string]string{"Authorization": ""}, // unset the Authorization for this request
|
||||
@@ -436,18 +449,14 @@ var errEndList = errors.New("end list")
|
||||
// than 1000)
|
||||
//
|
||||
// If hidden is set then it will list the hidden (deleted) files too.
|
||||
func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool, fn listFn) error {
|
||||
func (f *Fs) list(dir string, recurse bool, prefix string, limit int, hidden bool, fn listFn) error {
|
||||
root := f.root
|
||||
if dir != "" {
|
||||
root += dir + "/"
|
||||
}
|
||||
delimiter := ""
|
||||
switch level {
|
||||
case 1:
|
||||
if !recurse {
|
||||
delimiter = "/"
|
||||
case fs.MaxLevel:
|
||||
default:
|
||||
return fs.ErrorLevelNotSupported
|
||||
}
|
||||
bucketID, err := f.getBucketID()
|
||||
if err != nil {
|
||||
@@ -495,7 +504,7 @@ func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool,
|
||||
}
|
||||
remote := file.Name[len(f.root):]
|
||||
// Check for directory
|
||||
isDirectory := level != 0 && strings.HasSuffix(remote, "/")
|
||||
isDirectory := strings.HasSuffix(remote, "/")
|
||||
if isDirectory {
|
||||
remote = remote[:len(remote)-1]
|
||||
}
|
||||
@@ -520,77 +529,125 @@ func (f *Fs) list(dir string, level int, prefix string, limit int, hidden bool,
|
||||
return nil
|
||||
}
|
||||
|
||||
// listFiles walks the path returning files and directories to out
|
||||
func (f *Fs) listFiles(out fs.ListOpts, dir string) {
|
||||
defer out.Finished()
|
||||
// List the objects
|
||||
// Convert a list item into a DirEntry
|
||||
func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, last *string) (fs.DirEntry, error) {
|
||||
if isDirectory {
|
||||
d := fs.NewDir(remote, time.Time{})
|
||||
return d, nil
|
||||
}
|
||||
if remote == *last {
|
||||
remote = object.UploadTimestamp.AddVersion(remote)
|
||||
} else {
|
||||
*last = remote
|
||||
}
|
||||
// hide objects represent deleted files which we don't list
|
||||
if object.Action == "hide" {
|
||||
return nil, nil
|
||||
}
|
||||
o, err := f.newObjectWithInfo(remote, object)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// mark the bucket as being OK
|
||||
func (f *Fs) markBucketOK() {
|
||||
if f.bucket != "" {
|
||||
f.bucketOKMu.Lock()
|
||||
f.bucketOK = true
|
||||
f.bucketOKMu.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// listDir lists a single directory
|
||||
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
|
||||
last := ""
|
||||
err := f.list(dir, out.Level(), "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
dir := &fs.Dir{
|
||||
Name: remote,
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
if out.AddDir(dir) {
|
||||
return fs.ErrorListAborted
|
||||
}
|
||||
} else {
|
||||
if remote == last {
|
||||
remote = object.UploadTimestamp.AddVersion(remote)
|
||||
} else {
|
||||
last = remote
|
||||
}
|
||||
// hide objects represent deleted files which we don't list
|
||||
if object.Action == "hide" {
|
||||
return nil
|
||||
}
|
||||
o, err := f.newObjectWithInfo(remote, object)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if out.Add(o) {
|
||||
return fs.ErrorListAborted
|
||||
}
|
||||
err = f.list(dir, false, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if entry != nil {
|
||||
entries = append(entries, entry)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return nil, err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.markBucketOK()
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// listBuckets returns all the buckets to out
|
||||
func (f *Fs) listBuckets(out fs.ListOpts, dir string) {
|
||||
defer out.Finished()
|
||||
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
|
||||
if dir != "" {
|
||||
out.SetError(fs.ErrorListOnlyRoot)
|
||||
return
|
||||
return nil, fs.ErrorListBucketRequired
|
||||
}
|
||||
err := f.listBucketsToFn(func(bucket *api.Bucket) error {
|
||||
dir := &fs.Dir{
|
||||
Name: bucket.Name,
|
||||
Bytes: -1,
|
||||
Count: -1,
|
||||
}
|
||||
if out.AddDir(dir) {
|
||||
return fs.ErrorListAborted
|
||||
}
|
||||
err = f.listBucketsToFn(func(bucket *api.Bucket) error {
|
||||
d := fs.NewDir(bucket.Name, time.Time{})
|
||||
entries = append(entries, d)
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
out.SetError(err)
|
||||
return nil, err
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// List walks the path returning files and directories to out
|
||||
func (f *Fs) List(out fs.ListOpts, dir string) {
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if f.bucket == "" {
|
||||
f.listBuckets(out, dir)
|
||||
} else {
|
||||
f.listFiles(out, dir)
|
||||
return f.listBuckets(dir)
|
||||
}
|
||||
return
|
||||
return f.listDir(dir)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively than doing a directory traversal.
|
||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
if f.bucket == "" {
|
||||
return fs.ErrorListBucketRequired
|
||||
}
|
||||
list := walk.NewListRHelper(callback)
|
||||
last := ""
|
||||
err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return list.Add(entry)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// bucket must be present if listing succeeded
|
||||
f.markBucketOK()
|
||||
return list.Flush()
|
||||
}
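
// exampleListR is an illustrative sketch and not part of the original change.
// It shows how the ListR callback contract described above is typically
// consumed: the callback receives tranches of entries and may be invoked many
// times before ListR returns. It assumes fs.ListRCallback has the signature
// func(fs.DirEntries) error, as used by list.Add above.
func exampleListR(f *Fs) error {
	count := 0
	return f.ListR("", func(entries fs.DirEntries) error {
		count += len(entries)
		fs.Debugf(f, "seen %d entries so far", count)
		return nil
	})
}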
|
||||
|
||||
// listBucketFn is called from listBucketsToFn to handle a bucket
|
||||
@@ -660,19 +717,25 @@ func (f *Fs) clearBucketID() {
|
||||
// Copy the reader in to the new object which is returned
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo) (fs.Object, error) {
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// Temporary Object under construction
|
||||
fs := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
return fs, fs.Update(in, src)
|
||||
return fs, fs.Update(in, src, options...)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir creates the bucket if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
// Can't create subdirs
|
||||
if dir != "" {
|
||||
f.bucketOKMu.Lock()
|
||||
defer f.bucketOKMu.Unlock()
|
||||
if f.bucketOK {
|
||||
return nil
|
||||
}
|
||||
opts := rest.Opts{
|
||||
@@ -697,6 +760,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
_, getBucketErr := f.getBucketID()
|
||||
if getBucketErr == nil {
|
||||
// found so it is our bucket
|
||||
f.bucketOK = true
|
||||
return nil
|
||||
}
|
||||
if getBucketErr != fs.ErrorDirNotFound {
|
||||
@@ -707,6 +771,7 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
return errors.Wrap(err, "failed to create bucket")
|
||||
}
|
||||
f.setBucketID(response.ID)
|
||||
f.bucketOK = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -714,6 +779,8 @@ func (f *Fs) Mkdir(dir string) error {
|
||||
//
|
||||
// Returns an error if it isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
f.bucketOKMu.Lock()
|
||||
defer f.bucketOKMu.Unlock()
|
||||
if f.root != "" || dir != "" {
|
||||
return nil
|
||||
}
|
||||
@@ -737,6 +804,7 @@ func (f *Fs) Rmdir(dir string) error {
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to delete bucket")
|
||||
}
|
||||
f.bucketOK = false
|
||||
f.clearBucketID()
|
||||
f.clearUploadURL()
|
||||
return nil
|
||||
@@ -747,6 +815,31 @@ func (f *Fs) Precision() time.Duration {
|
||||
return time.Millisecond
|
||||
}
|
||||
|
||||
// hide hides a file on the remote
|
||||
func (f *Fs) hide(Name string) error {
|
||||
bucketID, err := f.getBucketID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_hide_file",
|
||||
}
|
||||
var request = api.HideFileRequest{
|
||||
BucketID: bucketID,
|
||||
Name: Name,
|
||||
}
|
||||
var response api.File
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(&opts, &request, &response)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to hide %q", Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteByID deletes a file version given Name and ID
|
||||
func (f *Fs) deleteByID(ID, Name string) error {
|
||||
opts := rest.Opts{
|
||||
@@ -795,16 +888,16 @@ func (f *Fs) purge(oldOnly bool) error {
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for object := range toBeDeleted {
|
||||
fs.Stats.Checking(object.Name)
|
||||
accounting.Stats.Checking(object.Name)
|
||||
checkErr(f.deleteByID(object.ID, object.Name))
|
||||
fs.Stats.DoneChecking(object.Name)
|
||||
accounting.Stats.DoneChecking(object.Name)
|
||||
}
|
||||
}()
|
||||
}
|
||||
last := ""
|
||||
checkErr(f.list("", fs.MaxLevel, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
|
||||
checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
|
||||
if !isDirectory {
|
||||
fs.Stats.Checking(remote)
|
||||
accounting.Stats.Checking(remote)
|
||||
if oldOnly && last != remote {
|
||||
if object.Action == "hide" {
|
||||
fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
|
||||
@@ -817,7 +910,7 @@ func (f *Fs) purge(oldOnly bool) error {
|
||||
toBeDeleted <- object
|
||||
}
|
||||
last = remote
|
||||
fs.Stats.DoneChecking(remote)
|
||||
accounting.Stats.DoneChecking(remote)
|
||||
}
|
||||
return nil
|
||||
}))
|
||||
@@ -841,8 +934,8 @@ func (f *Fs) CleanUp() error {
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashSHA1)
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -866,9 +959,9 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns the Sha-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
if t != fs.HashSHA1 {
|
||||
return "", fs.ErrHashUnsupported
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if t != hash.SHA1 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
if o.sha1 == "" {
|
||||
// Error is logged in readMetaData
|
||||
@@ -947,7 +1040,7 @@ func (o *Object) readMetaData() (err error) {
|
||||
maxSearched = maxVersions
|
||||
}
|
||||
var info *api.File
|
||||
err = o.fs.list("", fs.MaxLevel, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
err = o.fs.list("", true, baseRemote, maxSearched, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
|
||||
if isDirectory {
|
||||
return nil
|
||||
}
|
||||
@@ -1021,7 +1114,7 @@ type openFile struct {
|
||||
o *Object // Object we are reading for
|
||||
resp *http.Response // response of the GET
|
||||
body io.Reader // reading from here
|
||||
hash hash.Hash // currently accumulating SHA1
|
||||
hash gohash.Hash // currently accumulating SHA1
|
||||
bytes int64 // number of bytes read on this connection
|
||||
eof bool // whether we have read end of file
|
||||
}
|
||||
@@ -1066,7 +1159,7 @@ func (file *openFile) Close() (err error) {
|
||||
// Check the SHA1
|
||||
receivedSHA1 := file.o.sha1
|
||||
calculatedSHA1 := fmt.Sprintf("%x", file.hash.Sum(nil))
|
||||
if receivedSHA1 != calculatedSHA1 {
|
||||
if receivedSHA1 != "" && receivedSHA1 != calculatedSHA1 {
|
||||
return errors.Errorf("object corrupted on transfer - SHA1 mismatch (want %q got %q)", receivedSHA1, calculatedSHA1)
|
||||
}
|
||||
|
||||
@@ -1079,10 +1172,9 @@ var _ io.ReadCloser = &openFile{}
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
Absolute: true,
|
||||
Path: o.fs.info.DownloadURL,
|
||||
Options: options,
|
||||
Method: "GET",
|
||||
RootURL: o.fs.info.DownloadURL,
|
||||
Options: options,
|
||||
}
|
||||
// Download by id if set otherwise by name
|
||||
if o.id != "" {
|
||||
@@ -1161,14 +1253,43 @@ func urlEncode(in string) string {
|
||||
// Update the object with the contents of the io.Reader, modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
if *b2Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
err = o.fs.Mkdir("")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
size := src.Size()
|
||||
|
||||
// If a large file upload in chunks - see upload.go
|
||||
if size >= int64(uploadCutoff) {
|
||||
if size == -1 {
|
||||
// Check if the file is large enough for a chunked upload (needs to be at least two chunks)
|
||||
buf := o.fs.getUploadBlock()
|
||||
n, err := io.ReadFull(in, buf)
|
||||
if err == nil {
|
||||
bufReader := bufio.NewReader(in)
|
||||
in = bufReader
|
||||
_, err = bufReader.Peek(1)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
fs.Debugf(o, "File is big enough for chunked streaming")
|
||||
up, err := o.fs.newLargeUpload(o, in, src)
|
||||
if err != nil {
|
||||
o.fs.putUploadBlock(buf)
|
||||
return err
|
||||
}
|
||||
return up.Stream(buf)
|
||||
} else if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
fs.Debugf(o, "File has %d bytes, which makes only one chunk. Using direct upload.", n)
|
||||
defer o.fs.putUploadBlock(buf)
|
||||
size = int64(n)
|
||||
in = bytes.NewReader(buf[:n])
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
} else if size > int64(uploadCutoff) {
|
||||
up, err := o.fs.newLargeUpload(o, in, src)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -1177,42 +1298,13 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
|
||||
}
|
||||
|
||||
modTime := src.ModTime()
|
||||
calculatedSha1, _ := src.Hash(fs.HashSHA1)
|
||||
|
||||
// If source cannot provide the hash, copy to a temporary file
|
||||
// and calculate the hash while doing so.
|
||||
// Then we serve the temporary file.
|
||||
calculatedSha1, _ := src.Hash(hash.SHA1)
|
||||
if calculatedSha1 == "" {
|
||||
// Open a temp file to copy the input
|
||||
fd, err := ioutil.TempFile("", "rclone-b2-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_ = os.Remove(fd.Name()) // Delete the file - may not work on Windows
|
||||
defer func() {
|
||||
_ = fd.Close() // Ignore error may have been closed already
|
||||
_ = os.Remove(fd.Name()) // Delete the file - may have been deleted already
|
||||
}()
|
||||
|
||||
// Copy the input while calculating the sha1
|
||||
hash := sha1.New()
|
||||
teed := io.TeeReader(in, hash)
|
||||
n, err := io.Copy(fd, teed)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if n != size {
|
||||
return errors.Errorf("read %d bytes expecting %d", n, size)
|
||||
}
|
||||
calculatedSha1 = fmt.Sprintf("%x", hash.Sum(nil))
|
||||
|
||||
// Rewind the temporary file
|
||||
_, err = fd.Seek(0, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Set input to temporary file
|
||||
in = fd
|
||||
calculatedSha1 = "hex_digits_at_end"
|
||||
har := newHashAppendingReader(in, sha1.New())
|
||||
size += int64(har.AdditionalLength())
|
||||
in = har
|
||||
}
|
||||
|
||||
// Get upload URL
|
||||
@@ -1279,10 +1371,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo) (err error) {
|
||||
// will be returned with the download.
|
||||
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Absolute: true,
|
||||
Path: upload.UploadURL,
|
||||
Body: in,
|
||||
Method: "POST",
|
||||
RootURL: upload.UploadURL,
|
||||
Body: in,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": upload.AuthorizationToken,
|
||||
"X-Bz-File-Name": urlEncode(o.fs.root + o.remote),
|
||||
@@ -1320,27 +1411,10 @@ func (o *Object) Remove() error {
|
||||
if *b2Versions {
|
||||
return errNotWithVersions
|
||||
}
|
||||
bucketID, err := o.fs.getBucketID()
|
||||
if err != nil {
|
||||
return err
|
||||
if *b2HardDelete {
|
||||
return o.fs.deleteByID(o.id, o.fs.root+o.remote)
|
||||
}
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_hide_file",
|
||||
}
|
||||
var request = api.HideFileRequest{
|
||||
BucketID: bucketID,
|
||||
Name: o.fs.root + o.remote,
|
||||
}
|
||||
var response api.File
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err := o.fs.srv.CallJSON(&opts, &request, &response)
|
||||
return o.fs.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to delete file")
|
||||
}
|
||||
return nil
|
||||
return o.fs.hide(o.fs.root + o.remote)
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
@@ -1348,11 +1422,19 @@ func (o *Object) MimeType() string {
|
||||
return o.mimeType
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.CleanUpper = &Fs{}
|
||||
_ fs.ListRer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
_ fs.IDer = &Object{}
|
||||
)
|
||||
17
backend/b2/b2_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Test B2 filesystem interface
|
||||
package b2_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestB2:",
|
||||
NilObject: (*b2.Object)(nil),
|
||||
})
|
||||
}
|
||||
433
backend/b2/upload.go
Normal file
@@ -0,0 +1,433 @@
|
||||
// Upload large files for b2
|
||||
//
|
||||
// Docs - https://www.backblaze.com/b2/docs/large_files.html
|
||||
|
||||
package b2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
gohash "hash"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/ncw/rclone/backend/b2/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type hashAppendingReader struct {
|
||||
h gohash.Hash
|
||||
in io.Reader
|
||||
hexSum string
|
||||
hexReader io.Reader
|
||||
}
|
||||
|
||||
// Read returns all bytes from the original reader, then the hex sum
|
||||
// of what was read so far, then EOF.
|
||||
func (har *hashAppendingReader) Read(b []byte) (int, error) {
|
||||
if har.hexReader == nil {
|
||||
n, err := har.in.Read(b)
|
||||
if err == io.EOF {
|
||||
har.in = nil // allow GC
|
||||
err = nil // allow reading hexSum before EOF
|
||||
|
||||
har.hexSum = hex.EncodeToString(har.h.Sum(nil))
|
||||
har.hexReader = strings.NewReader(har.hexSum)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
return har.hexReader.Read(b)
|
||||
}
|
||||
|
||||
// AdditionalLength returns how many bytes the appended hex sum will take up.
|
||||
func (har *hashAppendingReader) AdditionalLength() int {
|
||||
return hex.EncodedLen(har.h.Size())
|
||||
}
|
||||
|
||||
// HexSum returns the hash sum as hex. It's only available after the original
|
||||
// reader has EOF'd. It's an empty string before that.
|
||||
func (har *hashAppendingReader) HexSum() string {
|
||||
return har.hexSum
|
||||
}
|
||||
|
||||
// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
|
||||
// after the original reader reaches EOF. The increased size depends on the
|
||||
// given hash, which may be queried through AdditionalLength()
|
||||
func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
|
||||
withHash := io.TeeReader(in, h)
|
||||
return &hashAppendingReader{h: h, in: withHash}
|
||||
}
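
// exampleHashAppendingReader is an illustrative sketch, not part of the
// original change. Reading the wrapped reader to EOF yields the payload
// followed by 40 hex characters of its SHA1, which is what the
// "hex_digits_at_end" upload mode expects; HexSum is only valid after EOF.
func exampleHashAppendingReader() (string, error) {
	har := newHashAppendingReader(strings.NewReader("hello"), sha1.New())
	var out bytes.Buffer
	if _, err := io.Copy(&out, har); err != nil {
		return "", err
	}
	// out now holds "hello" plus the hex SHA1 of "hello";
	// har.HexSum() returns just the hex digits.
	return out.String(), nil
}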
|
||||
|
||||
// largeUpload is used to control the upload of large files which need chunking
|
||||
type largeUpload struct {
|
||||
f *Fs // parent Fs
|
||||
o *Object // object being uploaded
|
||||
in io.Reader // read the data from here
|
||||
wrap accounting.WrapFn // account parts being transferred
|
||||
id string // ID of the file being uploaded
|
||||
size int64 // total size
|
||||
parts int64 // calculated number of parts, if known
|
||||
sha1s []string // slice of SHA1s for each part
|
||||
uploadMu sync.Mutex // lock for upload variable
|
||||
uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
|
||||
}
|
||||
|
||||
// newLargeUpload starts an upload of object o from in with metadata in src
|
||||
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
|
||||
remote := o.remote
|
||||
size := src.Size()
|
||||
parts := int64(0)
|
||||
sha1SliceSize := int64(maxParts)
|
||||
if size == -1 {
|
||||
fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
|
||||
} else {
|
||||
parts = size / int64(chunkSize)
|
||||
if size%int64(chunkSize) != 0 {
|
||||
parts++
|
||||
}
|
||||
if parts > maxParts {
|
||||
return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
|
||||
}
|
||||
sha1SliceSize = parts
|
||||
}
|
||||
|
||||
modTime := src.ModTime()
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_start_large_file",
|
||||
}
|
||||
bucketID, err := f.getBucketID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var request = api.StartLargeFileRequest{
|
||||
BucketID: bucketID,
|
||||
Name: o.fs.root + remote,
|
||||
ContentType: fs.MimeType(src),
|
||||
Info: map[string]string{
|
||||
timeKey: timeString(modTime),
|
||||
},
|
||||
}
|
||||
// Set the SHA1 if known
|
||||
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
|
||||
request.Info[sha1Key] = calculatedSha1
|
||||
}
|
||||
var response api.StartLargeFileResponse
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
resp, err := f.srv.CallJSON(&opts, &request, &response)
|
||||
return f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
up = &largeUpload{
|
||||
f: f,
|
||||
o: o,
|
||||
in: in,
|
||||
wrap: wrap,
|
||||
id: response.ID,
|
||||
size: size,
|
||||
parts: parts,
|
||||
sha1s: make([]string, sha1SliceSize),
|
||||
}
|
||||
return up, nil
|
||||
}
|
||||
|
||||
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
|
||||
//
|
||||
// This should be returned with returnUploadURL when finished
|
||||
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
|
||||
up.uploadMu.Lock()
|
||||
defer up.uploadMu.Unlock()
|
||||
if len(up.uploads) == 0 {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_get_upload_part_url",
|
||||
}
|
||||
var request = api.GetUploadPartURLRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get upload URL")
|
||||
}
|
||||
} else {
|
||||
upload, up.uploads = up.uploads[0], up.uploads[1:]
|
||||
}
|
||||
return upload, nil
|
||||
}
|
||||
|
||||
// returnUploadURL returns the UploadURL to the cache
|
||||
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
|
||||
if upload == nil {
|
||||
return
|
||||
}
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = append(up.uploads, upload)
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// clearUploadURL clears the current UploadURL and the AuthorizationToken
|
||||
func (up *largeUpload) clearUploadURL() {
|
||||
up.uploadMu.Lock()
|
||||
up.uploads = nil
|
||||
up.uploadMu.Unlock()
|
||||
}
|
||||
|
||||
// Transfer a chunk
|
||||
func (up *largeUpload) transferChunk(part int64, body []byte) error {
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))
|
||||
|
||||
// Get upload URL
|
||||
upload, err := up.getUploadURL()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
|
||||
size := int64(len(body)) + int64(in.AdditionalLength())
|
||||
|
||||
// Authorization
|
||||
//
|
||||
// An upload authorization token, from b2_get_upload_part_url.
|
||||
//
|
||||
// X-Bz-Part-Number
|
||||
//
|
||||
// A number from 1 to 10000. The parts uploaded for one file
|
||||
// must have contiguous numbers, starting with 1.
|
||||
//
|
||||
// Content-Length
|
||||
//
|
||||
// The number of bytes in the file being uploaded. Note that
|
||||
// this header is required; you cannot leave it out and just
|
||||
// use chunked encoding. The minimum size of every part but
|
||||
// the last one is 100MB.
|
||||
//
|
||||
// X-Bz-Content-Sha1
|
||||
//
|
||||
// The SHA1 checksum of this part of the file. B2 will
|
||||
// check this when the part is uploaded, to make sure that the
|
||||
// data arrived correctly. The same SHA1 checksum must be
|
||||
// passed to b2_finish_large_file.
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
RootURL: upload.UploadURL,
|
||||
Body: up.wrap(in),
|
||||
ExtraHeaders: map[string]string{
|
||||
"Authorization": upload.AuthorizationToken,
|
||||
"X-Bz-Part-Number": fmt.Sprintf("%d", part),
|
||||
sha1Header: "hex_digits_at_end",
|
||||
},
|
||||
ContentLength: &size,
|
||||
}
|
||||
|
||||
var response api.UploadPartResponse
|
||||
|
||||
resp, err := up.f.srv.CallJSON(&opts, nil, &response)
|
||||
retry, err := up.f.shouldRetry(resp, err)
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
|
||||
}
|
||||
// On retryable error clear PartUploadURL
|
||||
if retry {
|
||||
fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
|
||||
upload = nil
|
||||
}
|
||||
up.returnUploadURL(upload)
|
||||
up.sha1s[part-1] = in.HexSum()
|
||||
return retry, err
|
||||
})
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
|
||||
} else {
|
||||
fs.Debugf(up.o, "Done sending chunk %d", part)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// finish closes off the large upload
|
||||
func (up *largeUpload) finish() error {
|
||||
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_finish_large_file",
|
||||
}
|
||||
var request = api.FinishLargeFileRequest{
|
||||
ID: up.id,
|
||||
SHA1s: up.sha1s,
|
||||
}
|
||||
var response api.FileInfo
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return up.o.decodeMetaDataFileInfo(&response)
|
||||
}
|
||||
|
||||
// cancel aborts the large upload
|
||||
func (up *largeUpload) cancel() error {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/b2_cancel_large_file",
|
||||
}
|
||||
var request = api.CancelLargeFileRequest{
|
||||
ID: up.id,
|
||||
}
|
||||
var response api.CancelLargeFileResponse
|
||||
err := up.f.pacer.Call(func() (bool, error) {
|
||||
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
|
||||
return up.f.shouldRetry(resp, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
|
||||
wg.Add(1)
|
||||
go func(part int64, buf []byte) {
|
||||
defer wg.Done()
|
||||
defer up.f.putUploadBlock(buf)
|
||||
err := up.transferChunk(part, buf)
|
||||
if err != nil {
|
||||
select {
|
||||
case errs <- err:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}(part, buf)
|
||||
}
|
||||
|
||||
func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
|
||||
if err == nil {
|
||||
select {
|
||||
case err = <-errs:
|
||||
default:
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
|
||||
cancelErr := up.cancel()
|
||||
if cancelErr != nil {
|
||||
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return up.finish()
|
||||
}
|
||||
|
||||
// Stream uploads the chunks from the input, starting with a required initial
|
||||
// chunk. Assumes the file size is unknown and will upload until the input
|
||||
// reaches EOF.
|
||||
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
|
||||
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
|
||||
errs := make(chan error, 1)
|
||||
hasMoreParts := true
|
||||
var wg sync.WaitGroup
|
||||
|
||||
// Transfer initial chunk
|
||||
up.size = int64(len(initialUploadBlock))
|
||||
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
|
||||
|
||||
outer:
|
||||
for part := int64(2); hasMoreParts; part++ {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
break outer
|
||||
default:
|
||||
}
|
||||
|
||||
// Get a block of memory
|
||||
buf := up.f.getUploadBlock()
|
||||
|
||||
// Read the chunk
|
||||
var n int
|
||||
n, err = io.ReadFull(up.in, buf)
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
|
||||
buf = buf[:n]
|
||||
hasMoreParts = false
|
||||
err = nil
|
||||
} else if err == io.EOF {
|
||||
fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
|
||||
up.f.putUploadBlock(buf)
|
||||
err = nil
|
||||
break outer
|
||||
} else if err != nil {
|
||||
// other kinds of errors indicate failure
|
||||
up.f.putUploadBlock(buf)
|
||||
break outer
|
||||
}
|
||||
|
||||
// Keep stats up to date
|
||||
up.parts = part
|
||||
up.size += int64(n)
|
||||
if part > maxParts {
|
||||
err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
|
||||
break outer
|
||||
}
|
||||
|
||||
// Transfer the chunk
|
||||
up.managedTransferChunk(&wg, errs, part, buf)
|
||||
}
|
||||
wg.Wait()
|
||||
up.sha1s = up.sha1s[:up.parts]
|
||||
|
||||
return up.finishOrCancelOnError(err, errs)
|
||||
}
|
||||
|
||||
// Upload uploads the chunks from the input
|
||||
func (up *largeUpload) Upload() error {
|
||||
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
|
||||
remaining := up.size
|
||||
errs := make(chan error, 1)
|
||||
var wg sync.WaitGroup
|
||||
var err error
|
||||
outer:
|
||||
for part := int64(1); part <= up.parts; part++ {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
break outer
|
||||
default:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
}
|
||||
|
||||
// Get a block of memory
|
||||
buf := up.f.getUploadBlock()[:reqSize]
|
||||
|
||||
// Read the chunk
|
||||
_, err = io.ReadFull(up.in, buf)
|
||||
if err != nil {
|
||||
up.f.putUploadBlock(buf)
|
||||
break outer
|
||||
}
|
||||
|
||||
// Transfer the chunk
|
||||
up.managedTransferChunk(&wg, errs, part, buf)
|
||||
remaining -= reqSize
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
return up.finishOrCancelOnError(err, errs)
|
||||
}
|
||||
192
backend/box/api/types.go
Normal file
@@ -0,0 +1,192 @@
|
||||
// Package api has type definitions for box
|
||||
//
|
||||
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// 2017-05-03T07:26:10-07:00
|
||||
timeFormat = `"` + time.RFC3339 + `"`
|
||||
)
|
||||
|
||||
// Time represents date and time information for the
|
||||
// box API, by using RFC3339
|
||||
type Time time.Time
|
||||
|
||||
// MarshalJSON turns a Time into JSON (in UTC)
|
||||
func (t *Time) MarshalJSON() (out []byte, err error) {
|
||||
timeString := (*time.Time)(t).Format(timeFormat)
|
||||
return []byte(timeString), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON turns JSON into a Time
|
||||
func (t *Time) UnmarshalJSON(data []byte) error {
|
||||
newT, err := time.Parse(timeFormat, string(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*t = Time(newT)
|
||||
return nil
|
||||
}
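
// exampleTimeRoundTrip is an illustrative sketch, not part of the original
// change. It shows that Time marshals to and parses from a quoted RFC3339
// string such as "2017-05-03T07:26:10-07:00" (the quotes are part of the
// JSON value because timeFormat includes them).
func exampleTimeRoundTrip() (Time, error) {
	var t Time
	err := t.UnmarshalJSON([]byte(`"2017-05-03T07:26:10-07:00"`))
	return t, err
}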
|
||||
|
||||
// Error is returned from box when things go wrong
|
||||
type Error struct {
|
||||
Type string `json:"type"`
|
||||
Status int `json:"status"`
|
||||
Code string `json:"code"`
|
||||
ContextInfo json.RawMessage
|
||||
HelpURL string `json:"help_url"`
|
||||
Message string `json:"message"`
|
||||
RequestID string `json:"request_id"`
|
||||
}
|
||||
|
||||
// Error returns a string for the error and satisfies the error interface
|
||||
func (e *Error) Error() string {
|
||||
out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
|
||||
if e.Message != "" {
|
||||
out += ": " + e.Message
|
||||
}
|
||||
if e.ContextInfo != nil {
|
||||
out += fmt.Sprintf(" (%+v)", e.ContextInfo)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Check Error satisfies the error interface
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// ItemFields are the fields needed for FileInfo
|
||||
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"
|
||||
|
||||
// Types of things in Item
|
||||
const (
|
||||
ItemTypeFolder = "folder"
|
||||
ItemTypeFile = "file"
|
||||
ItemStatusActive = "active"
|
||||
ItemStatusTrashed = "trashed"
|
||||
ItemStatusDeleted = "deleted"
|
||||
)
|
||||
|
||||
// Item describes a folder or a file as returned by Get Folder Items and others
|
||||
type Item struct {
|
||||
Type string `json:"type"`
|
||||
ID string `json:"id"`
|
||||
SequenceID string `json:"sequence_id"`
|
||||
Etag string `json:"etag"`
|
||||
SHA1 string `json:"sha1"`
|
||||
Name string `json:"name"`
|
||||
Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
|
||||
CreatedAt Time `json:"created_at"`
|
||||
ModifiedAt Time `json:"modified_at"`
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the item
|
||||
func (i *Item) ModTime() (t time.Time) {
|
||||
t = time.Time(i.ContentModifiedAt)
|
||||
if t.IsZero() {
|
||||
t = time.Time(i.ModifiedAt)
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// FolderItems is returned from the GetFolderItems call
|
||||
type FolderItems struct {
|
||||
TotalCount int `json:"total_count"`
|
||||
Entries []Item `json:"entries"`
|
||||
Offset int `json:"offset"`
|
||||
Limit int `json:"limit"`
|
||||
Order []struct {
|
||||
By string `json:"by"`
|
||||
Direction string `json:"direction"`
|
||||
} `json:"order"`
|
||||
}
|
||||
|
||||
// Parent defines the ID of the parent directory
|
||||
type Parent struct {
|
||||
ID string `json:"id"`
|
||||
}
|
||||
|
||||
// CreateFolder is the request for Create Folder
|
||||
type CreateFolder struct {
|
||||
Name string `json:"name"`
|
||||
Parent Parent `json:"parent"`
|
||||
}
|
||||
|
||||
// UploadFile is the request for Upload File
|
||||
type UploadFile struct {
|
||||
Name string `json:"name"`
|
||||
Parent Parent `json:"parent"`
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
}
|
||||
|
||||
// UpdateFileModTime is used in Update File Info
|
||||
type UpdateFileModTime struct {
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
}
|
||||
|
||||
// UpdateFileMove is the request for Upload File to change name and parent
|
||||
type UpdateFileMove struct {
|
||||
Name string `json:"name"`
|
||||
Parent Parent `json:"parent"`
|
||||
}
|
||||
|
||||
// CopyFile is the request for Copy File
|
||||
type CopyFile struct {
|
||||
Name string `json:"name"`
|
||||
Parent Parent `json:"parent"`
|
||||
}
|
||||
|
||||
// UploadSessionRequest is used in Create Upload Session
|
||||
type UploadSessionRequest struct {
|
||||
FolderID string `json:"folder_id,omitempty"` // don't pass for update
|
||||
FileSize int64 `json:"file_size"`
|
||||
FileName string `json:"file_name,omitempty"` // optional for update
|
||||
}
|
||||
|
||||
// UploadSessionResponse is returned from Create Upload Session
|
||||
type UploadSessionResponse struct {
|
||||
TotalParts int `json:"total_parts"`
|
||||
PartSize int64 `json:"part_size"`
|
||||
SessionEndpoints struct {
|
||||
ListParts string `json:"list_parts"`
|
||||
Commit string `json:"commit"`
|
||||
UploadPart string `json:"upload_part"`
|
||||
Status string `json:"status"`
|
||||
Abort string `json:"abort"`
|
||||
} `json:"session_endpoints"`
|
||||
SessionExpiresAt Time `json:"session_expires_at"`
|
||||
ID string `json:"id"`
|
||||
Type string `json:"type"`
|
||||
NumPartsProcessed int `json:"num_parts_processed"`
|
||||
}
|
||||
|
||||
// Part defines the return from the upload part call, which is also passed to commit upload
|
||||
type Part struct {
|
||||
PartID string `json:"part_id"`
|
||||
Offset int `json:"offset"`
|
||||
Size int `json:"size"`
|
||||
Sha1 string `json:"sha1"`
|
||||
}
|
||||
|
||||
// UploadPartResponse is returned from the upload part call
|
||||
type UploadPartResponse struct {
|
||||
Part Part `json:"part"`
|
||||
}
|
||||
|
||||
// CommitUpload is used in the Commit Upload call
|
||||
type CommitUpload struct {
|
||||
Parts []Part `json:"parts"`
|
||||
Attributes struct {
|
||||
ContentCreatedAt Time `json:"content_created_at"`
|
||||
ContentModifiedAt Time `json:"content_modified_at"`
|
||||
} `json:"attributes"`
|
||||
}
|
||||
1067
backend/box/box.go
Normal file
File diff suppressed because it is too large
17
backend/box/box_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
// Test Box filesystem interface
|
||||
package box_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/box"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestIntegration runs integration tests against the remote
|
||||
func TestIntegration(t *testing.T) {
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: "TestBox:",
|
||||
NilObject: (*box.Object)(nil),
|
||||
})
|
||||
}
|
||||
273
backend/box/upload.go
Normal file
@@ -0,0 +1,273 @@
|
||||
// multipart upload for box
|
||||
|
||||
package box
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/backend/box/api"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/accounting"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// createUploadSession creates an upload session for the object
|
||||
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/files/upload_sessions",
|
||||
RootURL: uploadURL,
|
||||
}
|
||||
request := api.UploadSessionRequest{
|
||||
FileSize: size,
|
||||
}
|
||||
// If object has an ID then it is existing so create a new version
|
||||
if o.id != "" {
|
||||
opts.Path = "/files/" + o.id + "/upload_sessions"
|
||||
} else {
|
||||
opts.Path = "/files/upload_sessions"
|
||||
request.FolderID = directoryID
|
||||
request.FileName = replaceReservedChars(leaf)
|
||||
}
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
// sha1Digest produces a digest using sha1 as per RFC3230
|
||||
func sha1Digest(digest []byte) string {
|
||||
return "sha=" + base64.StdEncoding.EncodeToString(digest)
|
||||
}
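
// exampleSha1Digest is an illustrative sketch, not part of the original
// change. It shows the RFC 3230 Digest header value produced by sha1Digest
// for a given chunk; the raw SHA1 bytes are base64 encoded with a "sha="
// prefix. For example, []byte("hello") gives "sha=qvTGHdzF6KLavt4PO0gs2a6pQ00=".
func exampleSha1Digest(chunk []byte) string {
	sum := sha1.Sum(chunk)
	return sha1Digest(sum[:])
}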
|
||||
|
||||
// uploadPart uploads a part in an upload session
|
||||
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
|
||||
chunkSize := int64(len(chunk))
|
||||
sha1sum := sha1.Sum(chunk)
|
||||
opts := rest.Opts{
|
||||
Method: "PUT",
|
||||
Path: "/files/upload_sessions/" + SessionID,
|
||||
RootURL: uploadURL,
|
||||
ContentType: "application/octet-stream",
|
||||
ContentLength: &chunkSize,
|
||||
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
|
||||
ExtraHeaders: map[string]string{
|
||||
"Digest": sha1Digest(sha1sum[:]),
|
||||
},
|
||||
}
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
opts.Body = wrap(bytes.NewReader(chunk))
|
||||
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// commitUpload finishes an upload session
|
||||
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "POST",
|
||||
Path: "/files/upload_sessions/" + SessionID + "/commit",
|
||||
RootURL: uploadURL,
|
||||
ExtraHeaders: map[string]string{
|
||||
"Digest": sha1Digest(sha1sum),
|
||||
},
|
||||
}
|
||||
request := api.CommitUpload{
|
||||
Parts: parts,
|
||||
}
|
||||
request.Attributes.ContentModifiedAt = api.Time(modTime)
|
||||
request.Attributes.ContentCreatedAt = api.Time(modTime)
|
||||
var body []byte
|
||||
var resp *http.Response
|
||||
maxTries := fs.Config.LowLevelRetries
|
||||
const defaultDelay = 10
|
||||
var tries int
|
||||
outer:
|
||||
for tries = 0; tries < maxTries; tries++ {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
|
||||
if err != nil {
|
||||
return shouldRetry(resp, err)
|
||||
}
|
||||
body, err = rest.ReadBody(resp)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
delay := defaultDelay
|
||||
why := "unknown"
|
||||
if err != nil {
|
||||
// Sometimes we get 400 Error with
|
||||
// parts_mismatch immediately after uploading
|
||||
// the last part. Ignore this error and wait.
|
||||
if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
|
||||
why = err.Error()
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated:
|
||||
break outer
|
||||
case http.StatusAccepted:
|
||||
why = "not ready yet"
|
||||
delayString := resp.Header.Get("Retry-After")
|
||||
if delayString != "" {
|
||||
delay, err = strconv.Atoi(delayString)
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
|
||||
delay = defaultDelay
|
||||
}
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
|
||||
}
|
||||
}
|
||||
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
}
|
||||
if tries >= maxTries {
|
||||
return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
|
||||
}
|
||||
err = json.Unmarshal(body, &result)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// abortUpload cancels an upload session
|
||||
func (o *Object) abortUpload(SessionID string) (err error) {
|
||||
opts := rest.Opts{
|
||||
Method: "DELETE",
|
||||
Path: "/files/upload_sessions/" + SessionID,
|
||||
RootURL: uploadURL,
|
||||
NoResponse: true,
|
||||
}
|
||||
var resp *http.Response
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
resp, err = o.fs.srv.Call(&opts)
|
||||
return shouldRetry(resp, err)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// uploadMultipart uploads a file using multipart upload
|
||||
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
|
||||
// Create upload session
|
||||
session, err := o.createUploadSession(leaf, directoryID, size)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload create session failed")
|
||||
}
|
||||
chunkSize := session.PartSize
|
||||
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))
|
||||
|
||||
// Cancel the session if something went wrong
|
||||
defer func() {
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Cancelling multipart upload: %v", err)
|
||||
cancelErr := o.abortUpload(session.ID)
|
||||
if cancelErr != nil {
|
||||
fs.Logf(o, "Failed to cancel multipart upload: %v", cancelErr)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// unwrap the accounting from the input, we use wrap to put it
|
||||
// back on after the buffering
|
||||
in, wrap := accounting.UnWrap(in)
|
||||
|
||||
// Upload the chunks
|
||||
remaining := size
|
||||
position := int64(0)
|
||||
parts := make([]api.Part, session.TotalParts)
|
||||
hash := sha1.New()
|
||||
errs := make(chan error, 1)
|
||||
var wg sync.WaitGroup
|
||||
outer:
|
||||
for part := 0; part < session.TotalParts; part++ {
|
||||
// Check any errors
|
||||
select {
|
||||
case err = <-errs:
|
||||
break outer
|
||||
default:
|
||||
}
|
||||
|
||||
reqSize := remaining
|
||||
if reqSize >= int64(chunkSize) {
|
||||
reqSize = int64(chunkSize)
|
||||
}
|
||||
|
||||
// Make a block of memory
|
||||
buf := make([]byte, reqSize)
|
||||
|
||||
// Read the chunk
|
||||
_, err = io.ReadFull(in, buf)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "multipart upload failed to read source")
|
||||
break outer
|
||||
}
|
||||
|
||||
// Make the global hash (must be done sequentially)
|
||||
_, _ = hash.Write(buf)
|
||||
|
||||
// Transfer the chunk
|
||||
wg.Add(1)
|
||||
o.fs.uploadToken.Get()
|
||||
go func(part int, position int64) {
|
||||
defer wg.Done()
|
||||
defer o.fs.uploadToken.Put()
|
||||
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
|
||||
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
|
||||
if err != nil {
|
||||
err = errors.Wrap(err, "multipart upload failed to upload part")
|
||||
select {
|
||||
case errs <- err:
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
parts[part] = partResponse.Part
|
||||
}(part, position)
|
||||
|
||||
// ready for next block
|
||||
remaining -= chunkSize
|
||||
position += chunkSize
|
||||
}
|
||||
wg.Wait()
|
||||
if err == nil {
|
||||
select {
|
||||
case err = <-errs:
|
||||
default:
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Finalise the upload session
|
||||
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "multipart upload failed to finalize")
|
||||
}
|
||||
|
||||
if result.TotalCount != 1 || len(result.Entries) != 1 {
|
||||
return errors.Errorf("multipart upload failed %v - not sure why", o)
|
||||
}
|
||||
return o.setMetaData(&result.Entries[0])
|
||||
}
|
||||
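A small standalone sketch (not part of the diff) of the offset and size arithmetic the chunk loop in uploadMultipart above performs, using an invented 20 MiB file and an 8 MiB part size as stand-ins for what createUploadSession might return:

package main

import "fmt"

func main() {
	size := int64(20 << 20)                             // 20 MiB file (example)
	partSize := int64(8 << 20)                          // example part size from the session
	totalParts := int((size + partSize - 1) / partSize) // 3

	remaining, position := size, int64(0)
	for part := 0; part < totalParts; part++ {
		reqSize := remaining
		if reqSize >= partSize {
			reqSize = partSize
		}
		fmt.Printf("part %d/%d: offset %d, size %d\n", part+1, totalParts, position, reqSize)
		remaining -= reqSize
		position += reqSize
	}
	// part 1/3: offset 0, size 8388608
	// part 2/3: offset 8388608, size 8388608
	// part 3/3: offset 16777216, size 4194304
}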
backend/cache/cache.go (new file, 1574 lines, vendored; diff suppressed because it is too large)
backend/cache/cache_internal_test.go (new file, 1707 lines, vendored; diff suppressed because it is too large)
backend/cache/cache_mount_unix_test.go (new file, 78 lines, vendored)
@@ -0,0 +1,78 @@
|
||||
// +build !plan9,!windows
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"bazil.org/fuse"
|
||||
fusefs "bazil.org/fuse/fs"
|
||||
"github.com/ncw/rclone/cmd/mount"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
||||
device := f.Name() + ":" + f.Root()
|
||||
var options = []fuse.MountOption{
|
||||
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
|
||||
fuse.Subtype("rclone"),
|
||||
fuse.FSName(device), fuse.VolumeName(device),
|
||||
fuse.NoAppleDouble(),
|
||||
fuse.NoAppleXattr(),
|
||||
//fuse.AllowOther(),
|
||||
}
|
||||
err := os.MkdirAll(r.mntDir, os.ModePerm)
|
||||
require.NoError(t, err)
|
||||
c, err := fuse.Mount(r.mntDir, options...)
|
||||
require.NoError(t, err)
|
||||
filesys := mount.NewFS(f)
|
||||
server := fusefs.New(c, nil)
|
||||
|
||||
// Serve the mount point in the background returning error to errChan
|
||||
r.unmountRes = make(chan error, 1)
|
||||
go func() {
|
||||
err := server.Serve(filesys)
|
||||
closeErr := c.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
r.unmountRes <- err
|
||||
}()
|
||||
|
||||
// check if the mount process has an error to report
|
||||
<-c.Ready
|
||||
require.NoError(t, c.MountError)
|
||||
|
||||
r.unmountFn = func() error {
|
||||
// Shutdown the VFS
|
||||
filesys.VFS.Shutdown()
|
||||
return fuse.Unmount(r.mntDir)
|
||||
}
|
||||
|
||||
r.vfs = filesys.VFS
|
||||
r.isMounted = true
|
||||
}
|
||||
|
||||
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
||||
var err error
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
err = r.unmountFn()
|
||||
if err != nil {
|
||||
//log.Printf("signal to umount failed - retrying: %v", err)
|
||||
time.Sleep(3 * time.Second)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = <-r.unmountRes
|
||||
require.NoError(t, err)
|
||||
err = r.vfs.CleanUp()
|
||||
require.NoError(t, err)
|
||||
r.isMounted = false
|
||||
}
|
||||
backend/cache/cache_mount_windows_test.go (new file, 124 lines, vendored)
@@ -0,0 +1,124 @@
|
||||
// +build windows
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/billziss-gh/cgofuse/fuse"
|
||||
"github.com/ncw/rclone/cmd/cmount"
|
||||
"github.com/ncw/rclone/cmd/mountlib"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// waitFor runs fn() until it returns true or the timeout expires
|
||||
func waitFor(fn func() bool) (ok bool) {
|
||||
const totalWait = 10 * time.Second
|
||||
const individualWait = 10 * time.Millisecond
|
||||
for i := 0; i < int(totalWait/individualWait); i++ {
|
||||
ok = fn()
|
||||
if ok {
|
||||
return ok
|
||||
}
|
||||
time.Sleep(individualWait)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *run) mountFs(t *testing.T, f fs.Fs) {
|
||||
// FIXME implement cmount
|
||||
t.Skip("windows not supported yet")
|
||||
|
||||
device := f.Name() + ":" + f.Root()
|
||||
options := []string{
|
||||
"-o", "fsname=" + device,
|
||||
"-o", "subtype=rclone",
|
||||
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
|
||||
"-o", "uid=-1",
|
||||
"-o", "gid=-1",
|
||||
"-o", "allow_other",
|
||||
// This causes FUSE to supply O_TRUNC with the Open
|
||||
// call which is more efficient for cmount. However
|
||||
// it does not work with cgofuse on Windows with
|
||||
// WinFSP so cmount must work with or without it.
|
||||
"-o", "atomic_o_trunc",
|
||||
"--FileSystemName=rclone",
|
||||
}
|
||||
|
||||
fsys := cmount.NewFS(f)
|
||||
host := fuse.NewFileSystemHost(fsys)
|
||||
|
||||
// Serve the mount point in the background returning error to errChan
|
||||
r.unmountRes = make(chan error, 1)
|
||||
go func() {
|
||||
var err error
|
||||
ok := host.Mount(r.mntDir, options)
|
||||
if !ok {
|
||||
err = errors.New("mount failed")
|
||||
}
|
||||
r.unmountRes <- err
|
||||
}()
|
||||
|
||||
// unmount
|
||||
r.unmountFn = func() error {
|
||||
// Shutdown the VFS
|
||||
fsys.VFS.Shutdown()
|
||||
if host.Unmount() {
|
||||
if !waitFor(func() bool {
|
||||
_, err := os.Stat(r.mntDir)
|
||||
return err != nil
|
||||
}) {
|
||||
t.Fatalf("mountpoint %q didn't disappear after unmount", r.mntDir)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return errors.New("host unmount failed")
|
||||
}
|
||||
|
||||
// Wait for the filesystem to become ready, checking the file
|
||||
// system didn't blow up before starting
|
||||
select {
|
||||
case err := <-r.unmountRes:
|
||||
require.NoError(t, err)
|
||||
case <-time.After(time.Second * 3):
|
||||
}
|
||||
|
||||
// Wait for the mount point to be available on Windows
|
||||
// On Windows the Init signal comes slightly before the mount is ready
|
||||
if !waitFor(func() bool {
|
||||
_, err := os.Stat(r.mntDir)
|
||||
return err == nil
|
||||
}) {
|
||||
t.Errorf("mountpoint %q didn't become available on mount", r.mntDir)
|
||||
}
|
||||
|
||||
r.vfs = fsys.VFS
|
||||
r.isMounted = true
|
||||
}
|
||||
|
||||
func (r *run) unmountFs(t *testing.T, f fs.Fs) {
|
||||
// FIXME implement cmount
|
||||
t.Skip("windows not supported yet")
|
||||
var err error
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
err = r.unmountFn()
|
||||
if err != nil {
|
||||
//log.Printf("signal to umount failed - retrying: %v", err)
|
||||
time.Sleep(3 * time.Second)
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
require.NoError(t, err)
|
||||
err = <-r.unmountRes
|
||||
require.NoError(t, err)
|
||||
err = r.vfs.CleanUp()
|
||||
require.NoError(t, err)
|
||||
r.isMounted = false
|
||||
}
|
||||
backend/cache/cache_test.go (new file, 21 lines, vendored)
@@ -0,0 +1,21 @@
// Test Cache filesystem interface

// +build !plan9

package cache_test

import (
	"testing"

	"github.com/ncw/rclone/backend/cache"
	_ "github.com/ncw/rclone/backend/local"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestCache:",
		NilObject:  (*cache.Object)(nil),
	})
}
backend/cache/cache_unsupported.go (new file, 6 lines, vendored)
@@ -0,0 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build plan9

package cache
backend/cache/cache_upload_test.go (new file, 455 lines, vendored)
@@ -0,0 +1,455 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache_test
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/ncw/rclone/backend/cache"
|
||||
_ "github.com/ncw/rclone/backend/drive"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestInternalUploadTempDirCreated(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
|
||||
// create some rand test data
|
||||
testSize := int64(524288000)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
|
||||
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(524416032), ti.Size())
|
||||
} else {
|
||||
require.Equal(t, testSize, ti.Size())
|
||||
}
|
||||
de1, err := runInstance.list(t, rootFs, "")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
runInstance.completeBackgroundUpload(t, "one", bu)
|
||||
// check if it was removed from temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if it can be read
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, data2, 1024)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
|
||||
}
|
||||
|
||||
func TestInternalUploadMoveExistingFile(t *testing.T) {
|
||||
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(10485760)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
time.Sleep(time.Second * 5)
|
||||
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
//require.NoError(t, err)
|
||||
|
||||
err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
|
||||
require.NoError(t, err)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempPathCleaned(t *testing.T) {
|
||||
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("one")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("one/test")
|
||||
require.NoError(t, err)
|
||||
err = rootFs.Mkdir("second")
|
||||
require.NoError(t, err)
|
||||
|
||||
// create some rand test data
|
||||
testSize := int64(1048576)
|
||||
testReader := runInstance.randomReader(t, testSize)
|
||||
testReader2 := runInstance.randomReader(t, testSize)
|
||||
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
|
||||
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
|
||||
require.False(t, os.IsNotExist(err))
|
||||
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
de1, err := runInstance.list(t, rootFs, "one/test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
|
||||
// check if it can be read
|
||||
de1, err = runInstance.list(t, rootFs, "second")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, 1)
|
||||
}
|
||||
|
||||
func TestInternalUploadQueueMoreFiles(t *testing.T) {
|
||||
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
err := rootFs.Mkdir("test")
|
||||
require.NoError(t, err)
|
||||
minSize := 5242880
|
||||
maxSize := 10485760
|
||||
totalFiles := 10
|
||||
rand.Seed(time.Now().Unix())
|
||||
|
||||
lastFile := ""
|
||||
for i := 0; i < totalFiles; i++ {
|
||||
size := int64(rand.Intn(maxSize-minSize) + minSize)
|
||||
testReader := runInstance.randomReader(t, size)
|
||||
remote := "test/" + strconv.Itoa(i) + ".bin"
|
||||
runInstance.writeRemoteReader(t, rootFs, remote, testReader)
|
||||
|
||||
// validate that it exists in temp fs
|
||||
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))
|
||||
|
||||
if runInstance.wrappedIsExternal && i < totalFiles-1 {
|
||||
time.Sleep(time.Second * 3)
|
||||
}
|
||||
lastFile = remote
|
||||
}
|
||||
|
||||
// check if cache lists all files, likely temp upload didn't finish yet
|
||||
de1, err := runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
|
||||
// wait for background uploader to do its thing
|
||||
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)
|
||||
|
||||
// retry until we have no more temp files and fail if they don't go down to 0
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
|
||||
require.True(t, os.IsNotExist(err))
|
||||
|
||||
// check if cache lists all files
|
||||
de1, err = runInstance.list(t, rootFs, "test")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, de1, totalFiles)
|
||||
}
|
||||
|
||||
func TestInternalUploadTempFileOperations(t *testing.T) {
|
||||
id := "tiutfo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove - allowed
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("second/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.Error(t, err)
|
||||
var started bool
|
||||
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
|
||||
require.NoError(t, err)
|
||||
require.False(t, started)
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Rmdir - allowed
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "directory not empty")
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
|
||||
require.False(t, started)
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename -- allowed
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove -- allowed
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.Error(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.Error(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update -- allowed
|
||||
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
|
||||
require.NoError(t, err)
|
||||
obj2, err := rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
|
||||
require.Equal(t, "one content updated", string(data2))
|
||||
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
if runInstance.rootIsCrypt {
|
||||
require.Equal(t, int64(67), tmpInfo.Size())
|
||||
} else {
|
||||
require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
}
|
||||
|
||||
// test SetModTime -- allowed
|
||||
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, secondModTime, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, firstModTime)
|
||||
require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
|
||||
func TestInternalUploadUploadingFileOperations(t *testing.T) {
|
||||
id := "tiuufo"
|
||||
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
|
||||
nil,
|
||||
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
|
||||
defer runInstance.cleanupFs(t, rootFs, boltDb)
|
||||
|
||||
boltDb.PurgeTempUploads()
|
||||
|
||||
// create some rand test data
|
||||
runInstance.mkdir(t, rootFs, "test")
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// check if it can be read
|
||||
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data1)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test DirMove
|
||||
err = runInstance.dirMove(t, rootFs, "test", "second")
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Rmdir
|
||||
err = runInstance.rm(t, rootFs, "test")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
|
||||
// test Move/Rename
|
||||
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
|
||||
if err != errNotSupported {
|
||||
require.Error(t, err)
|
||||
// try to read from it
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/second")
|
||||
require.Error(t, err)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
// test Copy -- allowed
|
||||
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
|
||||
if err != errNotSupported {
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
_, err = rootFs.NewObject("test/third")
|
||||
require.NoError(t, err)
|
||||
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("one content"), data2)
|
||||
// validate that it exists in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// test Remove
|
||||
err = runInstance.rm(t, rootFs, "test/one")
|
||||
require.Error(t, err)
|
||||
_, err = rootFs.NewObject("test/one")
|
||||
require.NoError(t, err)
|
||||
// validate that it doesn't exist in temp fs
|
||||
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
require.NoError(t, err)
|
||||
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
|
||||
|
||||
// test Update - this seems to work. Why? FIXME
|
||||
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
|
||||
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
|
||||
// require.Equal(t, "one content", string(data2))
|
||||
//
|
||||
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
|
||||
// require.NoError(t, err)
|
||||
// if runInstance.rootIsCrypt {
|
||||
// require.Equal(t, int64(67), tmpInfo.Size())
|
||||
// } else {
|
||||
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
|
||||
// }
|
||||
//})
|
||||
//require.Error(t, err)
|
||||
|
||||
// test SetModTime -- seems to work because of the previous test
|
||||
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
|
||||
//require.NoError(t, err)
|
||||
//require.Equal(t, secondModTime, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, firstModTime)
|
||||
//require.NotEqual(t, time.Time{}, secondModTime)
|
||||
}
|
||||
backend/cache/directory.go (new file, 130 lines, vendored)
@@ -0,0 +1,130 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"path"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
)
|
||||
|
||||
// Directory is a generic dir that stores basic information about it
|
||||
type Directory struct {
|
||||
fs.Directory `json:"-"`
|
||||
|
||||
CacheFs *Fs `json:"-"` // cache fs
|
||||
Name string `json:"name"` // name of the directory
|
||||
Dir string `json:"dir"` // abs path of the directory
|
||||
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
|
||||
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown
|
||||
|
||||
CacheItems int64 `json:"items"` // number of objects or -1 for unknown
|
||||
CacheType string `json:"cacheType"` // object type
|
||||
CacheTs *time.Time `json:",omitempty"`
|
||||
}
|
||||
|
||||
// NewDirectory builds an empty dir which will be used to unmarshal data in it
|
||||
func NewDirectory(f *Fs, remote string) *Directory {
|
||||
cd := ShallowDirectory(f, remote)
|
||||
t := time.Now()
|
||||
cd.CacheTs = &t
|
||||
|
||||
return cd
|
||||
}
|
||||
|
||||
// ShallowDirectory builds an empty dir which will be used to unmarshal data in it
|
||||
func ShallowDirectory(f *Fs, remote string) *Directory {
|
||||
var cd *Directory
|
||||
fullRemote := cleanPath(path.Join(f.Root(), remote))
|
||||
|
||||
// build a new one
|
||||
dir := cleanPath(path.Dir(fullRemote))
|
||||
name := cleanPath(path.Base(fullRemote))
|
||||
cd = &Directory{
|
||||
CacheFs: f,
|
||||
Name: name,
|
||||
Dir: dir,
|
||||
CacheModTime: time.Now().UnixNano(),
|
||||
CacheSize: 0,
|
||||
CacheItems: 0,
|
||||
CacheType: "Directory",
|
||||
}
|
||||
|
||||
return cd
|
||||
}
|
||||
|
||||
// DirectoryFromOriginal builds one from a generic fs.Directory
|
||||
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
|
||||
var cd *Directory
|
||||
fullRemote := path.Join(f.Root(), d.Remote())
|
||||
|
||||
dir := cleanPath(path.Dir(fullRemote))
|
||||
name := cleanPath(path.Base(fullRemote))
|
||||
t := time.Now()
|
||||
cd = &Directory{
|
||||
Directory: d,
|
||||
CacheFs: f,
|
||||
Name: name,
|
||||
Dir: dir,
|
||||
CacheModTime: d.ModTime().UnixNano(),
|
||||
CacheSize: d.Size(),
|
||||
CacheItems: d.Items(),
|
||||
CacheType: "Directory",
|
||||
CacheTs: &t,
|
||||
}
|
||||
|
||||
return cd
|
||||
}
|
||||
|
||||
// Fs returns its FS info
|
||||
func (d *Directory) Fs() fs.Info {
|
||||
return d.CacheFs
|
||||
}
|
||||
|
||||
// String returns a human friendly name for this object
|
||||
func (d *Directory) String() string {
|
||||
if d == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return d.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (d *Directory) Remote() string {
|
||||
return d.CacheFs.cleanRootFromPath(d.abs())
|
||||
}
|
||||
|
||||
// abs returns the absolute path to the dir
|
||||
func (d *Directory) abs() string {
|
||||
return cleanPath(path.Join(d.Dir, d.Name))
|
||||
}
|
||||
|
||||
// parentRemote returns the absolute path parent remote
|
||||
func (d *Directory) parentRemote() string {
|
||||
absPath := d.abs()
|
||||
if absPath == "" {
|
||||
return ""
|
||||
}
|
||||
return cleanPath(path.Dir(absPath))
|
||||
}
|
||||
|
||||
// ModTime returns the cached ModTime
|
||||
func (d *Directory) ModTime() time.Time {
|
||||
return time.Unix(0, d.CacheModTime)
|
||||
}
|
||||
|
||||
// Size returns the cached Size
|
||||
func (d *Directory) Size() int64 {
|
||||
return d.CacheSize
|
||||
}
|
||||
|
||||
// Items returns the cached Items
|
||||
func (d *Directory) Items() int64 {
|
||||
return d.CacheItems
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Directory = (*Directory)(nil)
|
||||
)
|
||||
backend/cache/handle.go (new file, 668 lines, vendored)
@@ -0,0 +1,668 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/operations"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var uploaderMap = make(map[string]*backgroundWriter)
|
||||
var uploaderMapMx sync.Mutex
|
||||
|
||||
// initBackgroundUploader returns a single instance
|
||||
func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
|
||||
// write lock to create one
|
||||
uploaderMapMx.Lock()
|
||||
defer uploaderMapMx.Unlock()
|
||||
if b, ok := uploaderMap[fs.String()]; ok {
|
||||
// if it was already started we close it so that it can be started again
|
||||
if b.running {
|
||||
b.close()
|
||||
} else {
|
||||
return b, nil
|
||||
}
|
||||
}
|
||||
|
||||
bb := newBackgroundWriter(fs)
|
||||
uploaderMap[fs.String()] = bb
|
||||
return uploaderMap[fs.String()], nil
|
||||
}
|
||||
|
||||
// Handle is managing the read/write/seek operations on an open handle
|
||||
type Handle struct {
|
||||
cachedObject *Object
|
||||
cfs *Fs
|
||||
memory *Memory
|
||||
preloadQueue chan int64
|
||||
preloadOffset int64
|
||||
offset int64
|
||||
seenOffsets map[int64]bool
|
||||
mu sync.Mutex
|
||||
confirmReading chan bool
|
||||
|
||||
UseMemory bool
|
||||
workers []*worker
|
||||
closed bool
|
||||
reading bool
|
||||
}
|
||||
|
||||
// NewObjectHandle returns a new Handle for an existing Object
|
||||
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
|
||||
r := &Handle{
|
||||
cachedObject: o,
|
||||
cfs: cfs,
|
||||
offset: 0,
|
||||
preloadOffset: -1, // -1 to trigger the first preload
|
||||
|
||||
UseMemory: cfs.chunkMemory,
|
||||
reading: false,
|
||||
}
|
||||
r.seenOffsets = make(map[int64]bool)
|
||||
r.memory = NewMemory(-1)
|
||||
|
||||
// create a larger buffer to queue up requests
|
||||
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
|
||||
r.confirmReading = make(chan bool)
|
||||
r.startReadWorkers()
|
||||
return r
|
||||
}
|
||||
|
||||
// cacheFs is a convenience method to get the parent cache FS of the object's manager
|
||||
func (r *Handle) cacheFs() *Fs {
|
||||
return r.cfs
|
||||
}
|
||||
|
||||
// storage is a convenience method to get the persistent storage of the object's manager
|
||||
func (r *Handle) storage() *Persistent {
|
||||
return r.cacheFs().cache
|
||||
}
|
||||
|
||||
// String representation of this reader
|
||||
func (r *Handle) String() string {
|
||||
return r.cachedObject.abs()
|
||||
}
|
||||
|
||||
// startReadWorkers will start the worker pool
|
||||
func (r *Handle) startReadWorkers() {
|
||||
if r.hasAtLeastOneWorker() {
|
||||
return
|
||||
}
|
||||
totalWorkers := r.cacheFs().totalWorkers
|
||||
|
||||
if r.cacheFs().plexConnector.isConfigured() {
|
||||
if !r.cacheFs().plexConnector.isConnected() {
|
||||
err := r.cacheFs().plexConnector.authenticate()
|
||||
if err != nil {
|
||||
fs.Errorf(r, "failed to authenticate to Plex: %v", err)
|
||||
}
|
||||
}
|
||||
if r.cacheFs().plexConnector.isConnected() {
|
||||
totalWorkers = 1
|
||||
}
|
||||
}
|
||||
|
||||
r.scaleWorkers(totalWorkers)
|
||||
}
|
||||
|
||||
// scaleWorkers scales the worker pool up or down to the desired number of workers
|
||||
func (r *Handle) scaleWorkers(desired int) {
|
||||
current := len(r.workers)
|
||||
if current == desired {
|
||||
return
|
||||
}
|
||||
if current > desired {
|
||||
// scale in gracefully
|
||||
for i := 0; i < current-desired; i++ {
|
||||
r.preloadQueue <- -1
|
||||
}
|
||||
} else {
|
||||
// scale out
|
||||
for i := 0; i < desired-current; i++ {
|
||||
w := &worker{
|
||||
r: r,
|
||||
ch: r.preloadQueue,
|
||||
id: current + i,
|
||||
}
|
||||
go w.run()
|
||||
|
||||
r.workers = append(r.workers, w)
|
||||
}
|
||||
}
|
||||
// ignore first scale out from 0
|
||||
if current != 0 {
|
||||
fs.Debugf(r, "scale workers to %v", desired)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Handle) confirmExternalReading() {
|
||||
// if we have a max value of workers
|
||||
// then we skip this step
|
||||
if len(r.workers) > 1 ||
|
||||
!r.cacheFs().plexConnector.isConfigured() {
|
||||
return
|
||||
}
|
||||
if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
|
||||
return
|
||||
}
|
||||
fs.Infof(r, "confirmed reading by external reader")
|
||||
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
|
||||
}
|
||||
|
||||
// queueOffset will send an offset to the workers if it's different from the last one
|
||||
func (r *Handle) queueOffset(offset int64) {
|
||||
if offset != r.preloadOffset {
|
||||
// clean past in-memory chunks
|
||||
if r.UseMemory {
|
||||
go r.memory.CleanChunksByNeed(offset)
|
||||
}
|
||||
r.confirmExternalReading()
|
||||
r.preloadOffset = offset
|
||||
|
||||
// clear the past seen chunks
|
||||
// they will remain in our persistent storage but will be removed from transient
|
||||
// so they need to be picked up by a worker
|
||||
for k := range r.seenOffsets {
|
||||
if k < offset {
|
||||
r.seenOffsets[k] = false
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
|
||||
if o < 0 || o >= r.cachedObject.Size() {
|
||||
continue
|
||||
}
|
||||
if v, ok := r.seenOffsets[o]; ok && v {
|
||||
continue
|
||||
}
|
||||
|
||||
r.seenOffsets[o] = true
|
||||
r.preloadQueue <- o
|
||||
}
|
||||
}
|
||||
}
|
||||
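For illustration (not part of the diff), these are the offsets that the loop in queueOffset above fans out to the workers for a read landing at offset 0, using 4 workers and a 5 MiB chunk size as example values:

package main

import "fmt"

func main() {
	chunkSize := int64(5 << 20) // example chunk size
	workers := 4                // example worker count
	preloadOffset := int64(0)
	for i := 0; i < workers; i++ {
		// each worker is offered the next chunk boundary after the read position
		fmt.Println(preloadOffset + chunkSize*int64(i)) // 0 5242880 10485760 15728640
	}
}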
|
||||
func (r *Handle) hasAtLeastOneWorker() bool {
|
||||
oneWorker := false
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
if r.workers[i].isRunning() {
|
||||
oneWorker = true
|
||||
}
|
||||
}
|
||||
return oneWorker
|
||||
}
|
||||
|
||||
// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
|
||||
// it can be from transient or persistent cache
|
||||
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
|
||||
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
|
||||
var data []byte
|
||||
var err error
|
||||
|
||||
// we calculate the modulus of the requested offset with the size of a chunk
|
||||
offset := chunkStart % r.cacheFs().chunkSize
|
||||
|
||||
// we align the start offset of the first chunk to a likely chunk in the storage
|
||||
chunkStart = chunkStart - offset
|
||||
r.queueOffset(chunkStart)
|
||||
found := false
|
||||
|
||||
if r.UseMemory {
|
||||
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
|
||||
if err == nil {
|
||||
found = true
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
// give the workers a chance to pick up the chunk
// and retry a couple of times
|
||||
for i := 0; i < r.cacheFs().readRetries*8; i++ {
|
||||
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
|
||||
if err == nil {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
|
||||
fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
}
|
||||
}
|
||||
|
||||
// not found in ram or
// the worker didn't manage to download the chunk in time so we abort and close the stream
|
||||
if err != nil || len(data) == 0 || !found {
|
||||
if !r.hasAtLeastOneWorker() {
|
||||
fs.Errorf(r, "out of workers")
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("chunk not found %v", chunkStart)
|
||||
}
|
||||
|
||||
// first chunk will be aligned with the start
|
||||
if offset > 0 {
|
||||
if offset > int64(len(data)) {
|
||||
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
|
||||
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
data = data[int(offset):]
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
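A worked example (not part of the diff, with invented numbers) of the alignment arithmetic at the top of getChunk above:

package main

import "fmt"

func main() {
	chunkSize := int64(5 << 20)   // example chunk size: 5 MiB
	readOffset := int64(13000000) // caller asked for data starting here
	offset := readOffset % chunkSize
	chunkStart := readOffset - offset
	// the chunk starting at 10485760 is fetched and the first 2514240 bytes are trimmed off
	fmt.Println(chunkStart, offset) // 10485760 2514240
}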
|
||||
// Read a chunk from storage or len(p)
|
||||
func (r *Handle) Read(p []byte) (n int, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
var buf []byte
|
||||
|
||||
// first reading
|
||||
if !r.reading {
|
||||
r.reading = true
|
||||
}
|
||||
// reached EOF
|
||||
if r.offset >= r.cachedObject.Size() {
|
||||
return 0, io.EOF
|
||||
}
|
||||
currentOffset := r.offset
|
||||
buf, err = r.getChunk(currentOffset)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
|
||||
}
|
||||
if len(buf) == 0 && err != io.ErrUnexpectedEOF {
|
||||
return 0, io.EOF
|
||||
}
|
||||
readSize := copy(p, buf)
|
||||
newOffset := currentOffset + int64(readSize)
|
||||
r.offset = newOffset
|
||||
|
||||
return readSize, err
|
||||
}
|
||||
|
||||
// Close will tell the workers to stop
|
||||
func (r *Handle) Close() error {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.closed {
|
||||
return errors.New("file already closed")
|
||||
}
|
||||
|
||||
close(r.preloadQueue)
|
||||
r.closed = true
|
||||
// wait for workers to complete their jobs before returning
|
||||
waitCount := 3
|
||||
for i := 0; i < len(r.workers); i++ {
|
||||
waitIdx := 0
|
||||
for r.workers[i].isRunning() && waitIdx < waitCount {
|
||||
time.Sleep(time.Second)
|
||||
waitIdx++
|
||||
}
|
||||
}
|
||||
r.memory.db.Flush()
|
||||
|
||||
fs.Debugf(r, "cache reader closed %v", r.offset)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Seek will move the current offset based on whence and instruct the workers to move there too
|
||||
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
var err error
|
||||
switch whence {
|
||||
case io.SeekStart:
|
||||
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
|
||||
r.offset = offset
|
||||
case io.SeekCurrent:
|
||||
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
|
||||
r.offset += offset
|
||||
case io.SeekEnd:
|
||||
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
|
||||
r.offset = r.cachedObject.Size() + offset
|
||||
default:
|
||||
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
|
||||
}
|
||||
|
||||
chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
|
||||
if chunkStart >= r.cacheFs().chunkSize {
|
||||
chunkStart = chunkStart - r.cacheFs().chunkSize
|
||||
}
|
||||
r.queueOffset(chunkStart)
|
||||
|
||||
return r.offset, err
|
||||
}
|
||||
|
||||
type worker struct {
|
||||
r *Handle
|
||||
ch <-chan int64
|
||||
rc io.ReadCloser
|
||||
id int
|
||||
running bool
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// String is a representation of this worker
|
||||
func (w *worker) String() string {
|
||||
return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
|
||||
}
|
||||
|
||||
// reader will return a reader depending on the capabilities of the source reader:
|
||||
// - if it supports seeking it will seek to the desired offset and return the same reader
|
||||
// - if it doesn't support seeking it will close a possible existing one and open at the desired offset
|
||||
// - if there's no reader associated with this worker, it will create one
|
||||
func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
|
||||
var err error
|
||||
r := w.rc
|
||||
if w.rc == nil {
|
||||
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
|
||||
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
if !closeOpen {
|
||||
if do, ok := r.(fs.RangeSeeker); ok {
|
||||
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
|
||||
return r, err
|
||||
} else if do, ok := r.(io.Seeker); ok {
|
||||
_, err = do.Seek(offset, io.SeekStart)
|
||||
return r, err
|
||||
}
|
||||
}
|
||||
|
||||
_ = w.rc.Close()
|
||||
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
|
||||
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return r, nil
|
||||
})
|
||||
}
|
||||
|
||||
func (w *worker) isRunning() bool {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
return w.running
|
||||
}
|
||||
|
||||
func (w *worker) setRunning(f bool) {
|
||||
w.mu.Lock()
|
||||
defer w.mu.Unlock()
|
||||
w.running = f
|
||||
}
|
||||
|
||||
// run is the main loop for the worker which receives offsets to preload
|
||||
func (w *worker) run() {
|
||||
var err error
|
||||
var data []byte
|
||||
defer w.setRunning(false)
|
||||
defer func() {
|
||||
if w.rc != nil {
|
||||
_ = w.rc.Close()
|
||||
w.setRunning(false)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
chunkStart, open := <-w.ch
|
||||
w.setRunning(true)
|
||||
if chunkStart < 0 || !open {
|
||||
break
|
||||
}
|
||||
|
||||
// skip if it exists
|
||||
if w.r.UseMemory {
|
||||
if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
|
||||
continue
|
||||
}
|
||||
|
||||
// add it in ram if it's in the persistent storage
|
||||
data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
|
||||
if err == nil {
|
||||
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
|
||||
if err != nil {
|
||||
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
chunkEnd := chunkStart + w.r.cacheFs().chunkSize
|
||||
// TODO: Remove this comment if it proves to be reliable for #1896
|
||||
//if chunkEnd > w.r.cachedObject.Size() {
|
||||
// chunkEnd = w.r.cachedObject.Size()
|
||||
//}
|
||||
|
||||
w.download(chunkStart, chunkEnd, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
|
||||
var err error
|
||||
var data []byte
|
||||
|
||||
// stop retries
|
||||
if retry >= w.r.cacheFs().readRetries {
|
||||
return
|
||||
}
|
||||
// back-off between retries
|
||||
if retry > 0 {
|
||||
time.Sleep(time.Second * time.Duration(retry))
|
||||
}
|
||||
|
||||
closeOpen := false
|
||||
if retry > 0 {
|
||||
closeOpen = true
|
||||
}
|
||||
w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen)
|
||||
// we seem to be getting only errors so we abort
|
||||
if err != nil {
|
||||
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
|
||||
err = w.r.cachedObject.refreshFromSource(true)
|
||||
if err != nil {
|
||||
fs.Errorf(w, "%v", err)
|
||||
}
|
||||
w.download(chunkStart, chunkEnd, retry+1)
|
||||
return
|
||||
}
|
||||
|
||||
data = make([]byte, chunkEnd-chunkStart)
|
||||
var sourceRead int
|
||||
sourceRead, err = io.ReadFull(w.rc, data)
|
||||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
|
||||
err = w.r.cachedObject.refreshFromSource(true)
|
||||
if err != nil {
|
||||
fs.Errorf(w, "%v", err)
|
||||
}
|
||||
w.download(chunkStart, chunkEnd, retry+1)
|
||||
return
|
||||
}
|
||||
data = data[:sourceRead] // reslice to remove extra garbage
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart))
|
||||
} else {
|
||||
fs.Debugf(w, "downloaded chunk %v", chunkStart)
|
||||
}
|
||||
|
||||
if w.r.UseMemory {
|
||||
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
|
||||
if err != nil {
|
||||
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
|
||||
}
|
||||
}
|
||||
|
||||
err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
|
||||
if err != nil {
|
||||
fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// BackgroundUploadStarted is a state for a temp file that has started upload
|
||||
BackgroundUploadStarted = iota
|
||||
// BackgroundUploadCompleted is a state for a temp file that has completed upload
|
||||
BackgroundUploadCompleted
|
||||
// BackgroundUploadError is a state for a temp file that has an error upload
|
||||
BackgroundUploadError
|
||||
)
|
||||
|
||||
// BackgroundUploadState is an entity that maps to an existing file which is stored on the temp fs
|
||||
type BackgroundUploadState struct {
|
||||
Remote string
|
||||
Status int
|
||||
Error error
|
||||
}
|
||||
|
||||
type backgroundWriter struct {
|
||||
fs *Fs
|
||||
stateCh chan int
|
||||
running bool
|
||||
notifyCh chan BackgroundUploadState
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func newBackgroundWriter(f *Fs) *backgroundWriter {
|
||||
b := &backgroundWriter{
|
||||
fs: f,
|
||||
stateCh: make(chan int),
|
||||
notifyCh: make(chan BackgroundUploadState),
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) close() {
|
||||
b.stateCh <- 2
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.running = false
|
||||
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) pause() {
|
||||
b.stateCh <- 1
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) play() {
|
||||
b.stateCh <- 0
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) isRunning() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.running
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) notify(remote string, status int, err error) {
|
||||
state := BackgroundUploadState{
|
||||
Remote: remote,
|
||||
Status: status,
|
||||
Error: err,
|
||||
}
|
||||
select {
|
||||
case b.notifyCh <- state:
|
||||
fs.Debugf(remote, "notified background upload state: %v", state.Status)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) run() {
|
||||
state := 0
|
||||
for {
|
||||
b.mu.Lock()
|
||||
b.running = true
|
||||
b.mu.Unlock()
|
||||
select {
|
||||
case s := <-b.stateCh:
|
||||
state = s
|
||||
default:
|
||||
//
|
||||
}
|
||||
switch state {
|
||||
case 1:
|
||||
runtime.Gosched()
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
case 2:
|
||||
return
|
||||
}
|
||||
|
||||
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
|
||||
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
remote := b.fs.cleanRootFromPath(absPath)
|
||||
b.notify(remote, BackgroundUploadStarted, nil)
|
||||
fs.Infof(remote, "background upload: started upload")
|
||||
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
|
||||
if err != nil {
|
||||
b.notify(remote, BackgroundUploadError, err)
|
||||
_ = b.fs.cache.rollbackPendingUpload(absPath)
|
||||
fs.Errorf(remote, "background upload: %v", err)
|
||||
continue
|
||||
}
|
||||
// clean empty dirs up to root
|
||||
thisDir := cleanPath(path.Dir(remote))
|
||||
for thisDir != "" {
|
||||
thisList, err := b.fs.tempFs.List(thisDir)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(thisList) > 0 {
|
||||
break
|
||||
}
|
||||
err = b.fs.tempFs.Rmdir(thisDir)
|
||||
fs.Debugf(thisDir, "cleaned from temp path")
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
thisDir = cleanPath(path.Dir(thisDir))
|
||||
}
|
||||
fs.Infof(remote, "background upload: uploaded entry")
|
||||
err = b.fs.cache.removePendingUpload(absPath)
|
||||
if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
|
||||
fs.Errorf(remote, "background upload: %v", err)
|
||||
}
|
||||
parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote)))
|
||||
err = b.fs.cache.ExpireDir(parentCd)
|
||||
if err != nil {
|
||||
fs.Errorf(parentCd, "background upload: cache expire error: %v", err)
|
||||
}
|
||||
b.fs.notifyChangeUpstream(remote, fs.EntryObject)
|
||||
fs.Infof(remote, "finished background upload")
|
||||
b.notify(remote, BackgroundUploadCompleted, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ io.ReadCloser = (*Handle)(nil)
|
||||
_ io.Seeker = (*Handle)(nil)
|
||||
)
|
||||
backend/cache/object.go (vendored, new file, 358 lines)
@@ -0,0 +1,358 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"io"
|
||||
"path"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
objectInCache = "Object"
|
||||
objectPendingUpload = "TempObject"
|
||||
)
|
||||
|
||||
// Object is a generic file-like object that stores basic information about it
|
||||
type Object struct {
|
||||
fs.Object `json:"-"`
|
||||
|
||||
ParentFs fs.Fs `json:"-"` // parent fs
|
||||
CacheFs *Fs `json:"-"` // cache fs
|
||||
Name string `json:"name"` // name of the object
|
||||
Dir string `json:"dir"` // abs path of the parent directory
|
||||
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
|
||||
CacheSize int64 `json:"size"` // size of the object or -1 if unknown
|
||||
CacheStorable bool `json:"storable"` // says whether this object can be stored
|
||||
CacheType string `json:"cacheType"`
|
||||
CacheTs time.Time `json:"cacheTs"`
|
||||
CacheHashes map[hash.Type]string // all supported hashes cached
|
||||
|
||||
refreshMutex sync.Mutex
|
||||
}
|
||||
|
||||
// NewObject builds one from a generic fs.Object
|
||||
func NewObject(f *Fs, remote string) *Object {
|
||||
fullRemote := path.Join(f.Root(), remote)
|
||||
dir, name := path.Split(fullRemote)
|
||||
|
||||
cacheType := objectInCache
|
||||
parentFs := f.UnWrap()
|
||||
if f.tempWritePath != "" {
|
||||
_, err := f.cache.SearchPendingUpload(fullRemote)
|
||||
if err == nil { // queued for upload
|
||||
cacheType = objectPendingUpload
|
||||
parentFs = f.tempFs
|
||||
fs.Debugf(fullRemote, "pending upload found")
|
||||
}
|
||||
}
|
||||
|
||||
co := &Object{
|
||||
ParentFs: parentFs,
|
||||
CacheFs: f,
|
||||
Name: cleanPath(name),
|
||||
Dir: cleanPath(dir),
|
||||
CacheModTime: time.Now().UnixNano(),
|
||||
CacheSize: 0,
|
||||
CacheStorable: false,
|
||||
CacheType: cacheType,
|
||||
CacheTs: time.Now(),
|
||||
}
|
||||
return co
|
||||
}
|
||||
|
||||
// ObjectFromOriginal builds one from a generic fs.Object
|
||||
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
|
||||
var co *Object
|
||||
fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
|
||||
dir, name := path.Split(fullRemote)
|
||||
|
||||
cacheType := objectInCache
|
||||
parentFs := f.UnWrap()
|
||||
if f.tempWritePath != "" {
|
||||
_, err := f.cache.SearchPendingUpload(fullRemote)
|
||||
if err == nil { // queued for upload
|
||||
cacheType = objectPendingUpload
|
||||
parentFs = f.tempFs
|
||||
fs.Debugf(fullRemote, "pending upload found")
|
||||
}
|
||||
}
|
||||
|
||||
co = &Object{
|
||||
ParentFs: parentFs,
|
||||
CacheFs: f,
|
||||
Name: cleanPath(name),
|
||||
Dir: cleanPath(dir),
|
||||
CacheType: cacheType,
|
||||
CacheTs: time.Now(),
|
||||
}
|
||||
co.updateData(o)
|
||||
return co
|
||||
}
|
||||
|
||||
func (o *Object) updateData(source fs.Object) {
|
||||
o.Object = source
|
||||
o.CacheModTime = source.ModTime().UnixNano()
|
||||
o.CacheSize = source.Size()
|
||||
o.CacheStorable = source.Storable()
|
||||
o.CacheTs = time.Now()
|
||||
o.CacheHashes = make(map[hash.Type]string)
|
||||
}
|
||||
|
||||
// Fs returns its FS info
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.CacheFs
|
||||
}
|
||||
|
||||
// String returns a human friendly name for this object
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
p := path.Join(o.Dir, o.Name)
|
||||
return o.CacheFs.cleanRootFromPath(p)
|
||||
}
|
||||
|
||||
// abs returns the absolute path to the object
|
||||
func (o *Object) abs() string {
|
||||
return path.Join(o.Dir, o.Name)
|
||||
}
|
||||
|
||||
// ModTime returns the cached ModTime
|
||||
func (o *Object) ModTime() time.Time {
|
||||
_ = o.refresh()
|
||||
return time.Unix(0, o.CacheModTime)
|
||||
}
|
||||
|
||||
// Size returns the cached Size
|
||||
func (o *Object) Size() int64 {
|
||||
_ = o.refresh()
|
||||
return o.CacheSize
|
||||
}
|
||||
|
||||
// Storable returns the cached Storable
|
||||
func (o *Object) Storable() bool {
|
||||
_ = o.refresh()
|
||||
return o.CacheStorable
|
||||
}
|
||||
|
||||
// refresh will check if the object info is expired and request the info from source if it is
|
||||
// all these conditions must be true to ignore a refresh
|
||||
// 1. cache ts didn't expire yet
|
||||
// 2. is not pending a notification from the wrapped fs
|
||||
func (o *Object) refresh() error {
|
||||
isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
|
||||
isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
|
||||
if !isExpired && !isNotified {
|
||||
return nil
|
||||
}
|
||||
|
||||
return o.refreshFromSource(true)
|
||||
}
|
||||
|
||||
// refreshFromSource requests the original FS for the object in case it comes from a cached entry
|
||||
func (o *Object) refreshFromSource(force bool) error {
|
||||
o.refreshMutex.Lock()
|
||||
defer o.refreshMutex.Unlock()
|
||||
var err error
|
||||
var liveObject fs.Object
|
||||
|
||||
if o.Object != nil && !force {
|
||||
return nil
|
||||
}
|
||||
if o.isTempFile() {
|
||||
liveObject, err = o.ParentFs.NewObject(o.Remote())
|
||||
err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
|
||||
} else {
|
||||
liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
|
||||
err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
|
||||
}
|
||||
if err != nil {
|
||||
fs.Errorf(o, "error refreshing object in : %v", err)
|
||||
return err
|
||||
}
|
||||
o.updateData(liveObject)
|
||||
o.persist()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetModTime sets the ModTime of this object
|
||||
func (o *Object) SetModTime(t time.Time) error {
|
||||
if err := o.refreshFromSource(false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err := o.Object.SetModTime(t)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
o.CacheModTime = t.UnixNano()
|
||||
o.persist()
|
||||
fs.Debugf(o, "updated ModTime: %v", t)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Open is used to request a specific part of the file using fs.RangeOption
|
||||
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
|
||||
if err := o.refreshFromSource(true); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var err error
|
||||
cacheReader := NewObjectHandle(o, o.CacheFs)
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
}
|
||||
_, err = cacheReader.Seek(offset, io.SeekStart)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return readers.NewLimitedReadCloser(cacheReader, limit), nil
|
||||
}
|
||||
|
||||
// Update will change the object data
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
if err := o.refreshFromSource(false); err != nil {
|
||||
return err
|
||||
}
|
||||
// pause background uploads if active
|
||||
if o.CacheFs.tempWritePath != "" {
|
||||
o.CacheFs.backgroundRunner.pause()
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
if o.isTempFile() && o.tempFileStartedUpload() {
|
||||
return errors.Errorf("%v is currently uploading, can't update", o)
|
||||
}
|
||||
}
|
||||
fs.Debugf(o, "updating object contents with size %v", src.Size())
|
||||
|
||||
// FIXME use reliable upload
|
||||
err := o.Object.Update(in, src, options...)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "error updating source: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// deleting cached chunks and info to be replaced with new ones
|
||||
_ = o.CacheFs.cache.RemoveObject(o.abs())
|
||||
// advertise to ChangeNotify if wrapped doesn't do that
|
||||
o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)
|
||||
|
||||
o.CacheModTime = src.ModTime().UnixNano()
|
||||
o.CacheSize = src.Size()
|
||||
o.CacheHashes = make(map[hash.Type]string)
|
||||
o.CacheTs = time.Now()
|
||||
o.persist()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove deletes the object from both the cache and the source
|
||||
func (o *Object) Remove() error {
|
||||
if err := o.refreshFromSource(false); err != nil {
|
||||
return err
|
||||
}
|
||||
// pause background uploads if active
|
||||
if o.CacheFs.tempWritePath != "" {
|
||||
o.CacheFs.backgroundRunner.pause()
|
||||
defer o.CacheFs.backgroundRunner.play()
|
||||
// don't allow started uploads
|
||||
if o.isTempFile() && o.tempFileStartedUpload() {
|
||||
return errors.Errorf("%v is currently uploading, can't delete", o)
|
||||
}
|
||||
}
|
||||
err := o.Object.Remove()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fs.Debugf(o, "removing object")
|
||||
_ = o.CacheFs.cache.RemoveObject(o.abs())
|
||||
_ = o.CacheFs.cache.removePendingUpload(o.abs())
|
||||
parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
|
||||
_ = o.CacheFs.cache.ExpireDir(parentCd)
|
||||
// advertise to ChangeNotify if wrapped doesn't do that
|
||||
o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hash requests a hash of the object and stores in the cache
|
||||
// since it might or might not be called, this is lazy loaded
|
||||
func (o *Object) Hash(ht hash.Type) (string, error) {
|
||||
_ = o.refresh()
|
||||
if o.CacheHashes == nil {
|
||||
o.CacheHashes = make(map[hash.Type]string)
|
||||
}
|
||||
|
||||
cachedHash, found := o.CacheHashes[ht]
|
||||
if found {
|
||||
return cachedHash, nil
|
||||
}
|
||||
if err := o.refreshFromSource(false); err != nil {
|
||||
return "", err
|
||||
}
|
||||
liveHash, err := o.Object.Hash(ht)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
o.CacheHashes[ht] = liveHash
|
||||
|
||||
o.persist()
|
||||
fs.Debugf(o, "object hash cached: %v", liveHash)
|
||||
|
||||
return liveHash, nil
|
||||
}
|
||||
|
||||
// persist adds this object to the persistent cache
|
||||
func (o *Object) persist() *Object {
|
||||
err := o.CacheFs.cache.AddObject(o)
|
||||
if err != nil {
|
||||
fs.Errorf(o, "failed to cache object: %v", err)
|
||||
}
|
||||
return o
|
||||
}
|
||||
|
||||
func (o *Object) isTempFile() bool {
|
||||
_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
|
||||
if err != nil {
|
||||
o.CacheType = objectInCache
|
||||
return false
|
||||
}
|
||||
|
||||
o.CacheType = objectPendingUpload
|
||||
return true
|
||||
}
|
||||
|
||||
func (o *Object) tempFileStartedUpload() bool {
|
||||
started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return started
|
||||
}
|
||||
|
||||
var (
|
||||
_ fs.Object = (*Object)(nil)
|
||||
)
|
||||
backend/cache/plex.go (vendored, new file, 282 lines)
@@ -0,0 +1,282 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"sync"
|
||||
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"golang.org/x/net/websocket"
|
||||
)
|
||||
|
||||
const (
|
||||
// defPlexLoginURL is the default URL for Plex login
|
||||
defPlexLoginURL = "https://plex.tv/users/sign_in.json"
|
||||
defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
|
||||
)
|
||||
|
||||
// PlaySessionStateNotification is part of the API response of Plex
|
||||
type PlaySessionStateNotification struct {
|
||||
SessionKey string `json:"sessionKey"`
|
||||
GUID string `json:"guid"`
|
||||
Key string `json:"key"`
|
||||
ViewOffset int64 `json:"viewOffset"`
|
||||
State string `json:"state"`
|
||||
TranscodeSession string `json:"transcodeSession"`
|
||||
}
|
||||
|
||||
// NotificationContainer is part of the API response of Plex
|
||||
type NotificationContainer struct {
|
||||
Type string `json:"type"`
|
||||
Size int `json:"size"`
|
||||
PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
|
||||
}
|
||||
|
||||
// PlexNotification is part of the API response of Plex
|
||||
type PlexNotification struct {
|
||||
Container NotificationContainer `json:"NotificationContainer"`
|
||||
}
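// Illustrative sketch (not from the source): the JSON shape these structs
// decode from the Plex notification websocket; field values below are invented.
//
//   {"NotificationContainer": {"type": "playing", "size": 1,
//     "PlaySessionStateNotification": [
//       {"sessionKey": "3", "key": "/library/metadata/123",
//        "viewOffset": 5000, "state": "playing"}]}}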
|
||||
|
||||
// plexConnector manages the cache integration with Plex
|
||||
type plexConnector struct {
|
||||
url *url.URL
|
||||
username string
|
||||
password string
|
||||
token string
|
||||
f *Fs
|
||||
mu sync.Mutex
|
||||
running bool
|
||||
runningMu sync.Mutex
|
||||
stateCache *cache.Cache
|
||||
}
|
||||
|
||||
// newPlexConnector connects to a Plex server and generates a token
|
||||
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
|
||||
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pc := &plexConnector{
|
||||
f: f,
|
||||
url: u,
|
||||
username: username,
|
||||
password: password,
|
||||
token: "",
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
}
|
||||
|
||||
return pc, nil
|
||||
}
|
||||
|
||||
// newPlexConnectorWithToken connects to a Plex server using an existing token
|
||||
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
|
||||
u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pc := &plexConnector{
|
||||
f: f,
|
||||
url: u,
|
||||
token: token,
|
||||
stateCache: cache.New(time.Hour, time.Minute),
|
||||
}
|
||||
pc.listenWebsocket()
|
||||
|
||||
return pc, nil
|
||||
}
|
||||
|
||||
func (p *plexConnector) closeWebsocket() {
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
fs.Infof("plex", "stopped Plex watcher")
|
||||
p.running = false
|
||||
}
|
||||
|
||||
func (p *plexConnector) listenWebsocket() {
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
|
||||
u := strings.Replace(p.url.String(), "http://", "ws://", 1)
|
||||
u = strings.Replace(u, "https://", "wss://", 1)
|
||||
conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
|
||||
"", "http://localhost")
|
||||
if err != nil {
|
||||
fs.Errorf("plex", "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
p.running = true
|
||||
go func() {
|
||||
for {
|
||||
if !p.isConnected() {
|
||||
break
|
||||
}
|
||||
|
||||
notif := &PlexNotification{}
|
||||
err := websocket.JSON.Receive(conn, notif)
|
||||
if err != nil {
|
||||
fs.Debugf("plex", "%v", err)
|
||||
p.closeWebsocket()
|
||||
break
|
||||
}
|
||||
// we're only interested in play events
|
||||
if notif.Container.Type == "playing" {
|
||||
// we loop through each of them
|
||||
for _, v := range notif.Container.PlaySessionState {
|
||||
// event type of playing
|
||||
if v.State == "playing" {
|
||||
// if it's not cached get the details and cache them
|
||||
if _, found := p.stateCache.Get(v.Key); !found {
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
p.fillDefaultHeaders(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
var data []byte
|
||||
data, err = ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
|
||||
}
|
||||
} else if v.State == "stopped" {
|
||||
p.stateCache.Delete(v.Key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// fillDefaultHeaders will add common headers to requests
|
||||
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
|
||||
req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
|
||||
req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
|
||||
req.Header.Add("X-Plex-Version", fs.Version)
|
||||
req.Header.Add("Accept", "application/json")
|
||||
if p.token != "" {
|
||||
req.Header.Add("X-Plex-Token", p.token)
|
||||
}
|
||||
}
|
||||
|
||||
// authenticate will generate a token based on a username/password
|
||||
func (p *plexConnector) authenticate() error {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
form := url.Values{}
|
||||
form.Set("user[login]", p.username)
|
||||
form.Add("user[password]", p.password)
|
||||
req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.fillDefaultHeaders(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var data map[string]interface{}
|
||||
err = json.NewDecoder(resp.Body).Decode(&data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to obtain token: %v", err)
|
||||
}
|
||||
tokenGen, ok := get(data, "user", "authToken")
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to obtain token: %v", data)
|
||||
}
|
||||
token, ok := tokenGen.(string)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to obtain token: %v", data)
|
||||
}
|
||||
p.token = token
|
||||
if p.token != "" {
|
||||
config.FileSet(p.f.Name(), "plex_token", p.token)
|
||||
config.SaveConfig()
|
||||
fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
|
||||
}
|
||||
p.listenWebsocket()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// isConnected checks if this rclone is authenticated to Plex
|
||||
func (p *plexConnector) isConnected() bool {
|
||||
p.runningMu.Lock()
|
||||
defer p.runningMu.Unlock()
|
||||
return p.running
|
||||
}
|
||||
|
||||
// isConfigured checks if this rclone is configured to use a Plex server
|
||||
func (p *plexConnector) isConfigured() bool {
|
||||
return p.url != nil
|
||||
}
|
||||
|
||||
func (p *plexConnector) isPlaying(co *Object) bool {
|
||||
var err error
|
||||
if !p.isConnected() {
|
||||
p.listenWebsocket()
|
||||
}
|
||||
|
||||
remote := co.Remote()
|
||||
if cr, yes := p.f.isWrappedByCrypt(); yes {
|
||||
remote, err = cr.DecryptFileName(co.Remote())
|
||||
if err != nil {
|
||||
fs.Debugf("plex", "can not decrypt wrapped file: %v", err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
isPlaying := false
|
||||
for _, v := range p.stateCache.Items() {
|
||||
if bytes.Contains(v.Object.([]byte), []byte(remote)) {
|
||||
isPlaying = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return isPlaying
|
||||
}
|
||||
|
||||
// adapted from: https://stackoverflow.com/a/28878037 (credit)
|
||||
func get(m interface{}, path ...interface{}) (interface{}, bool) {
|
||||
for _, p := range path {
|
||||
switch idx := p.(type) {
|
||||
case string:
|
||||
if mm, ok := m.(map[string]interface{}); ok {
|
||||
if val, found := mm[idx]; found {
|
||||
m = val
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
case int:
|
||||
if mm, ok := m.([]interface{}); ok {
|
||||
if len(mm) > idx {
|
||||
m = mm[idx]
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
return m, true
|
||||
}
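// Usage sketch (illustrative only, values invented): reading a nested value
// from a decoded JSON document with get.
//
//   var data map[string]interface{}
//   _ = json.Unmarshal([]byte(`{"user": {"authToken": "abc"}}`), &data)
//   if tok, ok := get(data, "user", "authToken"); ok {
//       fmt.Println(tok) // prints "abc"
//   }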
|
||||
backend/cache/storage_memory.go (vendored, new file, 98 lines)
@@ -0,0 +1,98 @@
|
||||
// +build !plan9
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/patrickmn/go-cache"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Memory is a wrapper of transient storage for a go-cache store
|
||||
type Memory struct {
|
||||
db *cache.Cache
|
||||
}
|
||||
|
||||
// NewMemory builds this cache storage
|
||||
// defaultExpiration will set the expiry time of chunks in this storage
|
||||
func NewMemory(defaultExpiration time.Duration) *Memory {
|
||||
mem := &Memory{}
|
||||
err := mem.Connect(defaultExpiration)
|
||||
if err != nil {
|
||||
fs.Errorf("cache", "can't open ram connection: %v", err)
|
||||
}
|
||||
|
||||
return mem
|
||||
}
|
||||
|
||||
// Connect will create a connection for the storage
|
||||
func (m *Memory) Connect(defaultExpiration time.Duration) error {
|
||||
m.db = cache.New(defaultExpiration, -1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// HasChunk confirms the existence of a single chunk of an object
|
||||
func (m *Memory) HasChunk(cachedObject *Object, offset int64) bool {
|
||||
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
|
||||
_, found := m.db.Get(key)
|
||||
return found
|
||||
}
|
||||
|
||||
// GetChunk will retrieve a single chunk which belongs to a cached object or an error if it doesn't find it
|
||||
func (m *Memory) GetChunk(cachedObject *Object, offset int64) ([]byte, error) {
|
||||
key := cachedObject.abs() + "-" + strconv.FormatInt(offset, 10)
|
||||
var data []byte
|
||||
|
||||
if x, found := m.db.Get(key); found {
|
||||
data = x.([]byte)
|
||||
return data, nil
|
||||
}
|
||||
|
||||
return nil, errors.Errorf("couldn't get cached object data at offset %v", offset)
|
||||
}
|
||||
|
||||
// AddChunk adds a new chunk of a cached object
|
||||
func (m *Memory) AddChunk(fp string, data []byte, offset int64) error {
|
||||
return m.AddChunkAhead(fp, data, offset, time.Second)
|
||||
}
|
||||
|
||||
// AddChunkAhead adds a new chunk of a cached object
|
||||
func (m *Memory) AddChunkAhead(fp string, data []byte, offset int64, t time.Duration) error {
|
||||
key := fp + "-" + strconv.FormatInt(offset, 10)
|
||||
m.db.Set(key, data, cache.DefaultExpiration)
|
||||
|
||||
return nil
|
||||
}
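// Note (illustrative): chunk keys are "<absolute path>-<offset>", so with a
// hypothetical 5 MiB chunk size the keys look like "/remote/file.bin-0",
// "/remote/file.bin-5242880", and so on.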
|
||||
|
||||
// CleanChunksByAge will cleanup on a cron basis
|
||||
func (m *Memory) CleanChunksByAge(chunkAge time.Duration) {
|
||||
m.db.DeleteExpired()
|
||||
}
|
||||
|
||||
// CleanChunksByNeed will cleanup chunks after the FS passes a specific chunk
|
||||
func (m *Memory) CleanChunksByNeed(offset int64) {
|
||||
var items map[string]cache.Item
|
||||
|
||||
items = m.db.Items()
|
||||
for key := range items {
|
||||
sepIdx := strings.LastIndex(key, "-")
|
||||
keyOffset, err := strconv.ParseInt(key[sepIdx+1:], 10, 64)
|
||||
if err != nil {
|
||||
fs.Errorf("cache", "couldn't parse offset entry %v", key)
|
||||
continue
|
||||
}
|
||||
|
||||
if keyOffset < offset {
|
||||
m.db.Delete(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CleanChunksBySize will cleanup chunks after the total size passes a certain point
|
||||
func (m *Memory) CleanChunksBySize(maxSize int64) {
|
||||
// NOOP
|
||||
}
|
||||
backend/cache/storage_persistent.go (vendored, new file, 1099 lines): diff suppressed because it is too large
backend/crypt/cipher.go (new file, 1087 lines): diff suppressed because it is too large
backend/crypt/cipher_test.go (new file, 1290 lines): diff suppressed because it is too large
backend/crypt/crypt.go (new file, 748 lines)
@@ -0,0 +1,748 @@
|
||||
// Package crypt provides wrappers for Fs and Object which implement encryption
|
||||
package crypt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Globals
|
||||
var (
|
||||
// Flags
|
||||
cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "crypt",
|
||||
Description: "Encrypt/Decrypt a remote",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "remote",
|
||||
Help: "Remote to encrypt/decrypt.\nNormally should contain a ':' and a path, eg \"myremote:path/to/dir\",\n\"myremote:bucket\" or maybe \"myremote:\" (not recommended).",
|
||||
}, {
|
||||
Name: "filename_encryption",
|
||||
Help: "How to encrypt the filenames.",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "off",
|
||||
Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
|
||||
}, {
|
||||
Value: "standard",
|
||||
Help: "Encrypt the filenames see the docs for the details.",
|
||||
}, {
|
||||
Value: "obfuscate",
|
||||
Help: "Very simple filename obfuscation.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "directory_name_encryption",
|
||||
Help: "Option to either encrypt directory names or leave them intact.",
|
||||
Examples: []fs.OptionExample{
|
||||
{
|
||||
Value: "true",
|
||||
Help: "Encrypt directory names.",
|
||||
},
|
||||
{
|
||||
Value: "false",
|
||||
Help: "Don't encrypt directory names, leave them intact.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "password",
|
||||
Help: "Password or pass phrase for encryption.",
|
||||
IsPassword: true,
|
||||
}, {
|
||||
Name: "password2",
|
||||
Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
|
||||
IsPassword: true,
|
||||
Optional: true,
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// NewCipher constructs a Cipher for the given config name
|
||||
func NewCipher(name string) (Cipher, error) {
|
||||
mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
password := config.FileGet(name, "password", "")
|
||||
if password == "" {
|
||||
return nil, errors.New("password not set in config file")
|
||||
}
|
||||
password, err = obscure.Reveal(password)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password")
|
||||
}
|
||||
salt := config.FileGet(name, "password2", "")
|
||||
if salt != "" {
|
||||
salt, err = obscure.Reveal(salt)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decrypt password2")
|
||||
}
|
||||
}
|
||||
cipher, err := newCipher(mode, password, salt, dirNameEncrypt)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to make cipher")
|
||||
}
|
||||
return cipher, nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, rpath string) (fs.Fs, error) {
|
||||
cipher, err := NewCipher(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
remote := config.FileGet(name, "remote")
|
||||
if strings.HasPrefix(remote, name+":") {
|
||||
return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
|
||||
}
|
||||
// Look for a file first
|
||||
remotePath := path.Join(remote, cipher.EncryptFileName(rpath))
|
||||
wrappedFs, err := fs.NewFs(remotePath)
|
||||
// if that didn't produce a file, look for a directory
|
||||
if err != fs.ErrorIsFile {
|
||||
remotePath = path.Join(remote, cipher.EncryptDirName(rpath))
|
||||
wrappedFs, err = fs.NewFs(remotePath)
|
||||
}
|
||||
if err != fs.ErrorIsFile && err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to make remote %q to wrap", remotePath)
|
||||
}
|
||||
f := &Fs{
|
||||
Fs: wrappedFs,
|
||||
name: name,
|
||||
root: rpath,
|
||||
cipher: cipher,
|
||||
}
|
||||
// the features here are ones we could support, and they are
|
||||
// ANDed with the ones from wrappedFs
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: cipher.NameEncryptionMode() == NameEncryptionOff,
|
||||
DuplicateFiles: true,
|
||||
ReadMimeType: false, // MimeTypes not supported with crypt
|
||||
WriteMimeType: false,
|
||||
BucketBased: true,
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
|
||||
|
||||
doChangeNotify := wrappedFs.Features().ChangeNotify
|
||||
if doChangeNotify != nil {
|
||||
f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
|
||||
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
|
||||
decrypted, err := f.DecryptFileName(path)
|
||||
if err != nil {
|
||||
fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
|
||||
return
|
||||
}
|
||||
notifyFunc(decrypted, entryType)
|
||||
}
|
||||
return doChangeNotify(wrappedNotifyFunc, pollInterval)
|
||||
}
|
||||
}
|
||||
|
||||
return f, err
|
||||
}
|
||||
|
||||
// Fs represents a wrapped fs.Fs
|
||||
type Fs struct {
|
||||
fs.Fs
|
||||
name string
|
||||
root string
|
||||
features *fs.Features // optional features
|
||||
cipher Cipher
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Encrypted drive '%s:%s'", f.name, f.root)
|
||||
}
|
||||
|
||||
// add adds an object to entries, skipping it if its file name cannot be decrypted.
|
||||
func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
|
||||
remote := obj.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptFileName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Skipping undecryptable file name: %v", err)
|
||||
return
|
||||
}
|
||||
if *cryptShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newObject(obj))
|
||||
}
|
||||
|
||||
// addDir adds a directory to entries, skipping it if its name cannot be decrypted.
|
||||
func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
|
||||
remote := dir.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Skipping undecryptable dir name: %v", err)
|
||||
return
|
||||
}
|
||||
if *cryptShowMapping {
|
||||
fs.Logf(decryptedRemote, "Encrypts to %q", remote)
|
||||
}
|
||||
*entries = append(*entries, f.newDir(dir))
|
||||
}
|
||||
|
||||
// Encrypt some directory entries. This alters entries returning it as newEntries.
|
||||
func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
|
||||
newEntries = entries[:0] // in place filter
|
||||
for _, entry := range entries {
|
||||
switch x := entry.(type) {
|
||||
case fs.Object:
|
||||
f.add(&newEntries, x)
|
||||
case fs.Directory:
|
||||
f.addDir(&newEntries, x)
|
||||
default:
|
||||
return nil, errors.Errorf("Unknown object type %T", entry)
|
||||
}
|
||||
}
|
||||
return newEntries, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.encryptEntries(entries)
|
||||
}
|
||||
|
||||
// ListR lists the objects and directories of the Fs starting
|
||||
// from dir recursively into out.
|
||||
//
|
||||
// dir should be "" to start from the root, and should not
|
||||
// have trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
//
|
||||
// It should call callback for each tranche of entries read.
|
||||
// These need not be returned in any particular order. If
|
||||
// callback returns an error then the listing will stop
|
||||
// immediately.
|
||||
//
|
||||
// Don't implement this unless you have a more efficient way
|
||||
// of listing recursively than doing a directory traversal.
|
||||
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
|
||||
return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
|
||||
newEntries, err := f.encryptEntries(entries)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return callback(newEntries)
|
||||
})
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObject(o), nil
|
||||
}
|
||||
|
||||
type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
|
||||
|
||||
// put implements Put or PutStream
|
||||
func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
|
||||
// Encrypt the data into wrappedIn
|
||||
wrappedIn, err := f.cipher.EncryptData(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Find a hash the destination supports to compute a hash of
|
||||
// the encrypted data
|
||||
ht := f.Fs.Hashes().GetOne()
|
||||
var hasher *hash.MultiHasher
|
||||
if ht != hash.None {
|
||||
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
wrappedIn = io.TeeReader(wrappedIn, hasher)
|
||||
}
|
||||
|
||||
// Transfer the data
|
||||
o, err := put(wrappedIn, f.newObjectInfo(src), options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check the hashes of the encrypted data if we were comparing them
|
||||
if ht != hash.None && hasher != nil {
|
||||
srcHash := hasher.Sums()[ht]
|
||||
var dstHash string
|
||||
dstHash, err = o.Hash(ht)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read destination hash")
|
||||
}
|
||||
if srcHash != "" && dstHash != "" && srcHash != dstHash {
|
||||
// remove object
|
||||
err = o.Remove()
|
||||
if err != nil {
|
||||
fs.Errorf(o, "Failed to remove corrupted object: %v", err)
|
||||
}
|
||||
return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
|
||||
}
|
||||
}
|
||||
|
||||
return f.newObject(o), nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.put(in, src, options, f.Fs.Put)
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.put(in, src, options, f.Fs.Features().PutStream)
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// Mkdir makes the directory (container, bucket)
|
||||
//
|
||||
// Shouldn't return an error if it already exists
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
|
||||
}
|
||||
|
||||
// Purge all files in the root and the root directory
|
||||
//
|
||||
// Implement this if you have a way of deleting all the files
|
||||
// quicker than just running Remove() on the result of List()
|
||||
//
|
||||
// Return an error if it doesn't exist
|
||||
func (f *Fs) Purge() error {
|
||||
do := f.Fs.Features().Purge
|
||||
if do == nil {
|
||||
return fs.ErrorCantPurge
|
||||
}
|
||||
return do()
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantCopy
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.Fs.Features().Copy
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
o, ok := src.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObject(oResult), nil
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
do := f.Fs.Features().Move
|
||||
if do == nil {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
o, ok := src.(*Object)
|
||||
if !ok {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObject(oResult), nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
do := f.Fs.Features().DirMove
|
||||
if do == nil {
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
|
||||
}
|
||||
|
||||
// PutUnchecked uploads the object
|
||||
//
|
||||
// This will create a duplicate if we upload a new file without
|
||||
// checking to see if there is one already - use Put() for that.
|
||||
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
do := f.Fs.Features().PutUnchecked
|
||||
if do == nil {
|
||||
return nil, errors.New("can't PutUnchecked")
|
||||
}
|
||||
wrappedIn, err := f.cipher.EncryptData(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
o, err := do(wrappedIn, f.newObjectInfo(src))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return f.newObject(o), nil
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
//
|
||||
// Implement this if you have a way of emptying the trash or
|
||||
// otherwise cleaning up old versions of files.
|
||||
func (f *Fs) CleanUp() error {
|
||||
do := f.Fs.Features().CleanUp
|
||||
if do == nil {
|
||||
return errors.New("can't CleanUp")
|
||||
}
|
||||
return do()
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
}
|
||||
return do()
|
||||
}
|
||||
|
||||
// UnWrap returns the Fs that this Fs is wrapping
|
||||
func (f *Fs) UnWrap() fs.Fs {
|
||||
return f.Fs
|
||||
}
|
||||
|
||||
// EncryptFileName returns an encrypted file name
|
||||
func (f *Fs) EncryptFileName(fileName string) string {
|
||||
return f.cipher.EncryptFileName(fileName)
|
||||
}
|
||||
|
||||
// DecryptFileName returns a decrypted file name
|
||||
func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
|
||||
return f.cipher.DecryptFileName(encryptedFileName)
|
||||
}
|
||||
|
||||
// ComputeHash takes the nonce from o, and encrypts the contents of
|
||||
// src with it, and calculates the hash given by HashType on the fly
|
||||
//
|
||||
// Note that we break lots of encapsulation in this function.
|
||||
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
|
||||
// Read the nonce - opening the file is sufficient to read the nonce in
|
||||
// use a limited read so we only read the header
|
||||
in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||
}
|
||||
d, err := f.cipher.(*cipher).newDecrypter(in)
|
||||
if err != nil {
|
||||
_ = in.Close()
|
||||
return "", errors.Wrap(err, "failed to open object to read nonce")
|
||||
}
|
||||
nonce := d.nonce
|
||||
// fs.Debugf(o, "Read nonce % 2x", nonce)
|
||||
|
||||
// Check nonce isn't all zeros
|
||||
isZero := true
|
||||
for i := range nonce {
|
||||
if nonce[i] != 0 {
|
||||
isZero = false
|
||||
}
|
||||
}
|
||||
if isZero {
|
||||
fs.Errorf(o, "empty nonce read")
|
||||
}
|
||||
|
||||
// Close d (and hence in) once we have read the nonce
|
||||
err = d.Close()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to close nonce read")
|
||||
}
|
||||
|
||||
// Open the src for input
|
||||
in, err = src.Open()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to open src")
|
||||
}
|
||||
defer fs.CheckClose(in, &err)
|
||||
|
||||
// Now encrypt the src with the nonce
|
||||
out, err := f.cipher.(*cipher).newEncrypter(in, &nonce)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to make encrypter")
|
||||
}
|
||||
|
||||
// pipe into hash
|
||||
m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to make hasher")
|
||||
}
|
||||
_, err = io.Copy(m, out)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to hash data")
|
||||
}
|
||||
|
||||
return m.Sums()[hashType], nil
|
||||
}
|
||||
|
||||
// Object describes a wrapped fs.Object for being read from the Fs
|
||||
//
|
||||
// This decrypts the remote name and decrypts the data
|
||||
type Object struct {
|
||||
fs.Object
|
||||
f *Fs
|
||||
}
|
||||
|
||||
func (f *Fs) newObject(o fs.Object) *Object {
|
||||
return &Object{
|
||||
Object: o,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.f
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.Remote()
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
remote := o.Object.Remote()
|
||||
decryptedName, err := o.f.cipher.DecryptFileName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Undecryptable file name: %v", err)
|
||||
return remote
|
||||
}
|
||||
return decryptedName
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *Object) Size() int64 {
|
||||
size, err := o.f.cipher.DecryptedSize(o.Object.Size())
|
||||
if err != nil {
|
||||
fs.Debugf(o, "Bad size for decrypt: %v", err)
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *Object) Hash(ht hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// UnWrap returns the wrapped Object
|
||||
func (o *Object) UnWrap() fs.Object {
|
||||
return o.Object
|
||||
}
|
||||
|
||||
// Open opens the file for read. Call Close() on the returned io.ReadCloser
|
||||
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
var openOptions []fs.OpenOption
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
// pass on Options to underlying open if appropriate
|
||||
openOptions = append(openOptions, option)
|
||||
}
|
||||
}
|
||||
rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||
if underlyingOffset == 0 && underlyingLimit < 0 {
|
||||
// Open with no seek
|
||||
return o.Object.Open(openOptions...)
|
||||
}
|
||||
// Open stream with a range of underlyingOffset, underlyingLimit
|
||||
end := int64(-1)
|
||||
if underlyingLimit >= 0 {
|
||||
end = underlyingOffset + underlyingLimit - 1
|
||||
if end >= o.Object.Size() {
|
||||
end = -1
|
||||
}
|
||||
}
|
||||
newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
|
||||
return o.Object.Open(newOpenOptions...)
|
||||
}, offset, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return o.Object, o.Object.Update(in, src, options...)
|
||||
}
|
||||
_, err := o.f.put(in, src, options, update)
|
||||
return err
|
||||
}
|
||||
|
||||
// newDir returns a dir with the Name decrypted
|
||||
func (f *Fs) newDir(dir fs.Directory) fs.Directory {
|
||||
new := fs.NewDirCopy(dir)
|
||||
remote := dir.Remote()
|
||||
decryptedRemote, err := f.cipher.DecryptDirName(remote)
|
||||
if err != nil {
|
||||
fs.Debugf(remote, "Undecryptable dir name: %v", err)
|
||||
} else {
|
||||
new.SetRemote(decryptedRemote)
|
||||
}
|
||||
return new
|
||||
}
|
||||
|
||||
// ObjectInfo describes a wrapped fs.ObjectInfo used as the source of an upload
|
||||
//
|
||||
// This encrypts the remote name and adjusts the size
|
||||
type ObjectInfo struct {
|
||||
fs.ObjectInfo
|
||||
f *Fs
|
||||
}
|
||||
|
||||
func (f *Fs) newObjectInfo(src fs.ObjectInfo) *ObjectInfo {
|
||||
return &ObjectInfo{
|
||||
ObjectInfo: src,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Fs returns read only access to the Fs that this object is part of
|
||||
func (o *ObjectInfo) Fs() fs.Info {
|
||||
return o.f
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *ObjectInfo) Remote() string {
|
||||
return o.f.cipher.EncryptFileName(o.ObjectInfo.Remote())
|
||||
}
|
||||
|
||||
// Size returns the size of the file
|
||||
func (o *ObjectInfo) Size() int64 {
|
||||
size := o.ObjectInfo.Size()
|
||||
if size < 0 {
|
||||
return size
|
||||
}
|
||||
return o.f.cipher.EncryptedSize(size)
|
||||
}
|
||||
|
||||
// Hash returns the selected checksum of the file
|
||||
// If no checksum is available it returns ""
|
||||
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
_ fs.Purger = (*Fs)(nil)
|
||||
_ fs.Copier = (*Fs)(nil)
|
||||
_ fs.Mover = (*Fs)(nil)
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.PutUncheckeder = (*Fs)(nil)
|
||||
_ fs.PutStreamer = (*Fs)(nil)
|
||||
_ fs.CleanUpper = (*Fs)(nil)
|
||||
_ fs.UnWrapper = (*Fs)(nil)
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.ObjectInfo = (*ObjectInfo)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.ObjectUnWrapper = (*Object)(nil)
|
||||
)
|
||||
backend/crypt/crypt_test.go (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
// Test Crypt filesystem interface
|
||||
package crypt_test
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt"
|
||||
_ "github.com/ncw/rclone/backend/local"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fstest/fstests"
|
||||
)
|
||||
|
||||
// TestStandard runs integration tests against the remote
|
||||
func TestStandard(t *testing.T) {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
|
||||
name := "TestCrypt"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
|
||||
{Name: name, Key: "filename_encryption", Value: "standard"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestOff runs integration tests against the remote
|
||||
func TestOff(t *testing.T) {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
|
||||
name := "TestCrypt2"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "off"},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// TestObfuscate runs integration tests against the remote
|
||||
func TestObfuscate(t *testing.T) {
|
||||
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
|
||||
name := "TestCrypt3"
|
||||
fstests.Run(t, &fstests.Opt{
|
||||
RemoteName: name + ":",
|
||||
NilObject: (*crypt.Object)(nil),
|
||||
ExtraConfig: []fstests.ExtraConfigItem{
|
||||
{Name: name, Key: "type", Value: "crypt"},
|
||||
{Name: name, Key: "remote", Value: tempdir},
|
||||
{Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
|
||||
{Name: name, Key: "filename_encryption", Value: "obfuscate"},
|
||||
},
|
||||
SkipBadWindowsCharacters: true,
|
||||
})
|
||||
}
|
||||
backend/drive/drive.go (new file, 1739 lines): diff suppressed because it is too large
backend/drive/drive_internal_test.go (new file, 112 lines)
@@ -0,0 +1,112 @@
package drive

import (
	"encoding/json"
	"testing"

	"google.golang.org/api/drive/v3"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
)

const exampleExportFormats = `{
	"application/vnd.google-apps.document": [
		"application/rtf",
		"application/vnd.oasis.opendocument.text",
		"text/html",
		"application/pdf",
		"application/epub+zip",
		"application/zip",
		"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
		"text/plain"
	],
	"application/vnd.google-apps.spreadsheet": [
		"application/x-vnd.oasis.opendocument.spreadsheet",
		"text/tab-separated-values",
		"application/pdf",
		"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
		"text/csv",
		"application/zip",
		"application/vnd.oasis.opendocument.spreadsheet"
	],
	"application/vnd.google-apps.jam": [
		"application/pdf"
	],
	"application/vnd.google-apps.script": [
		"application/vnd.google-apps.script+json"
	],
	"application/vnd.google-apps.presentation": [
		"application/vnd.oasis.opendocument.presentation",
		"application/pdf",
		"application/vnd.openxmlformats-officedocument.presentationml.presentation",
		"text/plain"
	],
	"application/vnd.google-apps.form": [
		"application/zip"
	],
	"application/vnd.google-apps.drawing": [
		"image/svg+xml",
		"image/png",
		"application/pdf",
		"image/jpeg"
	]
}`

var exportFormats map[string][]string

// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleExportFormats(t *testing.T) {
	assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
}

func TestInternalParseExtensions(t *testing.T) {
	for _, test := range []struct {
		in      string
		want    []string
		wantErr error
	}{
		{"doc", []string{"doc"}, nil},
		{" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
		{"docx,svg,Docx", []string{"docx", "svg"}, nil},
		{"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
	} {
		f := new(Fs)
		gotErr := f.parseExtensions(test.in)
		if test.wantErr == nil {
			assert.NoError(t, gotErr)
		} else {
			assert.EqualError(t, gotErr, test.wantErr.Error())
		}
		assert.Equal(t, test.want, f.extensions)
	}

	// Test it is appending
	f := new(Fs)
	assert.Nil(t, f.parseExtensions("docx,svg"))
	assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
	assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)
}

func TestInternalFindExportFormat(t *testing.T) {
	item := new(drive.File)
	item.MimeType = "application/vnd.google-apps.document"
	for _, test := range []struct {
		extensions    []string
		wantExtension string
		wantMimeType  string
	}{
		{[]string{}, "", ""},
		{[]string{"pdf"}, "pdf", "application/pdf"},
		{[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
		{[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
		{[]string{"xls", "csv", "svg"}, "", ""},
	} {
		f := new(Fs)
		f.extensions = test.extensions
		gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
		assert.Equal(t, test.wantExtension, gotExtension)
		assert.Equal(t, test.wantMimeType, gotMimeType)
	}
}
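Taken together, these cases pin down the selection rule: findExportFormat walks the extensions configured on the Fs in order and picks the first one whose MIME type the source document can actually be exported to, falling back to empty strings when nothing matches. A minimal sketch of that rule, assuming a hypothetical extensionToMime lookup (the real extension-to-MIME resolution lives in drive.go, whose diff is suppressed above):

package main

import "fmt"

// findFirstExportable mirrors the behaviour the tests expect: the first
// configured extension whose MIME type the document offers wins.
// extensionToMime is a hypothetical stand-in for the real lookup.
func findFirstExportable(configured, exportMimeTypes []string, extensionToMime map[string]string) (string, string) {
	for _, ext := range configured {
		want := extensionToMime[ext]
		for _, mt := range exportMimeTypes {
			if mt == want {
				return ext, mt
			}
		}
	}
	return "", ""
}

func main() {
	extensionToMime := map[string]string{"pdf": "application/pdf", "rtf": "application/rtf", "xls": "application/vnd.ms-excel"}
	docExports := []string{"application/rtf", "application/pdf", "text/plain"}
	fmt.Println(findFirstExportable([]string{"xls", "rtf", "pdf"}, docExports, extensionToMime))
	// Output: rtf application/rtf, matching the {"xls", "rtf", "pdf"} case above
}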
17 backend/drive/drive_test.go Normal file
@@ -0,0 +1,17 @@
// Test Drive filesystem interface
package drive_test

import (
	"testing"

	"github.com/ncw/rclone/backend/drive"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDrive:",
		NilObject:  (*drive.Object)(nil),
	})
}
249 backend/drive/upload.go Normal file
@@ -0,0 +1,249 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

package drive

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"regexp"
	"strconv"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

const (
	// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
	statusResumeIncomplete = 308
)

// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
	f      *Fs
	remote string
	// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
	URI string
	// Media is the object being uploaded.
	Media io.Reader
	// MediaType defines the media type, e.g. "image/jpeg".
	MediaType string
	// ContentLength is the full size of the object being uploaded.
	ContentLength int64
	// Return value
	ret *drive.File
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
	params := make(url.Values)
	params.Set("alt", "json")
	params.Set("uploadType", "resumable")
	params.Set("fields", partialFields)
	if f.isTeamDrive {
		params.Set("supportsTeamDrives", "true")
	}
	if *driveKeepRevisionForever {
		params.Set("keepRevisionForever", "true")
	}
	urls := "https://www.googleapis.com/upload/drive/v3/files"
	method := "POST"
	if fileID != "" {
		params.Set("setModifiedDate", "true")
		urls += "/{fileId}"
		method = "PATCH"
	}
	urls += "?" + params.Encode()
	var res *http.Response
	var err error
	err = f.pacer.Call(func() (bool, error) {
		var body io.Reader
		body, err = googleapi.WithoutDataWrapper.JSONReader(info)
		if err != nil {
			return false, err
		}
		var req *http.Request
		req, err = http.NewRequest(method, urls, body)
		if err != nil {
			return false, err
		}
		googleapi.Expand(req.URL, map[string]string{
			"fileId": fileID,
		})
		req.Header.Set("Content-Type", "application/json; charset=UTF-8")
		req.Header.Set("X-Upload-Content-Type", contentType)
		req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
		res, err = f.client.Do(req)
		if err == nil {
			defer googleapi.CloseBody(res)
			err = googleapi.CheckResponse(res)
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	loc := res.Header.Get("Location")
	rx := &resumableUpload{
		f:             f,
		remote:        remote,
		URI:           loc,
		Media:         in,
		MediaType:     contentType,
		ContentLength: size,
	}
	return rx.Upload()
}

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
	req, _ := http.NewRequest("POST", rx.URI, body)
	req.ContentLength = reqSize
	if reqSize != 0 {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
	} else {
		req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
	}
	req.Header.Set("Content-Type", rx.MediaType)
	return req
}

// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
	req := rx.makeRequest(0, nil, 0)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 0, err
	}
	defer googleapi.CloseBody(res)
	if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
		return rx.ContentLength, nil
	}
	if res.StatusCode != statusResumeIncomplete {
		err = googleapi.CheckResponse(res)
		if err != nil {
			return 0, err
		}
		return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
	}
	Range := res.Header.Get("Range")
	if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
		start, err = strconv.ParseInt(m[1], 10, 64)
		if err == nil {
			return start, nil
		}
	}
	return 0, errors.Errorf("unable to parse range %q", Range)
}

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
	_, _ = chunk.Seek(0, io.SeekStart)
	req := rx.makeRequest(start, chunk, chunkSize)
	res, err := rx.f.client.Do(req)
	if err != nil {
		return 599, err
	}
	defer googleapi.CloseBody(res)
	if res.StatusCode == statusResumeIncomplete {
		return res.StatusCode, nil
	}
	err = googleapi.CheckResponse(res)
	if err != nil {
		return res.StatusCode, err
	}

	// When the entire file upload is complete, the server
	// responds with an HTTP 201 Created along with any metadata
	// associated with this resource. If this request had been
	// updating an existing entity rather than creating a new one,
	// the HTTP response code for a completed upload would have
	// been 200 OK.
	//
	// So parse the response out of the body. We aren't expecting
	// any other 2xx codes, so we parse it unconditionally on
	// StatusCode
	if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
		return 598, err
	}

	return res.StatusCode, nil
}

// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
	start := int64(0)
	var StatusCode int
	var err error
	buf := make([]byte, int(chunkSize))
	for start < rx.ContentLength {
		reqSize := rx.ContentLength - start
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		}
		chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)

		// Transfer the chunk
		err = rx.f.pacer.Call(func() (bool, error) {
			fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
			StatusCode, err = rx.transferChunk(start, chunk, reqSize)
			again, err := shouldRetry(err)
			if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
				again = false
				err = nil
			}
			return again, err
		})
		if err != nil {
			return nil, err
		}

		start += reqSize
	}
	// Resume or retry uploads that fail due to connection interruptions or
	// any 5xx errors, including:
	//
	//   500 Internal Server Error
	//   502 Bad Gateway
	//   503 Service Unavailable
	//   504 Gateway Timeout
	//
	// Use an exponential backoff strategy if any 5xx server error is
	// returned when resuming or retrying upload requests. These errors can
	// occur if a server is getting overloaded. Exponential backoff can help
	// alleviate these kinds of problems during periods of high volume of
	// requests or heavy network traffic. Other kinds of requests should not
	// be handled by exponential backoff but you can still retry a number of
	// them. When retrying these requests, limit the number of times you
	// retry them. For example your code could limit to ten retries or less
	// before reporting an error.
	//
	// Handle 404 Not Found errors when doing resumable uploads by starting
	// the entire upload over from the beginning.
	if rx.ret == nil {
		return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
	}
	return rx.ret, nil
}
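Upload above walks the source in chunkSize pieces, labelling each piece with a Content-Range header and treating 308 (statusResumeIncomplete) as "keep going". A standalone sketch of just that chunk bookkeeping, detached from the Drive client, with an 8 MiB chunk size assumed purely for illustration:

package main

import "fmt"

// contentRange reproduces the header format makeRequest uses for a chunk
// starting at start with reqSize bytes out of total bytes overall.
func contentRange(start, reqSize, total int64) string {
	if reqSize == 0 {
		return fmt.Sprintf("bytes */%v", total)
	}
	return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, total)
}

func main() {
	const chunkSize = 8 * 1024 * 1024 // assumed chunk size for the example
	total := int64(20 * 1024 * 1024)  // a 20 MiB upload
	for start := int64(0); start < total; start += chunkSize {
		reqSize := total - start
		if reqSize > chunkSize {
			reqSize = chunkSize
		}
		fmt.Println(contentRange(start, reqSize, total))
		// bytes 0-8388607/20971520
		// bytes 8388608-16777215/20971520
		// bytes 16777216-20971519/20971520
	}
}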
127 backend/dropbox/dbhash/dbhash.go Normal file
@@ -0,0 +1,127 @@
// Package dbhash implements the dropbox hash as described in
//
// https://www.dropbox.com/developers/reference/content-hash
package dbhash

import (
	"crypto/sha256"
	"hash"
)

const (
	// BlockSize of the checksum in bytes.
	BlockSize = sha256.BlockSize
	// Size of the checksum in bytes.
	Size              = sha256.BlockSize
	bytesPerBlock     = 4 * 1024 * 1024
	hashReturnedError = "hash function returned error"
)

type digest struct {
	n           int // bytes written into blockHash so far
	blockHash   hash.Hash
	totalHash   hash.Hash
	sumCalled   bool
	writtenMore bool
}

// New returns a new hash.Hash computing the Dropbox checksum.
func New() hash.Hash {
	d := &digest{}
	d.Reset()
	return d
}

// writeBlockHash writes the current block hash into the total hash
func (d *digest) writeBlockHash() {
	blockHash := d.blockHash.Sum(nil)
	_, err := d.totalHash.Write(blockHash)
	if err != nil {
		panic(hashReturnedError)
	}
	// reset counters for blockhash
	d.n = 0
	d.blockHash.Reset()
}

// Write writes len(p) bytes from p to the underlying data stream. It returns
// the number of bytes written from p (0 <= n <= len(p)) and any error
// encountered that caused the write to stop early. Write must return a non-nil
// error if it returns n < len(p). Write must not modify the slice data, even
// temporarily.
//
// Implementations must not retain p.
func (d *digest) Write(p []byte) (n int, err error) {
	n = len(p)
	for len(p) > 0 {
		d.writtenMore = true
		toWrite := bytesPerBlock - d.n
		if toWrite > len(p) {
			toWrite = len(p)
		}
		_, err = d.blockHash.Write(p[:toWrite])
		if err != nil {
			panic(hashReturnedError)
		}
		d.n += toWrite
		p = p[toWrite:]
		// Accumulate the total hash
		if d.n == bytesPerBlock {
			d.writeBlockHash()
		}
	}
	return n, nil
}

// Sum appends the current hash to b and returns the resulting slice.
// It does not change the underlying hash state.
//
// TODO(ncw) Sum() can only be called once for this type of hash.
// If you call Sum(), then Write() then Sum() it will result in
// a panic. Calling Write() then Sum(), then Sum() is OK.
func (d *digest) Sum(b []byte) []byte {
	if d.sumCalled && d.writtenMore {
		panic("digest.Sum() called more than once")
	}
	d.sumCalled = true
	d.writtenMore = false
	if d.n != 0 {
		d.writeBlockHash()
	}
	return d.totalHash.Sum(b)
}

// Reset resets the Hash to its initial state.
func (d *digest) Reset() {
	d.n = 0
	d.totalHash = sha256.New()
	d.blockHash = sha256.New()
	d.sumCalled = false
	d.writtenMore = false
}

// Size returns the number of bytes Sum will return.
func (d *digest) Size() int {
	return d.totalHash.Size()
}

// BlockSize returns the hash's underlying block size.
// The Write method must be able to accept any amount
// of data, but it may operate more efficiently if all writes
// are a multiple of the block size.
func (d *digest) BlockSize() int {
	return d.totalHash.BlockSize()
}

// Sum returns the Dropbox checksum of the data.
func Sum(data []byte) [Size]byte {
	var d digest
	d.Reset()
	_, _ = d.Write(data)
	var out [Size]byte
	d.Sum(out[:0])
	return out
}

// must implement this interface
var _ hash.Hash = (*digest)(nil)
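Since digest satisfies hash.Hash, producing a Dropbox content hash for a local file is just a stream through io.Copy: each full 4 MiB block is hashed as it arrives and folded into the outer SHA-256. A small usage sketch (the file name is illustrative only):

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/ncw/rclone/backend/dropbox/dbhash"
)

func main() {
	f, err := os.Open("some-file.bin") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	h := dbhash.New()
	if _, err := io.Copy(h, f); err != nil { // hashes 4 MiB blocks as it reads
		log.Fatal(err)
	}
	fmt.Printf("dropbox content hash: %x\n", h.Sum(nil))
}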
88 backend/dropbox/dbhash/dbhash_test.go Normal file
@@ -0,0 +1,88 @@
package dbhash_test

import (
	"encoding/hex"
	"fmt"
	"testing"

	"github.com/ncw/rclone/backend/dropbox/dbhash"
	"github.com/stretchr/testify/assert"
)

func testChunk(t *testing.T, chunk int) {
	data := make([]byte, chunk)
	for i := 0; i < chunk; i++ {
		data[i] = 'A'
	}
	for _, test := range []struct {
		n    int
		want string
	}{
		{0, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"},
		{1, "1cd6ef71e6e0ff46ad2609d403dc3fee244417089aa4461245a4e4fe23a55e42"},
		{2, "01e0655fb754d10418a73760f57515f4903b298e6d67dda6bf0987fa79c22c88"},
		{4096, "8620913d33852befe09f16fff8fd75f77a83160d29f76f07e0276e9690903035"},
		{4194303, "647c8627d70f7a7d13ce96b1e7710a771a55d41a62c3da490d92e56044d311fa"},
		{4194304, "d4d63bac5b866c71620185392a8a6218ac1092454a2d16f820363b69852befa3"},
		{4194305, "8f553da8d00d0bf509d8470e242888be33019c20c0544811f5b2b89e98360b92"},
		{8388607, "83b30cf4fb5195b04a937727ae379cf3d06673bf8f77947f6a92858536e8369c"},
		{8388608, "e08b3ba1f538804075c5f939accdeaa9efc7b5c01865c94a41e78ca6550a88e7"},
		{8388609, "02c8a4aefc2bfc9036f89a7098001865885938ca580e5c9e5db672385edd303c"},
	} {
		d := dbhash.New()
		var toWrite int
		for toWrite = test.n; toWrite >= chunk; toWrite -= chunk {
			n, err := d.Write(data)
			assert.Nil(t, err)
			assert.Equal(t, chunk, n)
		}
		n, err := d.Write(data[:toWrite])
		assert.Nil(t, err)
		assert.Equal(t, toWrite, n)
		got := hex.EncodeToString(d.Sum(nil))
		assert.Equal(t, test.want, got, fmt.Sprintf("when testing length %d", n))
	}
}

func TestHashChunk16M(t *testing.T)  { testChunk(t, 16*1024*1024) }
func TestHashChunk8M(t *testing.T)   { testChunk(t, 8*1024*1024) }
func TestHashChunk4M(t *testing.T)   { testChunk(t, 4*1024*1024) }
func TestHashChunk2M(t *testing.T)   { testChunk(t, 2*1024*1024) }
func TestHashChunk1M(t *testing.T)   { testChunk(t, 1*1024*1024) }
func TestHashChunk64k(t *testing.T)  { testChunk(t, 64*1024) }
func TestHashChunk32k(t *testing.T)  { testChunk(t, 32*1024) }
func TestHashChunk2048(t *testing.T) { testChunk(t, 2048) }
func TestHashChunk2047(t *testing.T) { testChunk(t, 2047) }

func TestSumCalledTwice(t *testing.T) {
	d := dbhash.New()
	assert.NotPanics(t, func() { d.Sum(nil) })
	d.Reset()
	assert.NotPanics(t, func() { d.Sum(nil) })
	assert.NotPanics(t, func() { d.Sum(nil) })
	_, _ = d.Write([]byte{1})
	assert.Panics(t, func() { d.Sum(nil) })
}

func TestSize(t *testing.T) {
	d := dbhash.New()
	assert.Equal(t, 32, d.Size())
}

func TestBlockSize(t *testing.T) {
	d := dbhash.New()
	assert.Equal(t, 64, d.BlockSize())
}

func TestSum(t *testing.T) {
	assert.Equal(t,
		[64]byte{
			0x1c, 0xd6, 0xef, 0x71, 0xe6, 0xe0, 0xff, 0x46,
			0xad, 0x26, 0x09, 0xd4, 0x03, 0xdc, 0x3f, 0xee,
			0x24, 0x44, 0x17, 0x08, 0x9a, 0xa4, 0x46, 0x12,
			0x45, 0xa4, 0xe4, 0xfe, 0x23, 0xa5, 0x5e, 0x42,
		},
		dbhash.Sum([]byte{'A'}),
	)
}
1063 backend/dropbox/dropbox.go Normal file
File diff suppressed because it is too large
17 backend/dropbox/dropbox_test.go Normal file
@@ -0,0 +1,17 @@
// Test Dropbox filesystem interface
package dropbox_test

import (
	"testing"

	"github.com/ncw/rclone/backend/dropbox"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestDropbox:",
		NilObject:  (*dropbox.Object)(nil),
	})
}
773 backend/ftp/ftp.go Normal file
@@ -0,0 +1,773 @@
|
||||
// Package ftp interfaces with FTP servers
|
||||
package ftp
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/jlaffaye/ftp"
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "ftp",
|
||||
Description: "FTP Connection",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{
|
||||
{
|
||||
Name: "host",
|
||||
Help: "FTP host to connect to",
|
||||
Optional: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "ftp.example.com",
|
||||
Help: "Connect to ftp.example.com",
|
||||
}},
|
||||
}, {
|
||||
Name: "user",
|
||||
Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "port",
|
||||
Help: "FTP port, leave blank to use default (21) ",
|
||||
Optional: true,
|
||||
}, {
|
||||
Name: "pass",
|
||||
Help: "FTP password",
|
||||
IsPassword: true,
|
||||
Optional: false,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a remote FTP server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
url string
|
||||
user string
|
||||
pass string
|
||||
dialAddr string
|
||||
poolMu sync.Mutex
|
||||
pool []*ftp.ServerConn
|
||||
}
|
||||
|
||||
// Object describes an FTP file
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
info *FileInfo
|
||||
}
|
||||
|
||||
// FileInfo is the metadata known about an FTP file
|
||||
type FileInfo struct {
|
||||
Name string
|
||||
Size uint64
|
||||
ModTime time.Time
|
||||
IsDir bool
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of this fs
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String returns a description of the FS
|
||||
func (f *Fs) String() string {
|
||||
return f.url
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Open a new connection to the FTP server.
|
||||
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
|
||||
fs.Debugf(f, "Connecting to FTP server")
|
||||
c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
|
||||
if err != nil {
|
||||
fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
|
||||
return nil, errors.Wrap(err, "ftpConnection Dial")
|
||||
}
|
||||
err = c.Login(f.user, f.pass)
|
||||
if err != nil {
|
||||
_ = c.Quit()
|
||||
fs.Errorf(f, "Error while Logging in into %s: %s", f.dialAddr, err)
|
||||
return nil, errors.Wrap(err, "ftpConnection Login")
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// Get an FTP connection from the pool, or open a new one
|
||||
func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
|
||||
f.poolMu.Lock()
|
||||
if len(f.pool) > 0 {
|
||||
c = f.pool[0]
|
||||
f.pool = f.pool[1:]
|
||||
}
|
||||
f.poolMu.Unlock()
|
||||
if c != nil {
|
||||
return c, nil
|
||||
}
|
||||
return f.ftpConnection()
|
||||
}
|
||||
|
||||
// Return an FTP connection to the pool
|
||||
//
|
||||
// It nils the pointed to connection out so it can't be reused
|
||||
//
|
||||
// if err is not nil then it checks the connection is alive using a
|
||||
// NOOP request
|
||||
func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
|
||||
c := *pc
|
||||
*pc = nil
|
||||
if err != nil {
|
||||
// If not a regular FTP error code then check the connection
|
||||
_, isRegularError := errors.Cause(err).(*textproto.Error)
|
||||
if !isRegularError {
|
||||
nopErr := c.NoOp()
|
||||
if nopErr != nil {
|
||||
fs.Debugf(f, "Connection failed, closing: %v", nopErr)
|
||||
_ = c.Quit()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
f.poolMu.Lock()
|
||||
f.pool = append(f.pool, c)
|
||||
f.poolMu.Unlock()
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (ff fs.Fs, err error) {
|
||||
// defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
|
||||
// FIXME Convert the old scheme used for the first beta - remove after release
|
||||
if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
|
||||
fs.Infof(name, "Converting old configuration")
|
||||
u, err := url.Parse(ftpURL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
|
||||
}
|
||||
parts := strings.Split(u.Host, ":")
|
||||
config.FileSet(name, "host", parts[0])
|
||||
if len(parts) > 1 {
|
||||
config.FileSet(name, "port", parts[1])
|
||||
}
|
||||
config.FileSet(name, "host", u.Host)
|
||||
config.FileSet(name, "user", config.FileGet(name, "username"))
|
||||
config.FileSet(name, "pass", config.FileGet(name, "password"))
|
||||
config.FileDeleteKey(name, "username")
|
||||
config.FileDeleteKey(name, "password")
|
||||
config.FileDeleteKey(name, "url")
|
||||
config.SaveConfig()
|
||||
if u.Path != "" && u.Path != "/" {
|
||||
fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
|
||||
}
|
||||
}
|
||||
host := config.FileGet(name, "host")
|
||||
user := config.FileGet(name, "user")
|
||||
pass := config.FileGet(name, "pass")
|
||||
port := config.FileGet(name, "port")
|
||||
pass, err = obscure.Reveal(pass)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFS decrypt password")
|
||||
}
|
||||
if user == "" {
|
||||
user = os.Getenv("USER")
|
||||
}
|
||||
if port == "" {
|
||||
port = "21"
|
||||
}
|
||||
|
||||
dialAddr := host + ":" + port
|
||||
u := "ftp://" + path.Join(dialAddr+"/", root)
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
url: u,
|
||||
user: user,
|
||||
pass: pass,
|
||||
dialAddr: dialAddr,
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
// Make a connection and pool it to return errors early
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "NewFs")
|
||||
}
|
||||
f.putFtpConnection(&c, nil)
|
||||
if root != "" {
|
||||
// Check to see if the root is actually an existing file
|
||||
remote := path.Base(root)
|
||||
f.root = path.Dir(root)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
}
|
||||
_, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
|
||||
// File doesn't exist so return old f
|
||||
f.root = root
|
||||
return f, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, err
|
||||
}
|
||||
|
||||
// translateErrorFile turns FTP errors into rclone errors if possible for a file
|
||||
func translateErrorFile(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorObjectNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// translateErrorDir turns FTP errors into rclone errors if possible for a directory
|
||||
func translateErrorDir(err error) error {
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
|
||||
err = fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// findItem finds a directory entry for the name in its parent directory
|
||||
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
fullPath := path.Join(f.root, remote)
|
||||
dir := path.Dir(fullPath)
|
||||
base := path.Base(fullPath)
|
||||
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "findItem")
|
||||
}
|
||||
files, err := c.List(dir)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, translateErrorFile(err)
|
||||
}
|
||||
for _, file := range files {
|
||||
if file.Name == base {
|
||||
return file, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error fs.ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
|
||||
// defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
|
||||
entry, err := f.findItem(remote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if entry != nil && entry.Type != ftp.EntryTypeFolder {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: entry.Size,
|
||||
ModTime: entry.Time,
|
||||
}
|
||||
o.info = info
|
||||
|
||||
return o, nil
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// dirExists checks the directory pointed to by remote exists or not
|
||||
func (f *Fs) dirExists(remote string) (exists bool, err error) {
|
||||
entry, err := f.findItem(remote)
|
||||
if err != nil {
|
||||
return false, errors.Wrap(err, "dirExists")
|
||||
}
|
||||
if entry != nil && entry.Type == ftp.EntryTypeFolder {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// defer fs.Trace(dir, "curlevel=%d", curlevel)("")
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list")
|
||||
}
|
||||
files, err := c.List(path.Join(f.root, dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, translateErrorDir(err)
|
||||
}
|
||||
// Annoyingly FTP returns success for a directory which
|
||||
// doesn't exist, so check it really doesn't exist if no
|
||||
// entries found.
|
||||
if len(files) == 0 {
|
||||
exists, err := f.dirExists(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "list")
|
||||
}
|
||||
if !exists {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
}
|
||||
for i := range files {
|
||||
object := files[i]
|
||||
newremote := path.Join(dir, object.Name)
|
||||
switch object.Type {
|
||||
case ftp.EntryTypeFolder:
|
||||
if object.Name == "." || object.Name == ".." {
|
||||
continue
|
||||
}
|
||||
d := fs.NewDir(newremote, object.Time)
|
||||
entries = append(entries, d)
|
||||
default:
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: newremote,
|
||||
}
|
||||
info := &FileInfo{
|
||||
Name: newremote,
|
||||
Size: object.Size,
|
||||
ModTime: object.Time,
|
||||
}
|
||||
o.info = info
|
||||
entries = append(entries, o)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Hashes are not supported
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return 0
|
||||
}
|
||||
|
||||
// Precision shows Modified Time not supported
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return fs.ModTimeNotSupported
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
// fs.Debugf(f, "Trying to put file %s", src.Remote())
|
||||
err := f.mkParentDir(src.Remote())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Put mkParentDir failed")
|
||||
}
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: src.Remote(),
|
||||
}
|
||||
err = o.Update(in, src, options...)
|
||||
return o, err
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(in, src, options...)
|
||||
}
|
||||
|
||||
// getInfo reads the FileInfo for a path
|
||||
func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
|
||||
// defer fs.Trace(remote, "")("fi=%v, err=%v", &fi, &err)
|
||||
dir := path.Dir(remote)
|
||||
base := path.Base(remote)
|
||||
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "getInfo")
|
||||
}
|
||||
files, err := c.List(dir)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, translateErrorFile(err)
|
||||
}
|
||||
|
||||
for i := range files {
|
||||
if files[i].Name == base {
|
||||
info := &FileInfo{
|
||||
Name: remote,
|
||||
Size: files[i].Size,
|
||||
ModTime: files[i].Time,
|
||||
IsDir: files[i].Type == ftp.EntryTypeFolder,
|
||||
}
|
||||
return info, nil
|
||||
}
|
||||
}
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
|
||||
// mkdir makes the directory and parents using unrooted paths
|
||||
func (f *Fs) mkdir(abspath string) error {
|
||||
if abspath == "." || abspath == "/" {
|
||||
return nil
|
||||
}
|
||||
fi, err := f.getInfo(abspath)
|
||||
if err == nil {
|
||||
if fi.IsDir {
|
||||
return nil
|
||||
}
|
||||
return fs.ErrorIsFile
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
return errors.Wrapf(err, "mkdir %q failed", abspath)
|
||||
}
|
||||
parent := path.Dir(abspath)
|
||||
err = f.mkdir(parent)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c, connErr := f.getFtpConnection()
|
||||
if connErr != nil {
|
||||
return errors.Wrap(connErr, "mkdir")
|
||||
}
|
||||
err = c.MakeDir(abspath)
|
||||
f.putFtpConnection(&c, err)
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
|
||||
err = nil
|
||||
case 521: // dir already exists: error number according to RFC 959: issue #2363
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// mkParentDir makes the parent of remote if necessary and any
|
||||
// directories above that
|
||||
func (f *Fs) mkParentDir(remote string) error {
|
||||
parent := path.Dir(remote)
|
||||
return f.mkdir(path.Join(f.root, parent))
|
||||
}
|
||||
|
||||
// Mkdir creates the directory if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) (err error) {
|
||||
// defer fs.Trace(dir, "")("err=%v", &err)
|
||||
root := path.Join(f.root, dir)
|
||||
return f.mkdir(root)
|
||||
}
|
||||
|
||||
// Rmdir removes the directory (container, bucket) if empty
|
||||
//
|
||||
// Return an error if it doesn't exist or isn't empty
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(translateErrorFile(err), "Rmdir")
|
||||
}
|
||||
err = c.RemoveDir(path.Join(f.root, dir))
|
||||
f.putFtpConnection(&c, err)
|
||||
return translateErrorDir(err)
|
||||
}
|
||||
|
||||
// Move renames a remote file object
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
err := f.mkParentDir(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move mkParentDir failed")
|
||||
}
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move")
|
||||
}
|
||||
err = c.Rename(
|
||||
path.Join(srcObj.fs.root, srcObj.remote),
|
||||
path.Join(f.root, remote),
|
||||
)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move Rename failed")
|
||||
}
|
||||
dstObj, err := f.NewObject(remote)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Move NewObject failed")
|
||||
}
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := path.Join(srcFs.root, srcRemote)
|
||||
dstPath := path.Join(f.root, dstRemote)
|
||||
|
||||
// Check if destination exists
|
||||
fi, err := f.getInfo(dstPath)
|
||||
if err == nil {
|
||||
if fi.IsDir {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
return fs.ErrorIsFile
|
||||
} else if err != fs.ErrorObjectNotFound {
|
||||
return errors.Wrapf(err, "DirMove getInfo failed")
|
||||
}
|
||||
|
||||
// Make sure the parent directory exists
|
||||
err = f.mkdir(path.Dir(dstPath))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove mkParentDir dst failed")
|
||||
}
|
||||
|
||||
// Do the move
|
||||
c, err := f.getFtpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "DirMove")
|
||||
}
|
||||
err = c.Rename(
|
||||
srcPath,
|
||||
dstPath,
|
||||
)
|
||||
f.putFtpConnection(&c, err)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "DirMove Rename(%q,%q) failed", srcPath, dstPath)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// String version of o
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the hash of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return int64(o.info.Size)
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.info.ModTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Storable returns a boolean as to whether this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// ftpReadCloser implements io.ReadCloser for FTP objects.
|
||||
type ftpReadCloser struct {
|
||||
rc io.ReadCloser
|
||||
c *ftp.ServerConn
|
||||
f *Fs
|
||||
err error // errors found during read
|
||||
}
|
||||
|
||||
// Read bytes into p
|
||||
func (f *ftpReadCloser) Read(p []byte) (n int, err error) {
|
||||
n, err = f.rc.Read(p)
|
||||
if err != nil && err != io.EOF {
|
||||
f.err = err // store any errors for Close to examine
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Close the FTP reader and return the connection to the pool
|
||||
func (f *ftpReadCloser) Close() error {
|
||||
err := f.rc.Close()
|
||||
// if errors while reading or closing, dump the connection
|
||||
if err != nil || f.err != nil {
|
||||
_ = f.c.Quit()
|
||||
} else {
|
||||
f.f.putFtpConnection(&f.c, nil)
|
||||
}
|
||||
// mask the error if it was caused by a premature close
|
||||
switch errX := err.(type) {
|
||||
case *textproto.Error:
|
||||
switch errX.Code {
|
||||
case ftp.StatusTransfertAborted, ftp.StatusFileUnavailable:
|
||||
err = nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
|
||||
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
var offset, limit int64 = 0, -1
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.Size())
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
c, err := o.fs.getFtpConnection()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "open")
|
||||
}
|
||||
fd, err := c.RetrFrom(path, uint64(offset))
|
||||
if err != nil {
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
return nil, errors.Wrap(err, "open")
|
||||
}
|
||||
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
|
||||
return rc, nil
|
||||
}
|
||||
|
||||
// Update the already existing object
|
||||
//
|
||||
// Copy the reader into the object updating modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
|
||||
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
// remove the file if upload failed
|
||||
remove := func() {
|
||||
removeErr := o.Remove()
|
||||
if removeErr != nil {
|
||||
fs.Debugf(o, "Failed to remove: %v", removeErr)
|
||||
} else {
|
||||
fs.Debugf(o, "Removed after failed upload: %v", err)
|
||||
}
|
||||
}
|
||||
c, err := o.fs.getFtpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Update")
|
||||
}
|
||||
err = c.Stor(path, in)
|
||||
if err != nil {
|
||||
_ = c.Quit()
|
||||
remove()
|
||||
return errors.Wrap(err, "update stor")
|
||||
}
|
||||
o.fs.putFtpConnection(&c, nil)
|
||||
o.info, err = o.fs.getInfo(path)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "update getinfo")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() (err error) {
|
||||
// defer fs.Trace(o, "")("err=%v", &err)
|
||||
path := path.Join(o.fs.root, o.remote)
|
||||
// Check if it's a directory or a file
|
||||
info, err := o.fs.getInfo(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info.IsDir {
|
||||
err = o.fs.Rmdir(o.remote)
|
||||
} else {
|
||||
c, err := o.fs.getFtpConnection()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "Remove")
|
||||
}
|
||||
err = c.Delete(path)
|
||||
o.fs.putFtpConnection(&c, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
)
|
||||
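getFtpConnection and putFtpConnection above amount to a borrow/return pool: a mutex-guarded slice of idle connections, a fresh dial only when the pool is empty, and broken connections dropped on return rather than reused. A reduced, generic sketch of the same pattern follows; the conn type and dial function are placeholders, not the rclone or jlaffaye/ftp API:

package main

import "sync"

// conn stands in for *ftp.ServerConn in the real code.
type conn struct{ id int }

type pool struct {
	mu   sync.Mutex
	idle []*conn
	dial func() (*conn, error) // how to make a new connection
}

// get returns an idle connection if there is one, otherwise dials a new one.
func (p *pool) get() (*conn, error) {
	p.mu.Lock()
	if len(p.idle) > 0 {
		c := p.idle[0]
		p.idle = p.idle[1:]
		p.mu.Unlock()
		return c, nil
	}
	p.mu.Unlock()
	return p.dial()
}

// put returns a connection to the pool unless it is broken, in which case
// it is simply dropped (the real code also nils out the caller's pointer
// and probes a suspect connection with a NOOP before trusting it).
func (p *pool) put(c *conn, broken bool) {
	if broken {
		return
	}
	p.mu.Lock()
	p.idle = append(p.idle, c)
	p.mu.Unlock()
}

func main() {
	next := 0
	p := &pool{dial: func() (*conn, error) { next++; return &conn{id: next}, nil }}
	c, _ := p.get() // pool empty, so this dials
	p.put(c, false) // returned to the pool
	c2, _ := p.get()
	_ = c2 // reuses the same connection
}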
17 backend/ftp/ftp_test.go Normal file
@@ -0,0 +1,17 @@
// Test FTP filesystem interface
package ftp_test

import (
	"testing"

	"github.com/ncw/rclone/backend/ftp"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestFTP:",
		NilObject:  (*ftp.Object)(nil),
	})
}
984 backend/googlecloudstorage/googlecloudstorage.go Normal file
@@ -0,0 +1,984 @@
|
||||
// Package googlecloudstorage provides an interface to Google Cloud Storage
|
||||
package googlecloudstorage
|
||||
|
||||
/*
|
||||
Notes
|
||||
|
||||
Can't set Updated but can set Metadata on object creation
|
||||
|
||||
Patch needs full_control not just read_write
|
||||
|
||||
FIXME Patch/Delete/Get isn't working with files with spaces in - giving 404 error
|
||||
- https://code.google.com/p/google-api-go-client/issues/detail?id=64
|
||||
*/
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/config/obscure"
|
||||
"github.com/ncw/rclone/fs/fserrors"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/fs/walk"
|
||||
"github.com/ncw/rclone/lib/oauthutil"
|
||||
"github.com/ncw/rclone/lib/pacer"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/googleapi"
|
||||
storage "google.golang.org/api/storage/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
rcloneClientID = "202264815644.apps.googleusercontent.com"
|
||||
rcloneEncryptedClientSecret = "Uj7C9jGfb9gmeaV70Lh058cNkWvepr-Es9sBm0zdgil7JaOWF1VySw"
|
||||
timeFormatIn = time.RFC3339
|
||||
timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
|
||||
metaMtime = "mtime" // key to store mtime under in metadata
|
||||
listChunks = 1000 // chunk size to read directory listings
|
||||
minSleep = 10 * time.Millisecond
|
||||
)
|
||||
|
||||
var (
|
||||
gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-noetheast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
|
||||
gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
|
||||
// Description of how to auth for this app
|
||||
storageConfig = &oauth2.Config{
|
||||
Scopes: []string{storage.DevstorageFullControlScope},
|
||||
Endpoint: google.Endpoint,
|
||||
ClientID: rcloneClientID,
|
||||
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
|
||||
RedirectURL: oauthutil.TitleBarRedirectURL,
|
||||
}
|
||||
)
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fs.Register(&fs.RegInfo{
|
||||
Name: "google cloud storage",
|
||||
Description: "Google Cloud Storage (this is not Google Drive)",
|
||||
NewFs: NewFs,
|
||||
Config: func(name string) {
|
||||
if config.FileGet(name, "service_account_file") != "" {
|
||||
return
|
||||
}
|
||||
err := oauthutil.Config("google cloud storage", name, storageConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure token: %v", err)
|
||||
}
|
||||
},
|
||||
Options: []fs.Option{{
|
||||
Name: config.ConfigClientID,
|
||||
Help: "Google Application Client Id - leave blank normally.",
|
||||
}, {
|
||||
Name: config.ConfigClientSecret,
|
||||
Help: "Google Application Client Secret - leave blank normally.",
|
||||
}, {
|
||||
Name: "project_number",
|
||||
Help: "Project number optional - needed only for list/create/delete buckets - see your developer console.",
|
||||
}, {
|
||||
Name: "service_account_file",
|
||||
Help: "Service Account Credentials JSON file path - needed only if you want use SA instead of interactive login.",
|
||||
}, {
|
||||
Name: "object_acl",
|
||||
Help: "Access Control List for new objects.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "authenticatedRead",
|
||||
Help: "Object owner gets OWNER access, and all Authenticated Users get READER access.",
|
||||
}, {
|
||||
Value: "bucketOwnerFullControl",
|
||||
Help: "Object owner gets OWNER access, and project team owners get OWNER access.",
|
||||
}, {
|
||||
Value: "bucketOwnerRead",
|
||||
Help: "Object owner gets OWNER access, and project team owners get READER access.",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Object owner gets OWNER access [default if left blank].",
|
||||
}, {
|
||||
Value: "projectPrivate",
|
||||
Help: "Object owner gets OWNER access, and project team members get access according to their roles.",
|
||||
}, {
|
||||
Value: "publicRead",
|
||||
Help: "Object owner gets OWNER access, and all Users get READER access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "bucket_acl",
|
||||
Help: "Access Control List for new buckets.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "authenticatedRead",
|
||||
Help: "Project team owners get OWNER access, and all Authenticated Users get READER access.",
|
||||
}, {
|
||||
Value: "private",
|
||||
Help: "Project team owners get OWNER access [default if left blank].",
|
||||
}, {
|
||||
Value: "projectPrivate",
|
||||
Help: "Project team members get access according to their roles.",
|
||||
}, {
|
||||
Value: "publicRead",
|
||||
Help: "Project team owners get OWNER access, and all Users get READER access.",
|
||||
}, {
|
||||
Value: "publicReadWrite",
|
||||
Help: "Project team owners get OWNER access, and all Users get WRITER access.",
|
||||
}},
|
||||
}, {
|
||||
Name: "location",
|
||||
Help: "Location for the newly created buckets.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Empty for default location (US).",
|
||||
}, {
|
||||
Value: "asia",
|
||||
Help: "Multi-regional location for Asia.",
|
||||
}, {
|
||||
Value: "eu",
|
||||
Help: "Multi-regional location for Europe.",
|
||||
}, {
|
||||
Value: "us",
|
||||
Help: "Multi-regional location for United States.",
|
||||
}, {
|
||||
Value: "asia-east1",
|
||||
Help: "Taiwan.",
|
||||
}, {
|
||||
Value: "asia-northeast1",
|
||||
Help: "Tokyo.",
|
||||
}, {
|
||||
Value: "asia-southeast1",
|
||||
Help: "Singapore.",
|
||||
}, {
|
||||
Value: "australia-southeast1",
|
||||
Help: "Sydney.",
|
||||
}, {
|
||||
Value: "europe-west1",
|
||||
Help: "Belgium.",
|
||||
}, {
|
||||
Value: "europe-west2",
|
||||
Help: "London.",
|
||||
}, {
|
||||
Value: "us-central1",
|
||||
Help: "Iowa.",
|
||||
}, {
|
||||
Value: "us-east1",
|
||||
Help: "South Carolina.",
|
||||
}, {
|
||||
Value: "us-east4",
|
||||
Help: "Northern Virginia.",
|
||||
}, {
|
||||
Value: "us-west1",
|
||||
Help: "Oregon.",
|
||||
}},
|
||||
}, {
|
||||
Name: "storage_class",
|
||||
Help: "The storage class to use when storing objects in Google Cloud Storage.",
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "",
|
||||
Help: "Default",
|
||||
}, {
|
||||
Value: "MULTI_REGIONAL",
|
||||
Help: "Multi-regional storage class",
|
||||
}, {
|
||||
Value: "REGIONAL",
|
||||
Help: "Regional storage class",
|
||||
}, {
|
||||
Value: "NEARLINE",
|
||||
Help: "Nearline storage class",
|
||||
}, {
|
||||
Value: "COLDLINE",
|
||||
Help: "Coldline storage class",
|
||||
}, {
|
||||
Value: "DURABLE_REDUCED_AVAILABILITY",
|
||||
Help: "Durable reduced availability storage class",
|
||||
}},
|
||||
}},
|
||||
})
|
||||
}
|
||||
|
||||
// Fs represents a remote storage server
|
||||
type Fs struct {
|
||||
name string // name of this remote
|
||||
root string // the path we are working on if any
|
||||
features *fs.Features // optional features
|
||||
svc *storage.Service // the connection to the storage server
|
||||
client *http.Client // authorized client
|
||||
bucket string // the bucket we are working on
|
||||
bucketOKMu sync.Mutex // mutex to protect bucket OK
|
||||
bucketOK bool // true if we have created the bucket
|
||||
projectNumber string // used for finding buckets
|
||||
objectACL string // used when creating new objects
|
||||
bucketACL string // used when creating new buckets
|
||||
location string // location of new buckets
|
||||
storageClass string // storage class of new buckets
|
||||
pacer *pacer.Pacer // To pace the API calls
|
||||
}
|
||||
|
||||
// Object describes a storage object
|
||||
//
|
||||
// Will definitely have info but maybe not meta
|
||||
type Object struct {
|
||||
fs *Fs // what this object is part of
|
||||
remote string // The remote path
|
||||
url string // download path
|
||||
md5sum string // The MD5Sum of the object
|
||||
bytes int64 // Bytes in the object
|
||||
modTime time.Time // Modified time of the object
|
||||
mimeType string
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
if f.root == "" {
|
||||
return f.bucket
|
||||
}
|
||||
return f.bucket + "/" + f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
if f.root == "" {
|
||||
return fmt.Sprintf("Storage bucket %s", f.bucket)
|
||||
}
|
||||
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// shouldRetry determines whether a given err rates being retried
|
||||
func shouldRetry(err error) (again bool, errOut error) {
|
||||
again = false
|
||||
if err != nil {
|
||||
if fserrors.ShouldRetry(err) {
|
||||
again = true
|
||||
} else {
|
||||
switch gerr := err.(type) {
|
||||
case *googleapi.Error:
|
||||
if gerr.Code >= 500 && gerr.Code < 600 {
|
||||
// All 5xx errors should be retried
|
||||
again = true
|
||||
} else if len(gerr.Errors) > 0 {
|
||||
reason := gerr.Errors[0].Reason
|
||||
if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
|
||||
again = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return again, err
|
||||
}
|
||||
|
||||
// Pattern to match a storage path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses a storage 'url'
|
||||
func parsePath(path string) (bucket, directory string, err error) {
|
||||
parts := matcher.FindStringSubmatch(path)
|
||||
if parts == nil {
|
||||
err = errors.Errorf("couldn't find bucket in storage path %q", path)
|
||||
} else {
|
||||
bucket, directory = parts[1], parts[2]
|
||||
directory = strings.Trim(directory, "/")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
}
|
||||
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
// NewFs constructs an Fs from the path, bucket:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
var err error
|
||||
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
|
||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
||||
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
serviceAccountCreds = loadedCreds
|
||||
}
|
||||
if len(serviceAccountCreds) > 0 {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
}
|
||||
}
|
||||
|
||||
bucket, directory, err := parsePath(root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
bucket: bucket,
|
||||
root: directory,
|
||||
projectNumber: config.FileGet(name, "project_number"),
|
||||
objectACL: config.FileGet(name, "object_acl"),
|
||||
bucketACL: config.FileGet(name, "bucket_acl"),
|
||||
location: config.FileGet(name, "location"),
|
||||
storageClass: config.FileGet(name, "storage_class"),
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
ReadMimeType: true,
|
||||
WriteMimeType: true,
|
||||
BucketBased: true,
|
||||
}).Fill(f)
|
||||
if f.objectACL == "" {
|
||||
f.objectACL = "private"
|
||||
}
|
||||
if f.bucketACL == "" {
|
||||
f.bucketACL = "private"
|
||||
}
|
||||
if *gcsLocation != "" {
|
||||
f.location = *gcsLocation
|
||||
}
|
||||
if *gcsStorageClass != "" {
|
||||
f.storageClass = *gcsStorageClass
|
||||
}
|
||||
|
||||
// Create a new authorized Drive client.
|
||||
f.client = oAuthClient
|
||||
f.svc, err = storage.New(f.client)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
|
||||
}
|
||||
|
||||
if f.root != "" {
|
||||
f.root += "/"
|
||||
// Check to see if the object exists
|
||||
err = f.pacer.Call(func() (bool, error) {
|
||||
_, err = f.svc.Objects.Get(bucket, directory).Do()
|
||||
return shouldRetry(err)
|
||||
})
|
||||
if err == nil {
|
||||
f.root = path.Dir(directory)
|
||||
if f.root == "." {
|
||||
f.root = ""
|
||||
} else {
|
||||
f.root += "/"
|
||||
}
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	if info != nil {
		o.setMetaData(info)
	} else {
		err := o.readMetaData() // reads info and meta, returning an error
		if err != nil {
			return nil, err
		}
	}
	return o, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	return f.newObjectWithInfo(remote, nil)
}

// listFn is called from list to handle an object.
type listFn func(remote string, object *storage.Object, isDirectory bool) error

// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
	root := f.root
	rootLength := len(root)
	if dir != "" {
		root += dir + "/"
	}
	list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
	if !recurse {
		list = list.Delimiter("/")
	}
	for {
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Do()
			return shouldRetry(err)
		})
		if err != nil {
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {
					err = fs.ErrorDirNotFound
				}
			}
			return err
		}
		if !recurse {
			var object storage.Object
			for _, prefix := range objects.Prefixes {
				if !strings.HasSuffix(prefix, "/") {
					continue
				}
				err = fn(prefix[rootLength:len(prefix)-1], &object, true)
				if err != nil {
					return err
				}
			}
		}
		for _, object := range objects.Items {
			if !strings.HasPrefix(object.Name, root) {
				fs.Logf(f, "Odd name received %q", object.Name)
				continue
			}
			remote := object.Name[rootLength:]
			// is this a directory marker?
			if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
				if recurse && remote != "" {
					// add a directory in if --fast-list since there will be no prefixes
					err = fn(remote[:len(remote)-1], object, true)
					if err != nil {
						return err
					}
				}
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
			}
		}
		if objects.NextPageToken == "" {
			break
		}
		list.PageToken(objects.NextPageToken)
	}
	return nil
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
	if isDirectory {
		d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
		return d, nil
	}
	o, err := f.newObjectWithInfo(remote, object)
	if err != nil {
		return nil, err
	}
	return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketOKMu.Unlock()
	}
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	// List the objects
	err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		if entry != nil {
			entries = append(entries, entry)
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return entries, err
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	if dir != "" {
		return nil, fs.ErrorListBucketRequired
	}
	if f.projectNumber == "" {
		return nil, errors.New("can't list buckets without project number")
	}
	listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
	for {
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
			buckets, err = listBuckets.Do()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}
		for _, bucket := range buckets.Items {
			d := fs.NewDir(bucket.Name, time.Time{})
			entries = append(entries, d)
		}
		if buckets.NextPageToken == "" {
			break
		}
		listBuckets.PageToken(buckets.NextPageToken)
	}
	return entries, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	if f.bucket == "" {
		return f.listBuckets(dir)
	}
	return f.listDir(dir)
}

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := walk.NewListRHelper(callback)
	err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {
			return err
		}
		return list.Add(entry)
	})
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

// Put the object into the bucket
//
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	// Temporary Object under construction
	o := &Object{
		fs:     f,
		remote: src.Remote(),
	}
	return o, o.Update(in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return f.Put(in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}
	// List something from the bucket to see if it exists. Doing it like this enables the use of a
	// service account that only has the "Storage Object Admin" role. See #2193 for details.

	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
		return shouldRetry(err)
	})
	if err == nil {
		// Bucket already exists
		f.bucketOK = true
		return nil
	} else if gErr, ok := err.(*googleapi.Error); ok {
		if gErr.Code != http.StatusNotFound {
			return errors.Wrap(err, "failed to get bucket")
		}
	} else {
		return errors.Wrap(err, "failed to get bucket")
	}

	if f.projectNumber == "" {
		return errors.New("can't make bucket without project number")
	}

	bucket := storage.Bucket{
		Name:         f.bucket,
		Location:     f.location,
		StorageClass: f.storageClass,
	}
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = true
	}
	return err
}

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}
	err = f.pacer.Call(func() (bool, error) {
		err = f.svc.Buckets.Delete(f.bucket).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = false
	}
	return err
}

// Precision returns the precision
func (f *Fs) Precision() time.Duration {
	return time.Nanosecond
}

// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	err := f.Mkdir("")
	if err != nil {
		return nil, err
	}
	srcObj, ok := src.(*Object)
	if !ok {
		fs.Debugf(src, "Can't copy - not same remote type")
		return nil, fs.ErrorCantCopy
	}

	// Temporary Object under construction
	dstObj := &Object{
		fs:     f,
		remote: remote,
	}

	srcBucket := srcObj.fs.bucket
	srcObject := srcObj.fs.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	// Set the metadata for the new object while we have it
	dstObj.setMetaData(newObject)
	return dstObj, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
	return o.fs
}

// Return a string version
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote returns the remote path
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
	return o.bytes
}

// setMetaData sets the fs data from a storage.Object
func (o *Object) setMetaData(info *storage.Object) {
	o.url = info.MediaLink
	o.bytes = int64(info.Size)
	o.mimeType = info.ContentType

	// Read md5sum
	md5sumData, err := base64.StdEncoding.DecodeString(info.Md5Hash)
	if err != nil {
		fs.Logf(o, "Bad MD5 decode: %v", err)
	} else {
		o.md5sum = hex.EncodeToString(md5sumData)
	}

	// read mtime out of metadata if available
	mtimeString, ok := info.Metadata[metaMtime]
	if ok {
		modTime, err := time.Parse(timeFormatIn, mtimeString)
		if err == nil {
			o.modTime = modTime
			return
		}
		fs.Debugf(o, "Failed to read mtime from metadata: %s", err)
	}

	// Fallback to the Updated time
	modTime, err := time.Parse(timeFormatIn, info.Updated)
	if err != nil {
		fs.Logf(o, "Bad time decode: %v", err)
	} else {
		o.modTime = modTime
	}
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	var object *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {
				return fs.ErrorObjectNotFound
			}
		}
		return err
	}
	o.setMetaData(object)
	return nil
}

// ModTime returns the modification time of the object
//
// It attempts to read the object's mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
	err := o.readMetaData()
	if err != nil {
		// fs.Logf(o, "Failed to read metadata: %v", err)
		return time.Now()
	}
	return o.modTime
}

// Returns metadata for an object
func metadataFromModTime(modTime time.Time) map[string]string {
	metadata := make(map[string]string, 1)
	metadata[metaMtime] = modTime.Format(timeFormatOut)
	return metadata
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) (err error) {
	// This only adds metadata so will preserve other metadata
	object := storage.Object{
		Bucket:   o.fs.bucket,
		Name:     o.fs.root + o.remote,
		Metadata: metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	o.setMetaData(newObject)
	return nil
}

// Storable returns a boolean as to whether this object is storable
func (o *Object) Storable() bool {
	return true
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	req, err := http.NewRequest("GET", o.url, nil)
	if err != nil {
		return nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}
	_, isRanging := req.Header["Range"]
	if !(res.StatusCode == http.StatusOK || (isRanging && res.StatusCode == http.StatusPartialContent)) {
		_ = res.Body.Close() // ignore error
		return nil, errors.Errorf("bad response: %d: %s", res.StatusCode, res.Status)
	}
	return res.Body, nil
}

// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	err := o.fs.Mkdir("")
	if err != nil {
		return err
	}
	modTime := src.ModTime()

	object := storage.Object{
		Bucket:      o.fs.bucket,
		Name:        o.fs.root + o.remote,
		ContentType: fs.MimeType(src),
		Updated:     modTime.Format(timeFormatOut), // Doesn't get set
		Metadata:    metadataFromModTime(modTime),
	}
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}
	// Set the metadata for the new object while we have it
	o.setMetaData(newObject)
	return nil
}

// Remove an object
func (o *Object) Remove() (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
	return o.mimeType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.Copier      = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.ListRer     = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)
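
The modification-time handling above round-trips the mtime through object user metadata: metadataFromModTime writes it and setMetaData reads it back, falling back to the object's Updated field. A minimal, self-contained sketch of that round trip follows; the "mtime" key and the RFC3339-style formats are assumed stand-ins for the backend's unexported constants, which are defined elsewhere in the file and not shown in this excerpt.

package main

import (
	"fmt"
	"time"
)

// Assumed stand-ins for the backend's unexported constants.
const (
	metaMtime     = "mtime"
	timeFormatOut = time.RFC3339Nano
	timeFormatIn  = time.RFC3339Nano
)

// metadataFromModTime mirrors the helper above: the modification time is
// stored as a formatted string in the object's user metadata.
func metadataFromModTime(modTime time.Time) map[string]string {
	return map[string]string{metaMtime: modTime.Format(timeFormatOut)}
}

func main() {
	meta := metadataFromModTime(time.Now())
	// setMetaData does the reverse: parse the stored string, falling back
	// to the object's Updated field if the key is missing or unparseable.
	if s, ok := meta[metaMtime]; ok {
		if t, err := time.Parse(timeFormatIn, s); err == nil {
			fmt.Println("recovered mtime:", t)
		}
	}
}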
backend/googlecloudstorage/googlecloudstorage_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test GoogleCloudStorage filesystem interface
package googlecloudstorage_test

import (
	"testing"

	"github.com/ncw/rclone/backend/googlecloudstorage"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoogleCloudStorage:",
		NilObject:  (*googlecloudstorage.Object)(nil),
	})
}
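
As the Opt fields suggest, fstests.Run presumably drives rclone's standard integration-test suite against a remote configured under the name TestGoogleCloudStorage:, with NilObject letting the suite exercise typed-nil handling, so the test only does anything useful when such a remote has been set up.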
backend/http/http.go (new file, 489 lines)
@@ -0,0 +1,489 @@
// Package http provides a filesystem interface using net/http
|
||||
//
|
||||
// It treats HTML pages served from the endpoint as directory
|
||||
// listings, and includes any links found as files.
|
||||
package http
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
var (
|
||||
errorReadOnly = errors.New("http remotes are read only")
|
||||
timeUnset = time.Unix(0, 0)
|
||||
)
|
||||
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "http",
|
||||
Description: "http Connection",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "url",
|
||||
Help: "URL of http host to connect to",
|
||||
Optional: false,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "https://example.com",
|
||||
Help: "Connect to example.com",
|
||||
}},
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote HTTP files
|
||||
type Fs struct {
|
||||
name string
|
||||
root string
|
||||
features *fs.Features // optional features
|
||||
endpoint *url.URL
|
||||
endpointURL string // endpoint as a string
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
|
||||
type Object struct {
|
||||
fs *Fs
|
||||
remote string
|
||||
size int64
|
||||
modTime time.Time
|
||||
contentType string
|
||||
}
|
||||
|
||||
// statusError returns an error if the res contained an error
|
||||
func statusError(res *http.Response, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if res.StatusCode < 200 || res.StatusCode > 299 {
|
||||
_ = res.Body.Close()
|
||||
return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
endpoint := config.FileGet(name, "url")
|
||||
if !strings.HasSuffix(endpoint, "/") {
|
||||
endpoint += "/"
|
||||
}
|
||||
|
||||
// Parse the endpoint and stick the root onto it
|
||||
base, err := url.Parse(endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u, err := rest.URLJoin(base, rest.URLPathEscape(root))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := fshttp.NewClient(fs.Config)
|
||||
|
||||
var isFile = false
|
||||
if !strings.HasSuffix(u.String(), "/") {
|
||||
// Make a client which doesn't follow redirects so the server
|
||||
// doesn't redirect http://host/dir to http://host/dir/
|
||||
noRedir := *client
|
||||
noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
// check to see if points to a file
|
||||
res, err := noRedir.Head(u.String())
|
||||
err = statusError(res, err)
|
||||
if err == nil {
|
||||
isFile = true
|
||||
}
|
||||
}
|
||||
|
||||
newRoot := u.String()
|
||||
if isFile {
|
||||
// Point to the parent if this is a file
|
||||
newRoot, _ = path.Split(u.String())
|
||||
} else {
|
||||
if !strings.HasSuffix(newRoot, "/") {
|
||||
newRoot += "/"
|
||||
}
|
||||
}
|
||||
|
||||
u, err = url.Parse(newRoot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f := &Fs{
|
||||
name: name,
|
||||
root: root,
|
||||
httpClient: client,
|
||||
endpoint: u,
|
||||
endpointURL: u.String(),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if isFile {
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
if !strings.HasSuffix(f.endpointURL, "/") {
|
||||
return nil, errors.New("internal error: url doesn't end with /")
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name returns the configured name of the file system
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root returns the root for the filesystem
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String returns the URL for the filesystem
|
||||
func (f *Fs) String() string {
|
||||
return f.endpointURL
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
|
||||
func (f *Fs) Precision() time.Duration {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// NewObject creates a new remote http file object
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
err := o.stat()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Stat failed")
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// url joins the remote onto the base URL
|
||||
func (f *Fs) url(remote string) string {
|
||||
return f.endpointURL + rest.URLPathEscape(remote)
|
||||
}
|
||||
|
||||
// parse s into an int64, on failure return def
|
||||
func parseInt64(s string, def int64) int64 {
|
||||
n, e := strconv.ParseInt(s, 10, 64)
|
||||
if e != nil {
|
||||
return def
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Errors returned by parseName
|
||||
var (
|
||||
errURLJoinFailed = errors.New("URLJoin failed")
|
||||
errFoundQuestionMark = errors.New("found ? in URL")
|
||||
errHostMismatch = errors.New("host mismatch")
|
||||
errSchemeMismatch = errors.New("scheme mismatch")
|
||||
errNotUnderRoot = errors.New("not under root")
|
||||
errNameIsEmpty = errors.New("name is empty")
|
||||
errNameContainsSlash = errors.New("name contains /")
|
||||
)
|
||||
|
||||
// parseName turns a name as found in the page into a remote path or returns an error
|
||||
func parseName(base *url.URL, name string) (string, error) {
|
||||
// make URL absolute
|
||||
u, err := rest.URLJoin(base, name)
|
||||
if err != nil {
|
||||
return "", errURLJoinFailed
|
||||
}
|
||||
// check it doesn't have URL parameters
|
||||
uStr := u.String()
|
||||
if strings.Index(uStr, "?") >= 0 {
|
||||
return "", errFoundQuestionMark
|
||||
}
|
||||
// check that this is going back to the same host and scheme
|
||||
if base.Host != u.Host {
|
||||
return "", errHostMismatch
|
||||
}
|
||||
if base.Scheme != u.Scheme {
|
||||
return "", errSchemeMismatch
|
||||
}
|
||||
// check has path prefix
|
||||
if !strings.HasPrefix(u.Path, base.Path) {
|
||||
return "", errNotUnderRoot
|
||||
}
|
||||
// calculate the name relative to the base
|
||||
name = u.Path[len(base.Path):]
|
||||
// mustn't be empty
|
||||
if name == "" {
|
||||
return "", errNameIsEmpty
|
||||
}
|
||||
// mustn't contain a / - we are looking for a single level directory
|
||||
slash := strings.Index(name, "/")
|
||||
if slash >= 0 && slash != len(name)-1 {
|
||||
return "", errNameContainsSlash
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// Parse turns HTML for a directory into names
|
||||
// base should be the base URL to resolve any relative names from
|
||||
func parse(base *url.URL, in io.Reader) (names []string, err error) {
|
||||
doc, err := html.Parse(in)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var walk func(*html.Node)
|
||||
walk = func(n *html.Node) {
|
||||
if n.Type == html.ElementNode && n.Data == "a" {
|
||||
for _, a := range n.Attr {
|
||||
if a.Key == "href" {
|
||||
name, err := parseName(base, a.Val)
|
||||
if err == nil {
|
||||
names = append(names, name)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
for c := n.FirstChild; c != nil; c = c.NextSibling {
|
||||
walk(c)
|
||||
}
|
||||
}
|
||||
walk(doc)
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// Read the directory passed in
|
||||
func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||
URL := f.url(dir)
|
||||
u, err := url.Parse(URL)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to readDir")
|
||||
}
|
||||
if !strings.HasSuffix(URL, "/") {
|
||||
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||
}
|
||||
res, err := f.httpClient.Get(URL)
|
||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to readDir")
|
||||
}
|
||||
defer fs.CheckClose(res.Body, &err)
|
||||
|
||||
contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
|
||||
switch contentType {
|
||||
case "text/html":
|
||||
names, err = parse(u, res.Body)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "readDir")
|
||||
}
|
||||
default:
|
||||
return nil, errors.Errorf("Can't parse content type %q", contentType)
|
||||
}
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if !strings.HasSuffix(dir, "/") && dir != "" {
|
||||
dir += "/"
|
||||
}
|
||||
names, err := f.readDir(dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error listing %q", dir)
|
||||
}
|
||||
for _, name := range names {
|
||||
isDir := name[len(name)-1] == '/'
|
||||
name = strings.TrimRight(name, "/")
|
||||
remote := path.Join(dir, name)
|
||||
if isDir {
|
||||
dir := fs.NewDir(remote, timeUnset)
|
||||
entries = append(entries, dir)
|
||||
} else {
|
||||
file := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
if err = file.stat(); err != nil {
|
||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||
continue
|
||||
}
|
||||
entries = append(entries, file)
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
//
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
// Fs is the filesystem this remote http file object is located within
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// String returns the URL to the remote HTTP file
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote the name of the remote HTTP file, relative to the fs root
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns an empty string since HTTP doesn't support remote calculation of hashes
|
||||
func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
// Size returns the size in bytes of the remote http file
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the remote http file
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// url returns the native url of the object
|
||||
func (o *Object) url() string {
|
||||
return o.fs.url(o.remote)
|
||||
}
|
||||
|
||||
// stat updates the info field in the Object
|
||||
func (o *Object) stat() error {
|
||||
url := o.url()
|
||||
res, err := o.fs.httpClient.Head(url)
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to stat")
|
||||
}
|
||||
t, err := http.ParseTime(res.Header.Get("Last-Modified"))
|
||||
if err != nil {
|
||||
t = timeUnset
|
||||
}
|
||||
o.size = parseInt64(res.Header.Get("Content-Length"), -1)
|
||||
o.modTime = t
|
||||
o.contentType = res.Header.Get("Content-Type")
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetModTime sets the modification and access time to the specified time
|
||||
//
|
||||
// it also updates the info field
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
|
||||
func (o *Object) Storable() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Open a remote http file object for reading. Seek is supported
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
url := o.url()
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Open failed")
|
||||
}
|
||||
|
||||
// Add optional headers
|
||||
for k, v := range fs.OpenOptionHeaders(options) {
|
||||
req.Header.Add(k, v)
|
||||
}
|
||||
|
||||
// Do the request
|
||||
res, err := o.fs.httpClient.Do(req)
|
||||
err = statusError(res, err)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "Open failed")
|
||||
}
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
// Hashes returns hash.HashNone to indicate remote hashing is unavailable
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.None)
|
||||
}
|
||||
|
||||
// Mkdir makes the root directory of the Fs object
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Remove a remote http file object
|
||||
func (o *Object) Remove() error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Rmdir removes the root directory of the Fs object
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// Update in to the object with the modTime given of the given size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
return errorReadOnly
|
||||
}
|
||||
|
||||
// MimeType of an Object if known, "" otherwise
|
||||
func (o *Object) MimeType() string {
|
||||
return o.contentType
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
_ fs.MimeTyper = &Object{}
|
||||
)
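
The heart of this backend is the listing parser above: parse walks the HTML tree returned by the server and collects the href of every <a> element, and parseName then rejects anything that isn't a plain, single-level name under the base URL. A self-contained sketch of that walk (with the parseName filtering step omitted), using the same golang.org/x/net/html package:

package main

import (
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/html"
)

// links collects the href attribute of every <a> element in the document,
// mirroring the walk in parse(). Directories are then recognised by a
// trailing "/", as List does.
func links(r io.Reader) ([]string, error) {
	doc, err := html.Parse(r)
	if err != nil {
		return nil, err
	}
	var names []string
	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					names = append(names, a.Val)
					break
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
	return names, nil
}

func main() {
	page := `<html><body><a href="subdir/">subdir/</a> <a href="file.txt">file.txt</a></body></html>`
	names, err := links(strings.NewReader(page))
	fmt.Println(names, err) // [subdir/ file.txt] <nil>
}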
backend/http/http_internal_test.go (new file, 321 lines)
@@ -0,0 +1,321 @@
|
||||
// +build go1.8
|
||||
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fstest"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
var (
|
||||
remoteName = "TestHTTP"
|
||||
testPath = "test"
|
||||
filesPath = filepath.Join(testPath, "files")
|
||||
)
|
||||
|
||||
// prepareServer prepares the test server and returns a function to tidy it up afterwards
|
||||
func prepareServer(t *testing.T) func() {
|
||||
// file server for test/files
|
||||
fileServer := http.FileServer(http.Dir(filesPath))
|
||||
|
||||
// Make the test server
|
||||
ts := httptest.NewServer(fileServer)
|
||||
|
||||
// Configure the remote
|
||||
config.LoadConfig()
|
||||
// fs.Config.LogLevel = fs.LogLevelDebug
|
||||
// fs.Config.DumpHeaders = true
|
||||
// fs.Config.DumpBodies = true
|
||||
config.FileSet(remoteName, "type", "http")
|
||||
config.FileSet(remoteName, "url", ts.URL)
|
||||
|
||||
// return a function to tidy up
|
||||
return ts.Close
|
||||
}
|
||||
|
||||
// prepare creates the test server and remote Fs and returns a function to tidy them up afterwards
|
||||
func prepare(t *testing.T) (fs.Fs, func()) {
|
||||
tidy := prepareServer(t)
|
||||
|
||||
// Instantiate it
|
||||
f, err := NewFs(remoteName, "")
|
||||
require.NoError(t, err)
|
||||
|
||||
return f, tidy
|
||||
}
|
||||
|
||||
func testListRoot(t *testing.T, f fs.Fs) {
|
||||
entries, err := f.List("")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
|
||||
require.Equal(t, 4, len(entries))
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "four", e.Remote())
|
||||
assert.Equal(t, int64(-1), e.Size())
|
||||
_, ok := e.(fs.Directory)
|
||||
assert.True(t, ok)
|
||||
|
||||
e = entries[1]
|
||||
assert.Equal(t, "one%.txt", e.Remote())
|
||||
assert.Equal(t, int64(6), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
e = entries[2]
|
||||
assert.Equal(t, "three", e.Remote())
|
||||
assert.Equal(t, int64(-1), e.Size())
|
||||
_, ok = e.(fs.Directory)
|
||||
assert.True(t, ok)
|
||||
|
||||
e = entries[3]
|
||||
assert.Equal(t, "two.html", e.Remote())
|
||||
assert.Equal(t, int64(7), e.Size())
|
||||
_, ok = e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestListRoot(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
testListRoot(t, f)
|
||||
}
|
||||
|
||||
func TestListSubDir(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
entries, err := f.List("three")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
|
||||
assert.Equal(t, 1, len(entries))
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "three/underthree.txt", e.Remote())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestNewObject(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
o, err := f.NewObject("four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "four/under four.txt", o.Remote())
|
||||
assert.Equal(t, int64(9), o.Size())
|
||||
_, ok := o.(*Object)
|
||||
assert.True(t, ok)
|
||||
|
||||
// Test the time is correct on the object
|
||||
|
||||
tObj := o.ModTime()
|
||||
|
||||
fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
|
||||
require.NoError(t, err)
|
||||
tFile := fi.ModTime()
|
||||
|
||||
dt, ok := fstest.CheckTimeEqualWithPrecision(tObj, tFile, time.Second)
|
||||
assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))
|
||||
}
|
||||
|
||||
func TestOpen(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
o, err := f.NewObject("four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
// Test normal read
|
||||
fd, err := o.Open()
|
||||
require.NoError(t, err)
|
||||
data, err := ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "beetroot\n", string(data))
|
||||
|
||||
// Test with range request
|
||||
fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
|
||||
require.NoError(t, err)
|
||||
data, err = ioutil.ReadAll(fd)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fd.Close())
|
||||
assert.Equal(t, "eetro", string(data))
|
||||
}
|
||||
|
||||
func TestMimeType(t *testing.T) {
|
||||
f, tidy := prepare(t)
|
||||
defer tidy()
|
||||
|
||||
o, err := f.NewObject("four/under four.txt")
|
||||
require.NoError(t, err)
|
||||
|
||||
do, ok := o.(fs.MimeTyper)
|
||||
require.True(t, ok)
|
||||
assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
|
||||
}
|
||||
|
||||
func TestIsAFileRoot(t *testing.T) {
|
||||
tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
|
||||
f, err := NewFs(remoteName, "one%.txt")
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
testListRoot(t, f)
|
||||
}
|
||||
|
||||
func TestIsAFileSubDir(t *testing.T) {
|
||||
tidy := prepareServer(t)
|
||||
defer tidy()
|
||||
|
||||
f, err := NewFs(remoteName, "three/underthree.txt")
|
||||
assert.Equal(t, err, fs.ErrorIsFile)
|
||||
|
||||
entries, err := f.List("")
|
||||
require.NoError(t, err)
|
||||
|
||||
sort.Sort(entries)
|
||||
|
||||
assert.Equal(t, 1, len(entries))
|
||||
|
||||
e := entries[0]
|
||||
assert.Equal(t, "underthree.txt", e.Remote())
|
||||
assert.Equal(t, int64(9), e.Size())
|
||||
_, ok := e.(*Object)
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func TestParseName(t *testing.T) {
|
||||
for i, test := range []struct {
|
||||
base string
|
||||
val string
|
||||
wantErr error
|
||||
want string
|
||||
}{
|
||||
{"http://example.com/", "potato", nil, "potato"},
|
||||
{"http://example.com/dir/", "potato", nil, "potato"},
|
||||
{"http://example.com/dir/", "potato?download=true", errFoundQuestionMark, ""},
|
||||
{"http://example.com/dir/", "../dir/potato", nil, "potato"},
|
||||
{"http://example.com/dir/", "..", errNotUnderRoot, ""},
|
||||
{"http://example.com/dir/", "http://example.com/", errNotUnderRoot, ""},
|
||||
{"http://example.com/dir/", "http://example.com/dir/", errNameIsEmpty, ""},
|
||||
{"http://example.com/dir/", "http://example.com/dir/potato", nil, "potato"},
|
||||
{"http://example.com/dir/", "https://example.com/dir/potato", errSchemeMismatch, ""},
|
||||
{"http://example.com/dir/", "http://notexample.com/dir/potato", errHostMismatch, ""},
|
||||
{"http://example.com/dir/", "/dir/", errNameIsEmpty, ""},
|
||||
{"http://example.com/dir/", "/dir/potato", nil, "potato"},
|
||||
{"http://example.com/dir/", "subdir/potato", errNameContainsSlash, ""},
|
||||
{"http://example.com/dir/", "With percent %25.txt", nil, "With percent %.txt"},
|
||||
{"http://example.com/dir/", "With colon :", errURLJoinFailed, ""},
|
||||
{"http://example.com/dir/", rest.URLPathEscape("With colon :"), nil, "With colon :"},
|
||||
{"http://example.com/Dungeons%20%26%20Dragons/", "/Dungeons%20&%20Dragons/D%26D%20Basic%20%28Holmes%2C%20B%2C%20X%2C%20BECMI%29/", nil, "D&D Basic (Holmes, B, X, BECMI)/"},
|
||||
} {
|
||||
u, err := url.Parse(test.base)
|
||||
require.NoError(t, err)
|
||||
got, gotErr := parseName(u, test.val)
|
||||
what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.val)
|
||||
assert.Equal(t, test.wantErr, gotErr, what)
|
||||
assert.Equal(t, test.want, got, what)
|
||||
}
|
||||
}
|
||||
|
||||
// Load HTML from the file given and parse it, checking it against the entries passed in
|
||||
func parseHTML(t *testing.T, name string, base string, want []string) {
|
||||
in, err := os.Open(filepath.Join(testPath, "index_files", name))
|
||||
require.NoError(t, err)
|
||||
defer func() {
|
||||
require.NoError(t, in.Close())
|
||||
}()
|
||||
if base == "" {
|
||||
base = "http://example.com/"
|
||||
}
|
||||
u, err := url.Parse(base)
|
||||
require.NoError(t, err)
|
||||
entries, err := parse(u, in)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, want, entries)
|
||||
}
|
||||
|
||||
func TestParseEmpty(t *testing.T) {
|
||||
parseHTML(t, "empty.html", "", []string(nil))
|
||||
}
|
||||
|
||||
func TestParseApache(t *testing.T) {
|
||||
parseHTML(t, "apache.html", "http://example.com/nick/pub/", []string{
|
||||
"SWIG-embed.tar.gz",
|
||||
"avi2dvd.pl",
|
||||
"cambert.exe",
|
||||
"cambert.gz",
|
||||
"fedora_demo.gz",
|
||||
"gchq-challenge/",
|
||||
"mandelterm/",
|
||||
"pgp-key.txt",
|
||||
"pymath/",
|
||||
"rclone",
|
||||
"readdir.exe",
|
||||
"rush_hour_solver_cut_down.py",
|
||||
"snake-puzzle/",
|
||||
"stressdisk/",
|
||||
"timer-test",
|
||||
"words-to-regexp.pl",
|
||||
"Now 100% better.mp3",
|
||||
"Now better.mp3",
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseMemstore(t *testing.T) {
|
||||
parseHTML(t, "memstore.html", "", []string{
|
||||
"test/",
|
||||
"v1.35/",
|
||||
"v1.36-01-g503cd84/",
|
||||
"rclone-beta-latest-freebsd-386.zip",
|
||||
"rclone-beta-latest-freebsd-amd64.zip",
|
||||
"rclone-beta-latest-windows-amd64.zip",
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseNginx(t *testing.T) {
|
||||
parseHTML(t, "nginx.html", "", []string{
|
||||
"deltas/",
|
||||
"objects/",
|
||||
"refs/",
|
||||
"state/",
|
||||
"config",
|
||||
"summary",
|
||||
})
|
||||
}
|
||||
|
||||
func TestParseCaddy(t *testing.T) {
|
||||
parseHTML(t, "caddy.html", "", []string{
|
||||
"mimetype.zip",
|
||||
"rclone-delete-empty-dirs.py",
|
||||
"rclone-show-empty-dirs.py",
|
||||
"stat-windows-386.zip",
|
||||
"v1.36-155-gcf29ee8b-team-driveβ/",
|
||||
"v1.36-156-gca76b3fb-team-driveβ/",
|
||||
"v1.36-156-ge1f0e0f5-team-driveβ/",
|
||||
"v1.36-22-g06ea13a-ssh-agentβ/",
|
||||
})
|
backend/http/test/files/four/under four.txt (new file, 1 line)
@@ -0,0 +1 @@
beetroot
backend/http/test/files/one%.txt (new file, 1 line)
@@ -0,0 +1 @@
hello
backend/http/test/files/three/underthree.txt (new file, 1 line)
@@ -0,0 +1 @@
rutabaga
backend/http/test/files/two.html (new file, 1 line)
@@ -0,0 +1 @@
potato
backend/http/test/index_files/apache.html (new file, 32 lines)
@@ -0,0 +1,32 @@
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
|
||||
<html>
|
||||
<head>
|
||||
<title>Index of /nick/pub</title>
|
||||
</head>
|
||||
<body>
|
||||
<h1>Index of /nick/pub</h1>
|
||||
<table><tr><th><img src="/icons/blank.gif" alt="[ICO]"></th><th><a href="?C=N;O=D">Name</a></th><th><a href="?C=M;O=A">Last modified</a></th><th><a href="?C=S;O=A">Size</a></th><th><a href="?C=D;O=A">Description</a></th></tr><tr><th colspan="5"><hr></th></tr>
|
||||
<tr><td valign="top"><img src="/icons/back.gif" alt="[DIR]"></td><td><a href="/nick/">Parent Directory</a></td><td> </td><td align="right"> - </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="SWIG-embed.tar.gz">SWIG-embed.tar.gz</a></td><td align="right">29-Nov-2005 16:27 </td><td align="right">2.3K</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="avi2dvd.pl">avi2dvd.pl</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 17K</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="cambert.exe">cambert.exe</a></td><td align="right">15-Dec-2006 18:07 </td><td align="right"> 54K</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="cambert.gz">cambert.gz</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right"> 18K</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/compressed.gif" alt="[ ]"></td><td><a href="fedora_demo.gz">fedora_demo.gz</a></td><td align="right">08-Jun-2007 11:01 </td><td align="right">1.0M</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="gchq-challenge/">gchq-challenge/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="mandelterm/">mandelterm/</a></td><td align="right">13-Jul-2013 22:22 </td><td align="right"> - </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="pgp-key.txt">pgp-key.txt</a></td><td align="right">14-Apr-2010 23:07 </td><td align="right">400 </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="pymath/">pymath/</a></td><td align="right">24-Dec-2016 15:24 </td><td align="right"> - </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="rclone">rclone</a></td><td align="right">09-May-2017 17:15 </td><td align="right"> 22M</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/binary.gif" alt="[ ]"></td><td><a href="readdir.exe">readdir.exe</a></td><td align="right">21-Oct-2016 14:47 </td><td align="right">1.6M</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="rush_hour_solver_cut_down.py">rush_hour_solver_cut_down.py</a></td><td align="right">23-Jul-2009 11:44 </td><td align="right"> 14K</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="snake-puzzle/">snake-puzzle/</a></td><td align="right">25-Sep-2016 20:56 </td><td align="right"> - </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/folder.gif" alt="[DIR]"></td><td><a href="stressdisk/">stressdisk/</a></td><td align="right">08-Nov-2016 14:25 </td><td align="right"> - </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td> </td></tr>
|
||||
<tr><th colspan="5"><hr></th></tr>
|
||||
<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
|
||||
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
||||
<tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
|
||||
|
||||
</table>
|
||||
</body></html>
|
||||
backend/http/test/index_files/caddy.html (new file, 378 lines)
@@ -0,0 +1,378 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>/</title>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
* { padding: 0; margin: 0; }
|
||||
|
||||
body {
|
||||
font-family: sans-serif;
|
||||
text-rendering: optimizespeed;
|
||||
}
|
||||
|
||||
a {
|
||||
color: #006ed3;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
a:hover,
|
||||
h1 a:hover {
|
||||
color: #319cff;
|
||||
}
|
||||
|
||||
header,
|
||||
#summary {
|
||||
padding-left: 5%;
|
||||
padding-right: 5%;
|
||||
}
|
||||
|
||||
th:first-child,
|
||||
td:first-child {
|
||||
padding-left: 5%;
|
||||
}
|
||||
|
||||
th:last-child,
|
||||
td:last-child {
|
||||
padding-right: 5%;
|
||||
}
|
||||
|
||||
header {
|
||||
padding-top: 25px;
|
||||
padding-bottom: 15px;
|
||||
background-color: #f2f2f2;
|
||||
}
|
||||
|
||||
h1 {
|
||||
font-size: 20px;
|
||||
font-weight: normal;
|
||||
white-space: nowrap;
|
||||
overflow-x: hidden;
|
||||
text-overflow: ellipsis;
|
||||
}
|
||||
|
||||
h1 a {
|
||||
color: inherit;
|
||||
}
|
||||
|
||||
h1 a:hover {
|
||||
text-decoration: underline;
|
||||
}
|
||||
|
||||
main {
|
||||
display: block;
|
||||
}
|
||||
|
||||
.meta {
|
||||
font-size: 12px;
|
||||
font-family: Verdana, sans-serif;
|
||||
border-bottom: 1px solid #9C9C9C;
|
||||
padding-top: 10px;
|
||||
padding-bottom: 10px;
|
||||
}
|
||||
|
||||
.meta-item {
|
||||
margin-right: 1em;
|
||||
}
|
||||
|
||||
#filter {
|
||||
padding: 4px;
|
||||
border: 1px solid #CCC;
|
||||
}
|
||||
|
||||
table {
|
||||
width: 100%;
|
||||
border-collapse: collapse;
|
||||
}
|
||||
|
||||
tr {
|
||||
border-bottom: 1px dashed #dadada;
|
||||
}
|
||||
|
||||
tbody tr:hover {
|
||||
background-color: #ffffec;
|
||||
}
|
||||
|
||||
th,
|
||||
td {
|
||||
text-align: left;
|
||||
padding: 10px 0;
|
||||
}
|
||||
|
||||
th {
|
||||
padding-top: 15px;
|
||||
padding-bottom: 15px;
|
||||
font-size: 16px;
|
||||
white-space: nowrap;
|
||||
}
|
||||
|
||||
th a {
|
||||
color: black;
|
||||
}
|
||||
|
||||
th svg {
|
||||
vertical-align: middle;
|
||||
}
|
||||
|
||||
td {
|
||||
font-size: 14px;
|
||||
}
|
||||
|
||||
td:first-child {
|
||||
width: 50%;
|
||||
}
|
||||
|
||||
th:last-child,
|
||||
td:last-child {
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
td:first-child svg {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
td .name,
|
||||
td .goup {
|
||||
margin-left: 1.75em;
|
||||
word-break: break-all;
|
||||
overflow-wrap: break-word;
|
||||
white-space: pre-wrap;
|
||||
}
|
||||
|
||||
footer {
|
||||
padding: 40px 20px;
|
||||
font-size: 12px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
@media (max-width: 600px) {
|
||||
.hideable {
|
||||
display: none;
|
||||
}
|
||||
|
||||
td:first-child {
|
||||
width: auto;
|
||||
}
|
||||
|
||||
th:nth-child(2),
|
||||
td:nth-child(2) {
|
||||
padding-right: 5%;
|
||||
text-align: right;
|
||||
}
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<svg version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" height="0" width="0" style="position: absolute;">
|
||||
<defs>
|
||||
<!-- Folder -->
|
||||
<linearGradient id="f" y2="640" gradientUnits="userSpaceOnUse" x2="244.84" gradientTransform="matrix(.97319 0 0 1.0135 -.50695 -13.679)" y1="415.75" x1="244.84">
<stop stop-color="#b3ddfd" offset="0"/>
<stop stop-color="#69c" offset="1"/>
</linearGradient>
<linearGradient id="e" y2="571.06" gradientUnits="userSpaceOnUse" x2="238.03" gradientTransform="translate(0,2)" y1="346.05" x1="236.26">
<stop stop-color="#ace" offset="0"/>
<stop stop-color="#369" offset="1"/>
</linearGradient>
<g id="folder" transform="translate(-266.06 -193.36)">
<g transform="matrix(.066019 0 0 .066019 264.2 170.93)">
<g transform="matrix(1.4738 0 0 1.4738 -52.053 -166.93)">
<path fill="#69c" d="m98.424 343.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/>
<rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="409.69" x="54.428" fill="#369"/>
<path fill="url(#e)" d="m98.424 345.78c-11.08 0-20 8.92-20 20v48.5 33.719 105.06c0 11.08 8.92 20 20 20h279.22c11.08 0 20-8.92 20-20v-138.78c0-11.08-8.92-20-20-20h-117.12c-7.5478-1.1844-9.7958-6.8483-10.375-11.312v-5.625-11.562c0-11.08-8.92-20-20-20h-131.72z"/>
<rect rx="12.885" ry="12.199" height="227.28" width="366.69" y="407.69" x="54.428" fill="url(#f)"/>
</g>
</g>
</g>

<!-- File -->
<linearGradient id="a">
<stop stop-color="#cbcbcb" offset="0"/>
<stop stop-color="#f0f0f0" offset=".34923"/>
<stop stop-color="#e2e2e2" offset="1"/>
</linearGradient>
<linearGradient id="d" y2="686.15" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="207.83" gradientTransform="matrix(.28346 0 0 .31053 -608.52 485.11)" x2="380.1" x1="749.25"/>
<linearGradient id="c" y2="287.74" xlink:href="#a" gradientUnits="userSpaceOnUse" y1="169.44" gradientTransform="matrix(.28342 0 0 .31057 -608.52 485.11)" x2="622.33" x1="741.64"/>
<linearGradient id="b" y2="418.54" gradientUnits="userSpaceOnUse" y1="236.13" gradientTransform="matrix(.29343 0 0 .29999 -608.52 485.11)" x2="330.88" x1="687.96">
<stop stop-color="#fff" offset="0"/>
<stop stop-color="#fff" stop-opacity="0" offset="1"/>
</linearGradient>
<g id="file" transform="translate(-278.15 -216.59)">
<g fill-rule="evenodd" transform="matrix(.19775 0 0 .19775 381.05 112.68)">
<path d="m-520.17 525.5v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke-width=".42649" fill="#fff"/>
<g>
<path d="m-520.11 525.68v36.739 36.739 36.739 36.739h33.528 33.528 33.528 33.528v-36.739-36.739-36.739l-33.528-36.739h-33.528-33.528-33.528z" stroke-opacity=".36478" stroke="#000" stroke-width=".42649" fill="url(#d)"/>
<path d="m-386 562.42c-10.108-2.9925-23.206-2.5682-33.101-0.86253 1.7084-10.962 1.922-24.701-0.4271-35.877l33.528 36.739z" stroke-width=".95407pt" fill="url(#c)"/>
<path d="m-519.13 537-0.60402 134.7h131.68l0.0755-33.296c-2.9446 1.1325-32.692-40.998-70.141-39.186-37.483 1.8137-27.785-56.777-61.006-62.214z" stroke-width="1pt" fill="url(#b)"/>
</g>
</g>
</g>

<!-- Up arrow -->
<g id="up-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 .12089 335.67 164.35)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>

<!-- Down arrow -->
<g id="down-arrow" transform="translate(-279.22 -208.12)">
<path transform="matrix(.22413 0 0 -.12089 335.67 257.93)" stroke-width="0" d="m-194.17 412.01h-28.827-28.827l14.414-24.965 14.414-24.965 14.414 24.965z"/>
</g>
</defs>
</svg>

<header>
<h1>
<a href="/">/</a>
</h1>
</header>
<main>
<div class="meta">
<div id="summary">
<span class="meta-item"><b>4</b> directories</span>
<span class="meta-item"><b>4</b> files</span>
<span class="meta-item"><input type="text" placeholder="filter" id="filter" onkeyup='filter()'></span>
</div>
</div>
<div class="listing">
<table aria-describedby="summary">
<thead>
<tr>
<th>
<a href="?sort=name&order=desc">Name <svg width="1em" height=".4em" version="1.1" viewBox="0 0 12.922194 6.0358899"><use xlink:href="#up-arrow"></use></svg></a>
</th>
<th>
<a href="?sort=size&order=asc">Size</a>
</th>
<th class="hideable">
<a href="?sort=time&order=asc">Modified</a>
</th>
</tr>
</thead>
<tbody>
<tr class="file">
<td>
<a href="./mimetype.zip">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">mimetype.zip</span>
</a>
</td>
<td data-order="783696">765 KiB</td>
<td class="hideable"><time datetime="2016-04-04T15:36:49Z">04/04/2016 03:36:49 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./rclone-delete-empty-dirs.py">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">rclone-delete-empty-dirs.py</span>
</a>
</td>
<td data-order="1271">1.2 KiB</td>
<td class="hideable"><time datetime="2016-10-26T16:05:08Z">10/26/2016 04:05:08 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./rclone-show-empty-dirs.py">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">rclone-show-empty-dirs.py</span>
</a>
</td>
<td data-order="868">868 B</td>
<td class="hideable"><time datetime="2016-10-26T09:29:34Z">10/26/2016 09:29:34 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./stat-windows-386.zip">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 26.604381 29.144726"><use xlink:href="#file"></use></svg>
<span class="name">stat-windows-386.zip</span>
</a>
</td>
<td data-order="704960">688 KiB</td>
<td class="hideable"><time datetime="2016-08-14T20:44:58Z">08/14/2016 08:44:58 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-155-gcf29ee8b-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-155-gcf29ee8b-team-driveβ</span>
</a>
</td>
<td data-order="-1">—</td>
<td class="hideable"><time datetime="2017-06-01T21:28:09Z">06/01/2017 09:28:09 PM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-156-gca76b3fb-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-156-gca76b3fb-team-driveβ</span>
</a>
</td>
<td data-order="-1">—</td>
<td class="hideable"><time datetime="2017-06-04T08:53:04Z">06/04/2017 08:53:04 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-156-ge1f0e0f5-team-drive%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-156-ge1f0e0f5-team-driveβ</span>
</a>
</td>
<td data-order="-1">—</td>
<td class="hideable"><time datetime="2017-06-02T10:38:05Z">06/02/2017 10:38:05 AM +00:00</time></td>
</tr>
<tr class="file">
<td>
<a href="./v1.36-22-g06ea13a-ssh-agent%CE%B2/">
<svg width="1.5em" height="1em" version="1.1" viewBox="0 0 35.678803 28.527945"><use xlink:href="#folder"></use></svg>
<span class="name">v1.36-22-g06ea13a-ssh-agentβ</span>
</a>
</td>
<td data-order="-1">—</td>
<td class="hideable"><time datetime="2017-04-10T13:58:02Z">04/10/2017 01:58:02 PM +00:00</time></td>
</tr>
</tbody>
</table>
</div>
</main>
<footer>
Served with <a rel="noopener noreferrer" href="https://caddyserver.com">Caddy</a>
</footer>
<script>
var filterEl = document.getElementById('filter');
function filter() {
var q = filterEl.value.trim().toLowerCase();
var elems = document.querySelectorAll('tr.file');
elems.forEach(function(el) {
if (!q) {
el.style.display = '';
return;
}
var nameEl = el.querySelector('.name');
var nameVal = nameEl.textContent.trim().toLowerCase();
if (nameVal.indexOf(q) !== -1) {
el.style.display = '';
} else {
el.style.display = 'none';
}
});
}

function localizeDatetime(e, index, ar) {
if (e.textContent === undefined) {
return;
}
var d = new Date(e.getAttribute('datetime'));
if (isNaN(d)) {
d = new Date(e.textContent);
if (isNaN(d)) {
return;
}
}
e.textContent = d.toLocaleString();
}
var timeList = Array.prototype.slice.call(document.getElementsByTagName("time"));
timeList.forEach(localizeDatetime);
</script>
</body>
</html>
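The listing page above is committed as a static fixture rather than fetched live. As an illustration only (this is not code from the change itself), a fixture like this can be served to a test with net/http/httptest from the standard library and then fetched over HTTP:

// Sketch: serve the saved listing pages from a throwaway test server.
// The directory path matches the fixture location added in this commit.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"net/http/httptest"
)

func main() {
	ts := httptest.NewServer(http.FileServer(http.Dir("backend/http/test/index_files")))
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/memstore.html")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fetched %d bytes of listing HTML\n", len(body))
}

Serving the saved pages keeps the tests deterministic and independent of the original servers.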
0  backend/http/test/index_files/empty.html  Normal file
77  backend/http/test/index_files/memstore.html  Normal file
@@ -0,0 +1,77 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="robots" content="noindex" />
<title>Index of /</title>
</head>
<body>
<div id="content">
<h1>Index of /</h1>

<table>
<thead>
<tr>
<th>Name</th>
<th>Type</th>
<th>Size</th>
<th>Last modified</th>
<th>MD5</th>
</tr>
</thead>
<tbody>

<tr>
<td><a href="test/">test/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>

<tr>
<td><a href="v1.35/">v1.35/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>

<tr>
<td><a href="v1.36-01-g503cd84/">v1.36-01-g503cd84/</a></td>
<td>application/directory</td>
<td>0 bytes</td>
<td>-</td>
<td>-</td>
</tr>

<tr>
<td><a href="rclone-beta-latest-freebsd-386.zip">rclone-beta-latest-freebsd-386.zip</a></td>
<td>application/zip</td>
<td>4.6 MB</td>
<td>2017-06-19 14:04:52</td>
<td>e747003c69c81e675f206a715264bfa8</td>
</tr>

<tr>
<td><a href="rclone-beta-latest-freebsd-amd64.zip">rclone-beta-latest-freebsd-amd64.zip</a></td>
<td>application/zip</td>
<td>5.0 MB</td>
<td>2017-06-19 14:04:53</td>
<td>ff30b5e9bf2863a2373069142e6f2b7f</td>
</tr>

<tr>
<td><a href="rclone-beta-latest-windows-amd64.zip">rclone-beta-latest-windows-amd64.zip</a></td>
<td>application/x-zip-compressed</td>
<td>4.9 MB</td>
<td>2017-06-19 13:56:02</td>
<td>851a5547a0495cbbd94cbc90a80ed6f5</td>
</tr>

</tbody>
</table>
<p class="right"><a href="http://www.memset.com/"><img src="http://www.memset.com/images/Memset_logo_2010.gif" alt="Memset Ltd." /></a></p>
</div>
</body>
</html>
12  backend/http/test/index_files/nginx.html  Normal file
@@ -0,0 +1,12 @@
<html>
<head><title>Index of /atomic/fedora/</title></head>
<body bgcolor="white">
<h1>Index of /atomic/fedora/</h1><hr><pre><a href="../">../</a>
<a href="deltas/">deltas/</a> 04-May-2017 21:37 -
<a href="objects/">objects/</a> 04-May-2017 20:44 -
<a href="refs/">refs/</a> 04-May-2017 20:42 -
<a href="state/">state/</a> 04-May-2017 21:36 -
<a href="config">config</a> 04-May-2017 20:42 118
<a href="summary">summary</a> 04-May-2017 21:36 806
</pre><hr></body>
</html>
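All three fixture pages (the Caddy listing, the Memset memstore index and the nginx autoindex) reduce to anchor tags pointing at files and directories. A hedged sketch of extracting the hrefs from one of them, using golang.org/x/net/html rather than whatever parser the http backend actually uses:

// Illustration only: walk the parsed HTML tree and print every href.
package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/net/html"
)

func main() {
	f, err := os.Open("backend/http/test/index_files/nginx.html")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	doc, err := html.Parse(f)
	if err != nil {
		log.Fatal(err)
	}

	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					fmt.Println(a.Val)
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
}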
54  backend/hubic/auth.go  Normal file
@@ -0,0 +1,54 @@
package hubic

import (
"net/http"

"github.com/ncw/swift"
)

// auth is an authenticator for swift
type auth struct {
f *Fs
}

// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
return &auth{
f: f,
}
}

// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
err := a.f.getCredentials()
if err != nil {
return nil, err
}
return nil, nil
}

// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {
return nil
}

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
return a.f.credentials.Endpoint
}

// The access token
func (a *auth) Token() string {
return a.f.credentials.Token
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
return ""
}

// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
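The final line of auth.go is the standard Go compile-time assertion that *auth satisfies swift.Authenticator: assigning a typed nil pointer to a blank interface-typed variable forces the compiler to check the method set. A minimal self-contained example of the same idiom (the names below are made up for illustration):

package main

import "fmt"

// Greeter is the interface we want to guarantee is implemented.
type Greeter interface {
	Greet() string
}

type english struct{}

func (english) Greet() string { return "hello" }

// Compile-time check: the build fails if english stops satisfying Greeter.
var _ Greeter = english{}

func main() {
	fmt.Println(english{}.Greet())
}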
@@ -13,9 +13,12 @@ import (
"net/http"
"time"

"github.com/ncw/rclone/backend/swift"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/oauthutil"
"github.com/ncw/rclone/swift"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/lib/oauthutil"
swiftLib "github.com/ncw/swift"
"github.com/pkg/errors"
"golang.org/x/oauth2"
@@ -38,7 +41,7 @@ var (
TokenURL: "https://api.hubic.com/oauth/token/",
},
ClientID: rcloneClientID,
ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
RedirectURL: oauthutil.RedirectLocalhostURL,
}
)
@@ -56,10 +59,10 @@ func init() {
}
},
Options: []fs.Option{{
Name: fs.ConfigClientID,
Name: config.ConfigClientID,
Help: "Hubic Client Id - leave blank normally.",
}, {
Name: fs.ConfigClientSecret,
Name: config.ConfigClientSecret,
Help: "Hubic Client Secret - leave blank normally.",
}},
})
@@ -157,7 +160,7 @@ func NewFs(name, root string) (fs.Fs, error) {
Auth: newAuth(f),
ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
Timeout: 10 * fs.Config.Timeout, // Use the timeouts in the transport
Transport: fs.Config.Transport(),
Transport: fshttp.NewTransport(fs.Config),
}
err = c.Authenticate()
if err != nil {
@@ -165,7 +168,7 @@ func NewFs(name, root string) (fs.Fs, error) {
}

// Make inner swift Fs from the connection
swiftFs, err := swift.NewFsWithConnection(name, root, c)
swiftFs, err := swift.NewFsWithConnection(name, root, c, true)
if err != nil && err != fs.ErrorIsFile {
return nil, err
}
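The hunks above switch the hubic backend from fs.MustReveal to the fs/config/obscure package for decoding the built-in client secret. As a hedged sketch of how an obscured secret round-trips (only MustReveal appears in this diff; Obscure is assumed from the rclone source and may differ):

// Sketch, not part of the change: obscure a secret for storage in a config
// file and reveal it again at runtime.
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/fs/config/obscure"
)

func main() {
	enc, err := obscure.Obscure("my-client-secret") // assumed API
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("obscured:", enc)
	fmt.Println("revealed:", obscure.MustReveal(enc))
}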
17  backend/hubic/hubic_test.go  Normal file
@@ -0,0 +1,17 @@
// Test Hubic filesystem interface
package hubic_test

import (
"testing"

"github.com/ncw/rclone/backend/hubic"
"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestHubic:",
NilObject: (*hubic.Object)(nil),
})
}
29  backend/local/about_unix.go  Normal file
@@ -0,0 +1,29 @@
// +build darwin dragonfly freebsd linux

package local

import (
"syscall"

"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var s syscall.Statfs_t
err := syscall.Statfs(f.root, &s)
if err != nil {
return nil, errors.Wrap(err, "failed to read disk usage")
}
bs := int64(s.Bsize)
usage := &fs.Usage{
Total: fs.NewUsageValue(bs * int64(s.Blocks)), // quota of bytes that can be used
Used: fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
Free: fs.NewUsageValue(bs * int64(s.Bavail)), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

// check interface
var _ fs.Abouter = &Fs{}
36  backend/local/about_windows.go  Normal file
@@ -0,0 +1,36 @@
// +build windows

package local

import (
"syscall"
"unsafe"

"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
)

var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
var available, total, free int64
_, _, e1 := getFreeDiskSpace.Call(
uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
uintptr(unsafe.Pointer(&total)), // lpTotalNumberOfBytes
uintptr(unsafe.Pointer(&free)), // lpTotalNumberOfFreeBytes
)
if e1 != syscall.Errno(0) {
return nil, errors.Wrap(e1, "failed to read disk usage")
}
usage := &fs.Usage{
Total: fs.NewUsageValue(total), // quota of bytes that can be used
Used: fs.NewUsageValue(total - free), // bytes in use
Free: fs.NewUsageValue(available), // bytes which can be uploaded before reaching the quota
}
return usage, nil
}

// check interface
var _ fs.Abouter = &Fs{}
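Both About implementations are reached through the optional fs.Abouter interface rather than called directly. A hypothetical usage sketch built only from identifiers visible in this diff (local.NewFs, fs.Abouter, fs.ErrorIsFile and the Usage fields set above); it is not code from the change:

// Sketch: ask the local backend for quota information via fs.Abouter.
package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/backend/local"
	"github.com/ncw/rclone/fs"
)

func main() {
	f, err := local.NewFs("local", "/tmp")
	if err != nil && err != fs.ErrorIsFile {
		log.Fatal(err)
	}
	if abouter, ok := f.(fs.Abouter); ok {
		usage, err := abouter.About()
		if err != nil {
			log.Fatal(err)
		}
		// The local backend fills Total, Used and Free, as shown above.
		fmt.Println("total:", *usage.Total, "used:", *usage.Used, "free:", *usage.Free)
	}
}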
969  backend/local/local.go  Normal file
@@ -0,0 +1,969 @@
|
||||
// Package local provides a filesystem interface
|
||||
package local
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config"
|
||||
"github.com/ncw/rclone/fs/config/flags"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/readers"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/appengine/log"
|
||||
)
|
||||
|
||||
var (
|
||||
followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
|
||||
skipSymlinks = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
|
||||
noUTFNorm = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
|
||||
noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
|
||||
)
|
||||
|
||||
// Constants
|
||||
const devUnset = 0xdeadbeefcafebabe // a device id meaning it is unset
|
||||
|
||||
// Register with Fs
|
||||
func init() {
|
||||
fsi := &fs.RegInfo{
|
||||
Name: "local",
|
||||
Description: "Local Disk",
|
||||
NewFs: NewFs,
|
||||
Options: []fs.Option{{
|
||||
Name: "nounc",
|
||||
Help: "Disable UNC (long path names) conversion on Windows",
|
||||
Optional: true,
|
||||
Examples: []fs.OptionExample{{
|
||||
Value: "true",
|
||||
Help: "Disables long file names",
|
||||
}},
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
}
|
||||
|
||||
// Fs represents a local filesystem rooted at root
|
||||
type Fs struct {
|
||||
name string // the name of the remote
|
||||
root string // The root directory (OS path)
|
||||
features *fs.Features // optional features
|
||||
dev uint64 // device number of root node
|
||||
precisionOk sync.Once // Whether we need to read the precision
|
||||
precision time.Duration // precision of local filesystem
|
||||
wmu sync.Mutex // used for locking access to 'warned'.
|
||||
warned map[string]struct{} // whether we have warned about this string
|
||||
nounc bool // Skip UNC conversion on Windows
|
||||
// do os.Lstat or os.Stat
|
||||
lstat func(name string) (os.FileInfo, error)
|
||||
dirNames *mapper // directory name mapping
|
||||
objectHashesMu sync.Mutex // global lock for Object.hashes
|
||||
}
|
||||
|
||||
// Object represents a local filesystem object
|
||||
type Object struct {
|
||||
fs *Fs // The Fs this object is part of
|
||||
remote string // The remote path - properly UTF-8 encoded - for rclone
|
||||
path string // The local path - may not be properly UTF-8 encoded - for OS
|
||||
size int64 // file metadata - always present
|
||||
mode os.FileMode
|
||||
modTime time.Time
|
||||
hashes map[hash.Type]string // Hashes
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// NewFs constructs an Fs from the path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
var err error
|
||||
|
||||
if *noUTFNorm {
|
||||
log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
|
||||
}
|
||||
|
||||
nounc := config.FileGet(name, "nounc")
|
||||
f := &Fs{
|
||||
name: name,
|
||||
warned: make(map[string]struct{}),
|
||||
nounc: nounc == "true",
|
||||
dev: devUnset,
|
||||
lstat: os.Lstat,
|
||||
dirNames: newMapper(),
|
||||
}
|
||||
f.root = f.cleanPath(root)
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: f.caseInsensitive(),
|
||||
CanHaveEmptyDirectories: true,
|
||||
}).Fill(f)
|
||||
if *followSymlinks {
|
||||
f.lstat = os.Stat
|
||||
}
|
||||
|
||||
// Check to see if this points to a file
|
||||
fi, err := f.lstat(f.root)
|
||||
if err == nil {
|
||||
f.dev = readDevice(fi)
|
||||
}
|
||||
if err == nil && fi.Mode().IsRegular() {
|
||||
// It is a file, so use the parent as the root
|
||||
f.root, _ = getDirFile(f.root)
|
||||
// return an error with an fs which points to the parent
|
||||
return f, fs.ErrorIsFile
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Name of the remote (as passed into NewFs)
|
||||
func (f *Fs) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Root of the remote (as passed into NewFs)
|
||||
func (f *Fs) Root() string {
|
||||
return f.root
|
||||
}
|
||||
|
||||
// String converts this Fs to a string
|
||||
func (f *Fs) String() string {
|
||||
return fmt.Sprintf("Local file system at %s", f.root)
|
||||
}
|
||||
|
||||
// Features returns the optional features of this Fs
|
||||
func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// caseInsensitive returns whether the remote is case insensitive or not
|
||||
func (f *Fs) caseInsensitive() bool {
|
||||
// FIXME not entirely accurate since you can have case
|
||||
// sensitive Fses on darwin and case insensitive Fses on linux.
|
||||
// Should probably check but that would involve creating a
|
||||
// file in the remote to be most accurate which probably isn't
|
||||
// desirable.
|
||||
return runtime.GOOS == "windows" || runtime.GOOS == "darwin"
|
||||
}
|
||||
|
||||
// newObject makes a half completed Object
|
||||
//
|
||||
// if dstPath is empty then it is made from remote
|
||||
func (f *Fs) newObject(remote, dstPath string) *Object {
|
||||
if dstPath == "" {
|
||||
dstPath = f.cleanPath(filepath.Join(f.root, remote))
|
||||
}
|
||||
remote = f.cleanRemote(remote)
|
||||
return &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
path: dstPath,
|
||||
}
|
||||
}
|
||||
|
||||
// Return an Object from a path
|
||||
//
|
||||
// May return nil if an error occurred
|
||||
func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Object, error) {
|
||||
o := f.newObject(remote, dstPath)
|
||||
if info != nil {
|
||||
o.setMetadata(info)
|
||||
} else {
|
||||
err := o.lstat()
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fs.ErrorObjectNotFound
|
||||
}
|
||||
if os.IsPermission(err) {
|
||||
return nil, fs.ErrorPermissionDenied
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if o.mode.IsDir() {
|
||||
return nil, errors.Wrapf(fs.ErrorNotAFile, "%q", remote)
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// NewObject finds the Object at remote. If it can't be found
|
||||
// it returns the error ErrorObjectNotFound.
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
return f.newObjectWithInfo(remote, "", nil)
|
||||
}
|
||||
|
||||
// List the objects and directories in dir into entries. The
|
||||
// entries can be returned in any order but should be for a
|
||||
// complete directory.
|
||||
//
|
||||
// dir should be "" to list the root, and should not have
|
||||
// trailing slashes.
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
dir = f.dirNames.Load(dir)
|
||||
fsDirPath := f.cleanPath(filepath.Join(f.root, dir))
|
||||
remote := f.cleanRemote(dir)
|
||||
_, err = os.Stat(fsDirPath)
|
||||
if err != nil {
|
||||
return nil, fs.ErrorDirNotFound
|
||||
}
|
||||
|
||||
fd, err := os.Open(fsDirPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to open directory %q", dir)
|
||||
}
|
||||
defer func() {
|
||||
cerr := fd.Close()
|
||||
if cerr != nil && err == nil {
|
||||
err = errors.Wrapf(cerr, "failed to close directory %q:", dir)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
fis, err := fd.Readdir(1024)
|
||||
if err == io.EOF && len(fis) == 0 {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read directory %q", dir)
|
||||
}
|
||||
|
||||
for _, fi := range fis {
|
||||
name := fi.Name()
|
||||
mode := fi.Mode()
|
||||
newRemote := path.Join(remote, name)
|
||||
newPath := filepath.Join(fsDirPath, name)
|
||||
// Follow symlinks if required
|
||||
if *followSymlinks && (mode&os.ModeSymlink) != 0 {
|
||||
fi, err = os.Stat(newPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mode = fi.Mode()
|
||||
}
|
||||
if fi.IsDir() {
|
||||
// Ignore directories which are symlinks. These are junction points under windows which
|
||||
// are kind of a souped up symlink. Unix doesn't have directories which are symlinks.
|
||||
if (mode&os.ModeSymlink) == 0 && f.dev == readDevice(fi) {
|
||||
d := fs.NewDir(f.dirNames.Save(newRemote, f.cleanRemote(newRemote)), fi.ModTime())
|
||||
entries = append(entries, d)
|
||||
}
|
||||
} else {
|
||||
fso, err := f.newObjectWithInfo(newRemote, newPath, fi)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fso.Storable() {
|
||||
entries = append(entries, fso)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
// cleanRemote makes string a valid UTF-8 string for remote strings.
|
||||
//
|
||||
// Any invalid UTF-8 characters will be replaced with utf8.RuneError
|
||||
// It also normalises the UTF-8 and converts the slashes if necessary.
|
||||
func (f *Fs) cleanRemote(name string) string {
|
||||
if !utf8.ValidString(name) {
|
||||
f.wmu.Lock()
|
||||
if _, ok := f.warned[name]; !ok {
|
||||
fs.Logf(f, "Replacing invalid UTF-8 characters in %q", name)
|
||||
f.warned[name] = struct{}{}
|
||||
}
|
||||
f.wmu.Unlock()
|
||||
name = string([]rune(name))
|
||||
}
|
||||
name = filepath.ToSlash(name)
|
||||
return name
|
||||
}
|
||||
|
||||
// mapper maps raw to cleaned directory names
|
||||
type mapper struct {
|
||||
mu sync.RWMutex // mutex to protect the below
|
||||
m map[string]string // map of un-normalised directory names
|
||||
}
|
||||
|
||||
func newMapper() *mapper {
|
||||
return &mapper{
|
||||
m: make(map[string]string),
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup a directory name to make a local name (reverses
|
||||
// cleanDirName)
|
||||
//
|
||||
// FIXME this is temporary before we make a proper Directory object
|
||||
func (m *mapper) Load(in string) string {
|
||||
m.mu.RLock()
|
||||
out, ok := m.m[in]
|
||||
m.mu.RUnlock()
|
||||
if ok {
|
||||
return out
|
||||
}
|
||||
return in
|
||||
}
|
||||
|
||||
// Cleans a directory name recording if it needed to be altered
|
||||
//
|
||||
// FIXME this is temporary before we make a proper Directory object
|
||||
func (m *mapper) Save(in, out string) string {
|
||||
if in != out {
|
||||
m.mu.Lock()
|
||||
m.m[out] = in
|
||||
m.mu.Unlock()
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Put the Object to the local filesystem
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
remote := src.Remote()
|
||||
// Temporary Object under construction - info filled in by Update()
|
||||
o := f.newObject(remote, "")
|
||||
err := o.Update(in, src, options...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o, nil
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return f.Put(in, src, options...)
|
||||
}
|
||||
|
||||
// Mkdir creates the directory if it doesn't exist
|
||||
func (f *Fs) Mkdir(dir string) error {
|
||||
// FIXME: https://github.com/syncthing/syncthing/blob/master/lib/osutil/mkdirall_windows.go
|
||||
root := f.cleanPath(filepath.Join(f.root, dir))
|
||||
err := os.MkdirAll(root, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if dir == "" {
|
||||
fi, err := f.lstat(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.dev = readDevice(fi)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rmdir removes the directory
|
||||
//
|
||||
// If it isn't empty it will return an error
|
||||
func (f *Fs) Rmdir(dir string) error {
|
||||
root := f.cleanPath(filepath.Join(f.root, dir))
|
||||
return os.Remove(root)
|
||||
}
|
||||
|
||||
// Precision of the file system
|
||||
func (f *Fs) Precision() (precision time.Duration) {
|
||||
f.precisionOk.Do(func() {
|
||||
f.precision = f.readPrecision()
|
||||
})
|
||||
return f.precision
|
||||
}
|
||||
|
||||
// Read the precision
|
||||
func (f *Fs) readPrecision() (precision time.Duration) {
|
||||
// Default precision of 1s
|
||||
precision = time.Second
|
||||
|
||||
// Create temporary file and test it
|
||||
fd, err := ioutil.TempFile("", "rclone")
|
||||
if err != nil {
|
||||
// If failed return 1s
|
||||
// fmt.Println("Failed to create temp file", err)
|
||||
return time.Second
|
||||
}
|
||||
path := fd.Name()
|
||||
// fmt.Println("Created temp file", path)
|
||||
err = fd.Close()
|
||||
if err != nil {
|
||||
return time.Second
|
||||
}
|
||||
|
||||
// Delete it on return
|
||||
defer func() {
|
||||
// fmt.Println("Remove temp file")
|
||||
_ = os.Remove(path) // ignore error
|
||||
}()
|
||||
|
||||
// Find the minimum duration we can detect
|
||||
for duration := time.Duration(1); duration < time.Second; duration *= 10 {
|
||||
// Current time with delta
|
||||
t := time.Unix(time.Now().Unix(), int64(duration))
|
||||
err := os.Chtimes(path, t, t)
|
||||
if err != nil {
|
||||
// fmt.Println("Failed to Chtimes", err)
|
||||
break
|
||||
}
|
||||
|
||||
// Read the actual time back
|
||||
fi, err := os.Stat(path)
|
||||
if err != nil {
|
||||
// fmt.Println("Failed to Stat", err)
|
||||
break
|
||||
}
|
||||
|
||||
// If it matches - have found the precision
|
||||
// fmt.Println("compare", fi.ModTime(), t)
|
||||
if fi.ModTime().Equal(t) {
|
||||
// fmt.Println("Precision detected as", duration)
|
||||
return duration
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Purge deletes all the files and directories
|
||||
//
|
||||
// Optional interface: Only implement this if you have a way of
|
||||
// deleting all the files quicker than just running Remove() on the
|
||||
// result of List()
|
||||
func (f *Fs) Purge() error {
|
||||
fi, err := f.lstat(f.root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !fi.Mode().IsDir() {
|
||||
return errors.Errorf("can't purge non directory: %q", f.root)
|
||||
}
|
||||
return os.RemoveAll(f.root)
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
//
|
||||
// This is stored with the remote path given
|
||||
//
|
||||
// It returns the destination Object and a possible error
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantMove
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
srcObj, ok := src.(*Object)
|
||||
if !ok {
|
||||
fs.Debugf(src, "Can't move - not same remote type")
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Temporary Object under construction
|
||||
dstObj := f.newObject(remote, "")
|
||||
|
||||
// Check it is a file if it exists
|
||||
err := dstObj.lstat()
|
||||
if os.IsNotExist(err) {
|
||||
// OK
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
} else if !dstObj.mode.IsRegular() {
|
||||
// It isn't a file
|
||||
return nil, errors.New("can't move file onto non-file")
|
||||
}
|
||||
|
||||
// Create destination
|
||||
err = dstObj.mkdirAll()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
err = os.Rename(srcObj.path, dstObj.path)
|
||||
if os.IsNotExist(err) {
|
||||
// race condition, source was deleted in the meantime
|
||||
return nil, err
|
||||
} else if os.IsPermission(err) {
|
||||
// not enough rights to write to dst
|
||||
return nil, err
|
||||
} else if err != nil {
|
||||
// not quite clear, but probably trying to move a file across file system
|
||||
// boundaries. Copying might still work.
|
||||
fs.Debugf(src, "Can't move: %v: trying copy", err)
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
|
||||
// Update the info
|
||||
err = dstObj.lstat()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dstObj, nil
|
||||
}
|
||||
|
||||
// DirMove moves src, srcRemote to this remote at dstRemote
|
||||
// using server side move operations.
|
||||
//
|
||||
// Will only be called if src.Fs().Name() == f.Name()
|
||||
//
|
||||
// If it isn't possible then return fs.ErrorCantDirMove
|
||||
//
|
||||
// If destination exists then return fs.ErrorDirExists
|
||||
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
|
||||
srcFs, ok := src.(*Fs)
|
||||
if !ok {
|
||||
fs.Debugf(srcFs, "Can't move directory - not same remote type")
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
srcPath := f.cleanPath(filepath.Join(srcFs.root, srcRemote))
|
||||
dstPath := f.cleanPath(filepath.Join(f.root, dstRemote))
|
||||
|
||||
// Check if destination exists
|
||||
_, err := os.Lstat(dstPath)
|
||||
if !os.IsNotExist(err) {
|
||||
return fs.ErrorDirExists
|
||||
}
|
||||
|
||||
// Create parent of destination
|
||||
dstParentPath, _ := getDirFile(dstPath)
|
||||
err = os.MkdirAll(dstParentPath, 0777)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Do the move
|
||||
err = os.Rename(srcPath, dstPath)
|
||||
if os.IsNotExist(err) {
|
||||
// race condition, source was deleted in the meantime
|
||||
return err
|
||||
} else if os.IsPermission(err) {
|
||||
// not enough rights to write to dst
|
||||
return err
|
||||
} else if err != nil {
|
||||
// not quite clear, but probably trying to move directory across file system
|
||||
// boundaries. Copying might still work.
|
||||
fs.Debugf(src, "Can't move dir: %v: trying copy", err)
|
||||
return fs.ErrorCantDirMove
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Supported
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
|
||||
// Fs returns the parent Fs
|
||||
func (o *Object) Fs() fs.Info {
|
||||
return o.fs
|
||||
}
|
||||
|
||||
// Return a string version
|
||||
func (o *Object) String() string {
|
||||
if o == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Remote returns the remote path
|
||||
func (o *Object) Remote() string {
|
||||
return o.remote
|
||||
}
|
||||
|
||||
// Hash returns the requested hash of a file as a lowercase hex string
|
||||
func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
// Check that the underlying file hasn't changed
|
||||
oldtime := o.modTime
|
||||
oldsize := o.size
|
||||
err := o.lstat()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to stat")
|
||||
}
|
||||
|
||||
o.fs.objectHashesMu.Lock()
|
||||
hashes := o.hashes
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
|
||||
if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
|
||||
in, err := os.Open(o.path)
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to open")
|
||||
}
|
||||
hashes, err = hash.Stream(in)
|
||||
closeErr := in.Close()
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "hash: failed to read")
|
||||
}
|
||||
if closeErr != nil {
|
||||
return "", errors.Wrap(closeErr, "hash: failed to close")
|
||||
}
|
||||
o.fs.objectHashesMu.Lock()
|
||||
o.hashes = hashes
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
}
|
||||
return hashes[r], nil
|
||||
}
|
||||
|
||||
// Size returns the size of an object in bytes
|
||||
func (o *Object) Size() int64 {
|
||||
return o.size
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the object
|
||||
func (o *Object) ModTime() time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
// SetModTime sets the modification time of the local fs object
|
||||
func (o *Object) SetModTime(modTime time.Time) error {
|
||||
err := os.Chtimes(o.path, modTime, modTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// Re-read metadata
|
||||
return o.lstat()
|
||||
}
|
||||
|
||||
// Storable returns a boolean showing if this object is storable
|
||||
func (o *Object) Storable() bool {
|
||||
// Check for control characters in the remote name and show non storable
|
||||
for _, c := range o.Remote() {
|
||||
if c >= 0x00 && c < 0x20 || c == 0x7F {
|
||||
fs.Logf(o.fs, "Can't store file with control characters: %q", o.Remote())
|
||||
return false
|
||||
}
|
||||
}
|
||||
mode := o.mode
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
if !*skipSymlinks {
|
||||
fs.Logf(o, "Can't follow symlink without -L/--copy-links")
|
||||
}
|
||||
return false
|
||||
} else if mode&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
|
||||
fs.Logf(o, "Can't transfer non file/directory")
|
||||
return false
|
||||
} else if mode&os.ModeDir != 0 {
|
||||
// fs.Debugf(o, "Skipping directory")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
|
||||
// object that is read
|
||||
type localOpenFile struct {
|
||||
o *Object // object that is open
|
||||
in io.ReadCloser // handle we are wrapping
|
||||
hash *hash.MultiHasher // currently accumulating hashes
|
||||
fd *os.File // file object reference
|
||||
}
|
||||
|
||||
// Read bytes from the object - see io.Reader
|
||||
func (file *localOpenFile) Read(p []byte) (n int, err error) {
|
||||
if !*noCheckUpdated {
|
||||
// Check if file has the same size and modTime
|
||||
fi, err := file.fd.Stat()
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "can't read status of source file while transferring")
|
||||
}
|
||||
if file.o.size != fi.Size() {
|
||||
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
|
||||
}
|
||||
if !file.o.modTime.Equal(fi.ModTime()) {
|
||||
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
|
||||
}
|
||||
}
|
||||
|
||||
n, err = file.in.Read(p)
|
||||
if n > 0 {
|
||||
// Hash routines never return an error
|
||||
_, _ = file.hash.Write(p[:n])
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Close the object and update the hashes
|
||||
func (file *localOpenFile) Close() (err error) {
|
||||
err = file.in.Close()
|
||||
if err == nil {
|
||||
if file.hash.Size() == file.o.Size() {
|
||||
file.o.fs.objectHashesMu.Lock()
|
||||
file.o.hashes = file.hash.Sums()
|
||||
file.o.fs.objectHashesMu.Unlock()
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Open an object for read
|
||||
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
var offset, limit int64 = 0, -1
|
||||
hashes := hash.Supported
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.SeekOption:
|
||||
offset = x.Offset
|
||||
case *fs.RangeOption:
|
||||
offset, limit = x.Decode(o.size)
|
||||
case *fs.HashesOption:
|
||||
hashes = x.Hashes
|
||||
default:
|
||||
if option.Mandatory() {
|
||||
fs.Logf(o, "Unsupported mandatory option: %v", option)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fd, err := os.Open(o.path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
wrappedFd := readers.NewLimitedReadCloser(fd, limit)
|
||||
if offset != 0 {
|
||||
// seek the object
|
||||
_, err = fd.Seek(offset, io.SeekStart)
|
||||
// don't attempt to make checksums
|
||||
return wrappedFd, err
|
||||
}
|
||||
hash, err := hash.NewMultiHasherTypes(hashes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Update the md5sum as we go along
|
||||
in = &localOpenFile{
|
||||
o: o,
|
||||
in: wrappedFd,
|
||||
hash: hash,
|
||||
fd: fd,
|
||||
}
|
||||
return in, nil
|
||||
}
|
||||
|
||||
// mkdirAll makes all the directories needed to store the object
|
||||
func (o *Object) mkdirAll() error {
|
||||
dir, _ := getDirFile(o.path)
|
||||
return os.MkdirAll(dir, 0777)
|
||||
}
|
||||
|
||||
// Update the object from in with modTime and size
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
hashes := hash.Supported
|
||||
for _, option := range options {
|
||||
switch x := option.(type) {
|
||||
case *fs.HashesOption:
|
||||
hashes = x.Hashes
|
||||
}
|
||||
}
|
||||
|
||||
err := o.mkdirAll()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out, err := os.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Calculate the hash of the object we are reading as we go along
|
||||
hash, err := hash.NewMultiHasherTypes(hashes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
in = io.TeeReader(in, hash)
|
||||
|
||||
_, err = io.Copy(out, in)
|
||||
closeErr := out.Close()
|
||||
if err == nil {
|
||||
err = closeErr
|
||||
}
|
||||
if err != nil {
|
||||
fs.Logf(o, "Removing partially written file on error: %v", err)
|
||||
if removeErr := os.Remove(o.path); removeErr != nil {
|
||||
fs.Errorf(o, "Failed to remove partially written file: %v", removeErr)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// All successful so update the hashes
|
||||
o.fs.objectHashesMu.Lock()
|
||||
o.hashes = hash.Sums()
|
||||
o.fs.objectHashesMu.Unlock()
|
||||
|
||||
// Set the mtime
|
||||
err = o.SetModTime(src.ModTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// ReRead info now that we have finished
|
||||
return o.lstat()
|
||||
}
|
||||
|
||||
// setMetadata sets the file info from the os.FileInfo passed in
|
||||
func (o *Object) setMetadata(info os.FileInfo) {
|
||||
// Don't overwrite the info if we don't need to
|
||||
// this avoids upsetting the race detector
|
||||
if o.size != info.Size() {
|
||||
o.size = info.Size()
|
||||
}
|
||||
if !o.modTime.Equal(info.ModTime()) {
|
||||
o.modTime = info.ModTime()
|
||||
}
|
||||
if o.mode != info.Mode() {
|
||||
o.mode = info.Mode()
|
||||
}
|
||||
}
|
||||
|
||||
// Stat a Object into info
|
||||
func (o *Object) lstat() error {
|
||||
info, err := o.fs.lstat(o.path)
|
||||
if err == nil {
|
||||
o.setMetadata(info)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() error {
|
||||
return remove(o.path)
|
||||
}
|
||||
|
||||
// Return the directory and file from an OS path. Assumes
|
||||
// os.PathSeparator is used.
|
||||
func getDirFile(s string) (string, string) {
|
||||
i := strings.LastIndex(s, string(os.PathSeparator))
|
||||
dir, file := s[:i], s[i+1:]
|
||||
if dir == "" {
|
||||
dir = string(os.PathSeparator)
|
||||
}
|
||||
return dir, file
|
||||
}
|
||||
|
||||
// cleanPathFragment cleans an OS path fragment which is part of a
|
||||
// bigger path and not necessarily absolute
|
||||
func cleanPathFragment(s string) string {
|
||||
if s == "" {
|
||||
return s
|
||||
}
|
||||
s = filepath.Clean(s)
|
||||
if runtime.GOOS == "windows" {
|
||||
s = strings.Replace(s, `/`, `\`, -1)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// cleanPath cleans and makes absolute the path passed in and returns
|
||||
// an OS path.
|
||||
//
|
||||
// The input might be in OS form or rclone form or a mixture, but the
|
||||
// output is in OS form.
|
||||
//
|
||||
// On Windows it also makes the path UNC and replaces any characters
// Windows can't handle with their replacements.
|
||||
func (f *Fs) cleanPath(s string) string {
|
||||
s = cleanPathFragment(s)
|
||||
if runtime.GOOS == "windows" {
|
||||
if !filepath.IsAbs(s) && !strings.HasPrefix(s, "\\") {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
if !f.nounc {
|
||||
// Convert to UNC
|
||||
s = uncPath(s)
|
||||
}
|
||||
s = cleanWindowsName(f, s)
|
||||
} else {
|
||||
if !filepath.IsAbs(s) {
|
||||
s2, err := filepath.Abs(s)
|
||||
if err == nil {
|
||||
s = s2
|
||||
}
|
||||
}
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Pattern to match a windows absolute path: "c:\" and similar
|
||||
var isAbsWinDrive = regexp.MustCompile(`^[a-zA-Z]\:\\`)
|
||||
|
||||
// uncPath converts an absolute Windows path
|
||||
// to a UNC long path.
|
||||
func uncPath(s string) string {
|
||||
// UNC can NOT use "/", so convert all to "\"
|
||||
s = strings.Replace(s, `/`, `\`, -1)
|
||||
|
||||
// If prefix is "\\", we already have a UNC path or server.
|
||||
if strings.HasPrefix(s, `\\`) {
|
||||
// If already long path, just keep it
|
||||
if strings.HasPrefix(s, `\\?\`) {
|
||||
return s
|
||||
}
|
||||
|
||||
// Trim "\\" from path and add UNC prefix.
|
||||
return `\\?\UNC\` + strings.TrimPrefix(s, `\\`)
|
||||
}
|
||||
if isAbsWinDrive.MatchString(s) {
|
||||
return `\\?\` + s
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// cleanWindowsName will clean invalid Windows characters replacing them with _
|
||||
func cleanWindowsName(f *Fs, name string) string {
|
||||
original := name
|
||||
var name2 string
|
||||
if strings.HasPrefix(name, `\\?\`) {
|
||||
name2 = `\\?\`
|
||||
name = strings.TrimPrefix(name, `\\?\`)
|
||||
}
|
||||
if strings.HasPrefix(name, `//?/`) {
|
||||
name2 = `//?/`
|
||||
name = strings.TrimPrefix(name, `//?/`)
|
||||
}
|
||||
// Colon is allowed as part of a drive name X:\
|
||||
colonAt := strings.Index(name, ":")
|
||||
if colonAt > 0 && colonAt < 3 && len(name) > colonAt+1 {
|
||||
// Copy to name2, which is unfiltered
|
||||
name2 += name[0 : colonAt+1]
|
||||
name = name[colonAt+1:]
|
||||
}
|
||||
|
||||
name2 += strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case '<', '>', '"', '|', '?', '*', ':':
|
||||
return '_'
|
||||
}
|
||||
return r
|
||||
}, name)
|
||||
|
||||
if name2 != original && f != nil {
|
||||
f.wmu.Lock()
|
||||
if _, ok := f.warned[name]; !ok {
|
||||
fs.Logf(f, "Replacing invalid characters in %q to %q", name, name2)
|
||||
f.warned[name] = struct{}{}
|
||||
}
|
||||
f.wmu.Unlock()
|
||||
}
|
||||
return name2
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = &Fs{}
|
||||
_ fs.Purger = &Fs{}
|
||||
_ fs.PutStreamer = &Fs{}
|
||||
_ fs.Mover = &Fs{}
|
||||
_ fs.DirMover = &Fs{}
|
||||
_ fs.Object = &Object{}
|
||||
)
|
||||
78  backend/local/local_internal_test.go  Normal file
@@ -0,0 +1,78 @@
package local

import (
"os"
"path"
"testing"
"time"

"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fstest"
"github.com/ncw/rclone/lib/readers"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// TestMain drives the tests
func TestMain(m *testing.M) {
fstest.TestMain(m)
}

func TestMapper(t *testing.T) {
m := newMapper()
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "potato", m.Save("potato", "potato"))
assert.Equal(t, m.m, map[string]string{})
assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
assert.Equal(t, m.m, map[string]string{
"-r'áö": "-r?'a´o¨",
})
assert.Equal(t, "potato", m.Load("potato"))
assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}

// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
filePath := "sub dir/local test"
r.WriteFile(filePath, "content", time.Now())

fd, err := os.Open(path.Join(r.LocalName, filePath))
if err != nil {
t.Fatalf("failed opening file %q: %v", filePath, err)
}

fi, err := fd.Stat()
require.NoError(t, err)
o := &Object{size: fi.Size(), modTime: fi.ModTime()}
wrappedFd := readers.NewLimitedReadCloser(fd, -1)
hash, err := hash.NewMultiHasherTypes(hash.Supported)
require.NoError(t, err)
in := localOpenFile{
o: o,
in: wrappedFd,
hash: hash,
fd: fd,
}

buf := make([]byte, 1)
_, err = in.Read(buf)
require.NoError(t, err)

r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.Errorf(t, err, "can't copy - source file is being updated")

// turn the checking off and try again

*noCheckUpdated = true
defer func() {
*noCheckUpdated = false
}()

r.WriteFile(filePath, "content updated", time.Now())
_, err = in.Read(buf)
require.NoError(t, err)

}
17  backend/local/local_test.go  Normal file
@@ -0,0 +1,17 @@
// Test Local filesystem interface
package local_test

import (
"testing"

"github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "",
NilObject: (*local.Object)(nil),
})
}
@@ -9,10 +9,11 @@ import (
"syscall"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/flags"
)

var (
oneFileSystem = fs.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)

// readDevice turns a valid os.FileInfo into a device number,
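The hunk above only moves the --one-file-system flag helper from fs.BoolP to flags.BoolP; the feature itself works by comparing device numbers read from each os.FileInfo against the device of the root. A small Unix-only illustration (not rclone code) of where that number comes from:

// Sketch: read the device number behind an os.FileInfo via the raw stat data.
package main

import (
	"fmt"
	"log"
	"os"
	"syscall"
)

func main() {
	fi, err := os.Stat("/")
	if err != nil {
		log.Fatal(err)
	}
	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
		fmt.Printf("device number of /: %v\n", st.Dev)
	}
}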
10  backend/local/remove_other.go  Normal file
@@ -0,0 +1,10 @@
//+build !windows

package local

import "os"

// Removes name, retrying on a sharing violation
func remove(name string) error {
return os.Remove(name)
}
50  backend/local/remove_test.go  Normal file
@@ -0,0 +1,50 @@
package local

import (
"io/ioutil"
"os"
"sync"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// Check we can remove an open file
func TestRemove(t *testing.T) {
fd, err := ioutil.TempFile("", "rclone-remove-test")
require.NoError(t, err)
name := fd.Name()
defer func() {
_ = os.Remove(name)
}()

exists := func() bool {
_, err := os.Stat(name)
if err == nil {
return true
} else if os.IsNotExist(err) {
return false
}
require.NoError(t, err)
return false
}

assert.True(t, exists())
// close the file in the background
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(250 * time.Millisecond)
require.NoError(t, fd.Close())
}()
// delete the open file
err = remove(name)
require.NoError(t, err)
// check it no longer exists
assert.False(t, exists())
// wait for background close
wg.Wait()
}
Some files were not shown because too many files have changed in this diff.