Mirror of https://github.com/rclone/rclone.git, synced 2026-01-06 02:23:24 +00:00.
Compare commits: 552 commits since v1.39 (most recent author: sandeepkru).
appveyor.yml
@@ -43,4 +43,4 @@ artifacts:
   - path: build/*-v*.zip
 
 deploy_script:
-  - IF "%APPVEYOR_REPO_BRANCH%" == "master" make upload_beta
+  - IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
14 .gometalinter.json (new file)
@@ -0,0 +1,14 @@
+{
+    "Enable": [
+        "deadcode",
+        "errcheck",
+        "goimports",
+        "golint",
+        "ineffassign",
+        "structcheck",
+        "varcheck",
+        "vet"
+    ],
+    "EnableGC": true,
+    "Vendor": true
+}
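This config drives the new `gometalinter` targets added to the Makefile further down this page. Assuming the same tools the Makefile fetches, trying it locally looks like:

    go get -u github.com/alecthomas/gometalinter
    gometalinter --install --update
    gometalinter ./...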
29 .travis.yml
@@ -1,14 +1,17 @@
 language: go
-sudo: false
-osx_image: xcode7.3
+sudo: required
+dist: trusty
 os:
 - linux
 go:
 - 1.6.4
 - 1.7.6
-- 1.8.5
-- 1.9.2
+- 1.8.7
+- 1.9.3
+- "1.10.1"
 - tip
 before_install:
 - if [[ $TRAVIS_OS_NAME == linux ]]; then sudo modprobe fuse ; sudo chmod 666 /dev/fuse ; sudo chown root:$USER /etc/fuse.conf ; fi
-- if [[ $TRAVIS_OS_NAME == osx ]]; then brew update && brew tap caskroom/cask && brew cask install osxfuse ; fi
+install:
+- git fetch --unshallow --tags
+- make vars
@@ -20,24 +23,28 @@ script:
 env:
   global:
     - GOTAGS=cmount
-  matrix:
-    secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
+    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
+    - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
+    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
 addons:
   apt:
     packages:
       - fuse
       - libfuse-dev
       - rpm
       - pkg-config
 matrix:
   allow_failures:
     - go: tip
   include:
     - os: osx
-      go: 1.9.2
+      go: "1.10.1"
       env: GOTAGS=""
 deploy:
   provider: script
   script: make travis_beta
   skip_cleanup: true
   on:
-    branch: master
-    go: 1.9.2
-    condition: "`uname` == 'Linux'"
+    all_branches: true
+    go: "1.10.1"
+    condition: $TRAVIS_PULL_REQUEST == false
CONTRIBUTING.md
@@ -100,21 +100,74 @@ need to make a remote called `TestDrive`.
 You can then run the unit tests in the drive directory. These tests
 are skipped if `TestDrive:` isn't defined.
 
-    cd drive
+    cd backend/drive
     go test -v
 
 You can then run the integration tests which tests all of rclone's
 operations. Normally these get run against the local filing system,
 but they can be run against any of the remotes.
 
-    cd ../fs
+    cd fs/sync
     go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -subdir
 
-If you want to run all the integration tests against all the remotes,
-then run in that directory
+    cd fs/operations
+    go test -v -remote TestDrive:
 
-    go run test_all.go
+If you want to run all the integration tests against all the remotes,
+then change into the project root and run
+
+    make test
+
+This command is run daily on the integration test server. You can
+find the results at https://pub.rclone.org/integration-tests/
+
+## Code Organisation ##
+
+Rclone code is organised into a small number of top level directories
+with modules beneath.
+
+  * backend - the rclone backends for interfacing to cloud providers -
+    * all - import this to load all the cloud providers
+    * ...providers
+  * bin - scripts for use while building or maintaining rclone
+  * cmd - the rclone commands
+    * all - import this to load all the commands
+    * ...commands
+  * docs - the documentation and website
+    * content - adjust these docs only - everything else is autogenerated
+  * fs - main rclone definitions - minimal amount of code
+    * accounting - bandwidth limiting and statistics
+    * asyncreader - an io.Reader which reads ahead
+    * config - manage the config file and flags
+    * driveletter - detect if a name is a drive letter
+    * filter - implements include/exclude filtering
+    * fserrors - rclone specific error handling
+    * fshttp - http handling for rclone
+    * fspath - path handling for rclone
+    * hash - defines rclone's hash types and functions
+    * list - list a remote
+    * log - logging facilities
+    * march - iterates directories in lock step
+    * object - in memory Fs objects
+    * operations - primitives for sync, eg Copy, Move
+    * sync - sync directories
+    * walk - walk a directory
+  * fstest - provides integration test framework
+    * fstests - integration tests for the backends
+    * mockdir - mocks an fs.Directory
+    * mockobject - mocks an fs.Object
+    * test_all - runs integration tests for everything
+  * graphics - the images used in the website etc
+  * lib - libraries used by the backend
+    * atexit - register functions to run when rclone exits
+    * dircache - directory ID to name caching
+    * oauthutil - helpers for using oauth
+    * pacer - retries with backoff and paces operations
+    * readers - a selection of useful io.Readers
+    * rest - a thin abstraction over net/http for REST
+  * vendor - 3rd party code managed by the dep tool
+  * vfs - Virtual FileSystem layer for implementing rclone mount and similar
 
 ## Writing Documentation ##
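As an aside on the new layout: the two `all` directories work through blank imports - importing the package runs each subpackage's `init()`, which registers the backend or command. A minimal sketch of the pattern (the import list here is abbreviated; the full set lives in `backend/all/all.go`):

    // Package all imports the backends so that their init()
    // functions run and register each backend with the fs layer.
    package all

    import (
    	// A blank import is used purely for its registration side effect.
    	_ "github.com/ncw/rclone/backend/b2"
    	_ "github.com/ncw/rclone/backend/box"
    	_ "github.com/ncw/rclone/backend/drive"
    	_ "github.com/ncw/rclone/backend/local"
    )

Any program that imports this package (as the rclone binary does) can then construct any of the registered backends by name.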
@@ -182,15 +235,15 @@ in the `vendor` directory for perfectly reproducable builds.
 
 The `vendor` directory is entirely managed by the `dep` tool.
 
-To add a new dependency
+To add a new dependency, run `dep ensure` and `dep` will pull in the
+new dependency to the `vendor` directory and update the `Gopkg.lock`
+file.
 
-    dep ensure github.com/pkg/errors
-
-You can add constraints on that package in the `Gopkg.toml` file (see
-the `dep` documentation), but don't unless you really need to.
+You can add constraints on that package (see the `dep` documentation),
+but don't unless you really need to.
 
-Please check in the changes generated by dep including the `vendor`
-directory and `Godep.toml` and `Godep.locl` in a single commit
+Please check in the changes generated by `dep` including the `vendor`
+directory and `Godep.toml` and `Godep.lock` in a single commit
 separate from any other code changes. Watch out for new files in
 `vendor`.
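With that change, the day-to-day flow looks roughly like this (after adding the new import to your Go code; exact staging is up to you, per the note above about keeping it in one commit):

    dep ensure
    git status          # watch for new files under vendor/
    git add Gopkg.lock vendor
    git commit -v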
@@ -240,31 +293,34 @@ Research
 
 Getting going
 
-  * Create `remote/remote.go` (copy this from a similar remote)
+  * Create `backend/remote/remote.go` (copy this from a similar remote)
     * box is a good one to start from if you have a directory based remote
     * b2 is a good one to start from if you have a bucket based remote
-  * Add your remote to the imports in `fs/all/all.go`
+  * Add your remote to the imports in `backend/all/all.go`
   * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
   * Try to implement as many optional methods as possible as it makes the remote more usable.
 
 Unit tests
 
   * Create a config entry called `TestRemote` for the unit tests to use
-  * Add your fs to the end of `fstest/fstests/gen_tests.go`
-  * generate `remote/remote_test.go` unit tests `cd fstest/fstests; go generate`
+  * Create a `backend/remote/remote_test.go` - copy and adjust your example remote
   * Make sure all tests pass with `go test -v`
 
 Integration tests
 
-  * Add your fs to `fs/test_all.go`
+  * Add your fs to `fstest/test_all/test_all.go`
   * Make sure integration tests pass with
-    * `cd fs`
+    * `cd fs/operations`
     * `go test -v -remote TestRemote:`
+    * `cd fs/sync`
+    * `go test -v -remote TestRemote:`
   * If you are making a bucket based remote, then check with this also
     * `go test -v -remote TestRemote: -subdir`
   * And if your remote defines `ListR` this also
     * `go test -v -remote TestRemote: -fast-list`
 
 See the [testing](#testing) section for more information on integration tests.
 
 Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
 
   * `README.md` - main Github page
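To make the "copy this from a similar remote" step concrete, here is the rough shape of a backend's registration - a hypothetical backend called `remote`; the field names follow `fs.RegInfo`, but copy the real thing from box or b2 rather than from this sketch:

    package remote

    import (
    	"errors"

    	"github.com/ncw/rclone/fs"
    )

    // init registers the backend so the fs layer and
    // `rclone config` can find it by name.
    func init() {
    	fs.Register(&fs.RegInfo{
    		Name:        "remote",         // hypothetical backend name
    		Description: "Example remote", // shown in rclone config
    		NewFs:       NewFs,
    	})
    }

    // NewFs constructs an Fs from the config name and root path.
    // A real backend builds its Fs here; this stub just errors.
    func NewFs(name, root string) (fs.Fs, error) {
    	return nil, errors.New("remote: not implemented")
    }

And a matching `backend/remote/remote_test.go` can be adapted from the `TestSetup` stanza visible in the deleted generated test at the bottom of this page (this assumes your backend defines an `Object` type, as the existing ones do):

    package remote_test

    import (
    	"testing"

    	"github.com/ncw/rclone/backend/remote"
    	"github.com/ncw/rclone/fs"
    	"github.com/ncw/rclone/fstest/fstests"
    )

    // TestSetup points the generic fstests suite at this backend.
    func TestSetup(t *testing.T) {
    	fstests.NilObject = fs.Object((*remote.Object)(nil))
    	fstests.RemoteName = "TestRemote:"
    }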
340 Gopkg.lock (generated)
@@ -4,68 +4,111 @@
 [[projects]]
   branch = "master"
   name = "bazil.org/fuse"
-  packages = [".","fs","fuseutil"]
-  revision = "371fbbdaa8987b715bdd21d6adc4c9b20155f748"
+  packages = [
+    ".",
+    "fs",
+    "fuseutil"
+  ]
+  revision = "65cc252bf6691cb3c7014bcb2c8dc29de91e3a7e"
 
 [[projects]]
   name = "cloud.google.com/go"
   packages = ["compute/metadata"]
-  revision = "f6de2c509ed9d2af648c3c147207eaaf97149aed"
-  version = "v0.14.0"
+  revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479"
+  version = "v0.23.0"
 
 [[projects]]
-  name = "github.com/Azure/azure-sdk-for-go"
-  packages = ["storage"]
-  revision = "2592daf71ab6b95dcfc7f7437ecc1afb9ddb7360"
-  version = "v11.0.0-beta"
+  name = "github.com/Azure/azure-pipeline-go"
+  packages = ["pipeline"]
+  revision = "7571e8eb0876932ab505918ff7ed5107773e5ee2"
+  version = "0.1.7"
 
 [[projects]]
-  name = "github.com/Azure/go-autorest"
-  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
-  revision = "f6be1abbb5abd0517522f850dd785990d373da7e"
-  version = "v8.4.0"
+  name = "github.com/Azure/azure-storage-blob-go"
+  packages = ["2017-07-29/azblob"]
+  revision = "66ba96e49ebbdc3cd26970c6c675c906d304b5e2"
+  version = "0.1.4"
 
 [[projects]]
   branch = "master"
   name = "github.com/Unknwon/goconfig"
   packages = ["."]
-  revision = "87a46d97951ee1ea20ed3b24c25646a79e87ba5d"
+  revision = "ef1e4c783f8f0478bd8bff0edb3dd0bade552599"
 
 [[projects]]
-  branch = "master"
   name = "github.com/VividCortex/ewma"
   packages = ["."]
-  revision = "43880d236f695d39c62cf7aa4ebd4508c258e6c0"
+  revision = "b24eb346a94c3ba12c1da1e564dbac1b498a77ce"
+  version = "v1.1.1"
 
 [[projects]]
   branch = "master"
   name = "github.com/a8m/tree"
   packages = ["."]
-  revision = "5554ed4554293f11a726accc1ebf2bd3342742f8"
+  revision = "3cf936ce15d6100c49d9c75f79c220ae7e579599"
 
 [[projects]]
   name = "github.com/abbot/go-http-auth"
   packages = ["."]
   revision = "0ddd408d5d60ea76e320503cc7dd091992dee608"
   version = "v0.4.0"
 
 [[projects]]
-  branch = "master"
   name = "github.com/aws/aws-sdk-go"
-  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/s3iface","service/s3/s3manager","service/sts"]
-  revision = "5a2026bfb28e86839f9fcc46523850319399006c"
+  packages = [
+    "aws",
+    "aws/awserr",
+    "aws/awsutil",
+    "aws/client",
+    "aws/client/metadata",
+    "aws/corehandlers",
+    "aws/credentials",
+    "aws/credentials/ec2rolecreds",
+    "aws/credentials/endpointcreds",
+    "aws/credentials/stscreds",
+    "aws/csm",
+    "aws/defaults",
+    "aws/ec2metadata",
+    "aws/endpoints",
+    "aws/request",
+    "aws/session",
+    "aws/signer/v4",
+    "internal/sdkio",
+    "internal/sdkrand",
+    "internal/shareddefaults",
+    "private/protocol",
+    "private/protocol/eventstream",
+    "private/protocol/eventstream/eventstreamapi",
+    "private/protocol/query",
+    "private/protocol/query/queryutil",
+    "private/protocol/rest",
+    "private/protocol/restxml",
+    "private/protocol/xml/xmlutil",
+    "service/s3",
+    "service/s3/s3iface",
+    "service/s3/s3manager",
+    "service/sts"
+  ]
+  revision = "bfc1a07cf158c30c41a3eefba8aae043d0bb5bff"
+  version = "v1.14.8"
 
 [[projects]]
   name = "github.com/billziss-gh/cgofuse"
   packages = ["fuse"]
-  revision = "3a24389863c5bf906de391226ee8c4ec2c925bfe"
-  version = "v1.0.3"
+  revision = "ea66f9809c71af94522d494d3d617545662ea59d"
+  version = "v1.1.0"
 
 [[projects]]
   branch = "master"
   name = "github.com/coreos/bbolt"
   packages = ["."]
-  revision = "a148de800f91fe6cbd8b2a472bbfdc09c4b6568f"
+  revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"
 
 [[projects]]
   name = "github.com/cpuguy83/go-md2man"
   packages = ["md2man"]
-  revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
-  version = "v1.0.7"
+  revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
+  version = "v1.0.8"
@@ -73,12 +116,6 @@
   revision = "346938d642f2ec3594ed81d874461961cd0faa76"
   version = "v1.1.0"
 
-[[projects]]
-  name = "github.com/dgrijalva/jwt-go"
-  packages = ["."]
-  revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c"
-  version = "v3.0.0"
-
 [[projects]]
   name = "github.com/djherbis/times"
   packages = ["."]
@@ -86,22 +123,34 @@
   version = "v1.0.1"
 
 [[projects]]
-  branch = "master"
   name = "github.com/dropbox/dropbox-sdk-go-unofficial"
-  packages = ["dropbox","dropbox/async","dropbox/file_properties","dropbox/files"]
-  revision = "9befe8c89b6e17667716dedd2f62327bae374bf2"
+  packages = [
+    "dropbox",
+    "dropbox/async",
+    "dropbox/common",
+    "dropbox/file_properties",
+    "dropbox/files",
+    "dropbox/seen_state",
+    "dropbox/sharing",
+    "dropbox/team_common",
+    "dropbox/team_policies",
+    "dropbox/users",
+    "dropbox/users_common"
+  ]
+  revision = "7afa861bfde5a348d765522b303b6fbd9d250155"
+  version = "v4.1.0"
 
 [[projects]]
   name = "github.com/go-ini/ini"
   packages = ["."]
-  revision = "20b96f641a5ea98f2f8619ff4f3e061cff4833bd"
-  version = "v1.28.2"
+  revision = "06f5f3d67269ccec1fe5fe4134ba6e982984f7f5"
+  version = "v1.37.0"
 
 [[projects]]
-  branch = "master"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
-  revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
+  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
+  version = "v1.1.0"
 
 [[projects]]
   branch = "master"
@@ -119,7 +168,7 @@
   branch = "master"
   name = "github.com/jlaffaye/ftp"
   packages = ["."]
-  revision = "299b7ff5b6096588cceca2edc1fc9f557002fb85"
+  revision = "2403248fa8cc9f7909862627aa7337f13f8e0bf1"
 
 [[projects]]
   name = "github.com/jmespath/go-jmespath"
@@ -128,9 +177,15 @@
 
 [[projects]]
+  branch = "master"
+  name = "github.com/kardianos/osext"
+  packages = ["."]
+  revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
+
+[[projects]]
   name = "github.com/kr/fs"
   packages = ["."]
-  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
+  revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
+  version = "v0.1.0"
 
 [[projects]]
   name = "github.com/mattn/go-runewidth"
@@ -142,19 +197,19 @@
   branch = "master"
   name = "github.com/ncw/go-acd"
   packages = ["."]
-  revision = "96a49aad3fc3889629f2eceb004927386884bd92"
+  revision = "887eb06ab6a255fbf5744b5812788e884078620a"
 
 [[projects]]
-  branch = "master"
   name = "github.com/ncw/swift"
   packages = ["."]
-  revision = "c95c6e5c2d1a3d37fc44c8c6dc9e231c7500667d"
+  revision = "b2a7479cf26fa841ff90dd932d0221cb5c50782d"
+  version = "v1.0.39"
 
 [[projects]]
   branch = "master"
   name = "github.com/nsf/termbox-go"
   packages = ["."]
-  revision = "4ed959e0540971545eddb8c75514973d670cf739"
+  revision = "5c94acc5e6eb520f1bcd183974e01171cc4c23b3"
 
 [[projects]]
   branch = "master"
@@ -163,28 +218,34 @@
   revision = "ed8ca104421a21947710335006107540e3ecb335"
 
 [[projects]]
-  branch = "master"
   name = "github.com/patrickmn/go-cache"
   packages = ["."]
   revision = "a3647f8e31d79543b2d0f0ae2fe5c379d72cedc0"
+  version = "v2.1.0"
 
 [[projects]]
   name = "github.com/pengsrc/go-shared"
-  packages = ["check","convert","json","yaml"]
-  revision = "454950d6a0782c34427d4f29b46c6bf447256f20"
-  version = "v0.0.8"
+  packages = [
+    "buffer",
+    "check",
+    "convert",
+    "log",
+    "reopen"
+  ]
+  revision = "807ee759d82c84982a89fb3dc875ef884942f1e5"
+  version = "v0.2.0"
 
 [[projects]]
-  branch = "master"
   name = "github.com/pkg/errors"
   packages = ["."]
-  revision = "2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb"
+  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
+  version = "v0.8.0"
 
 [[projects]]
-  branch = "master"
   name = "github.com/pkg/sftp"
   packages = ["."]
-  revision = "7c1f7a370726a2457b33b29baefc2402b4965c65"
+  revision = "57673e38ea946592a59c26592b7e6fbda646975b"
+  version = "1.8.0"
 
 [[projects]]
   name = "github.com/pmezard/go-difflib"
@@ -193,28 +254,22 @@
   version = "v1.0.0"
 
 [[projects]]
-  branch = "master"
   name = "github.com/rfjakob/eme"
   packages = ["."]
-  revision = "7c8316a9cb0a6af865265f899f5de6aadb31a24b"
+  revision = "01668ae55fe0b79a483095689043cce3e80260db"
+  version = "v1.1"
 
 [[projects]]
   name = "github.com/russross/blackfriday"
   packages = ["."]
-  revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
-  version = "v1.5"
+  revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
+  version = "v1.5.1"
 
 [[projects]]
-  name = "github.com/satori/uuid"
+  branch = "master"
+  name = "github.com/sevlyar/go-daemon"
   packages = ["."]
   revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
-  version = "v1.1.0"
 
 [[projects]]
   name = "github.com/sirupsen/logrus"
   packages = ["."]
-  revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e"
-  version = "v1.0.3"
+  revision = "f9261e73885de99b1647d68bedadf2b9a99ad11f"
 
 [[projects]]
   branch = "master"
@@ -223,22 +278,34 @@
   revision = "75fb7ed4208cf72d323d7d02fd1a5964a7a9073c"
 
 [[projects]]
-  branch = "master"
   name = "github.com/spf13/cobra"
-  packages = [".","doc"]
-  revision = "e5f66de850af3302fbe378c8acded2b0fa55472c"
+  packages = [
+    ".",
+    "doc"
+  ]
+  revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385"
+  version = "v0.0.3"
 
 [[projects]]
-  branch = "master"
   name = "github.com/spf13/pflag"
   packages = ["."]
-  revision = "7aff26db30c1be810f9de5038ec5ef96ac41fd7c"
+  revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
+  version = "v1.0.1"
 
 [[projects]]
+  name = "github.com/stretchr/testify"
+  packages = [
+    "assert",
+    "require"
+  ]
+  revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686"
+  version = "v1.2.2"
+
+[[projects]]
   branch = "master"
-  name = "github.com/stretchr/testify"
-  packages = ["assert","require"]
-  revision = "890a5c3458b43e6104ff5da8dfa139d013d77544"
+  name = "github.com/t3rm1n4l/go-mega"
+  packages = ["."]
+  revision = "57978a63bd3f91fa7e188b751a7e7e6dd4e33813"
 
 [[projects]]
   branch = "master"
@@ -247,68 +314,151 @@
   revision = "ba9c9e33906f58169366275e3450db66139a31a9"
 
 [[projects]]
-  branch = "master"
   name = "github.com/yunify/qingstor-sdk-go"
-  packages = [".","config","logger","request","request/builder","request/data","request/errors","request/signer","request/unpacker","service","utils"]
-  revision = "088fbd27bd49adf215d02a05c36c5ac2d243d1f1"
+  packages = [
+    ".",
+    "config",
+    "logger",
+    "request",
+    "request/builder",
+    "request/data",
+    "request/errors",
+    "request/signer",
+    "request/unpacker",
+    "service",
+    "utils"
+  ]
+  revision = "4f9ac88c5fec7350e960aabd0de1f1ede0ad2895"
+  version = "v2.2.14"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
-  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","nacl/secretbox","pbkdf2","poly1305","salsa20/salsa","scrypt","ssh","ssh/agent","ssh/terminal"]
-  revision = "76eec36fa14229c4b25bb894c2d0e591527af429"
+  packages = [
+    "bcrypt",
+    "blowfish",
+    "curve25519",
+    "ed25519",
+    "ed25519/internal/edwards25519",
+    "internal/chacha20",
+    "internal/subtle",
+    "nacl/secretbox",
+    "pbkdf2",
+    "poly1305",
+    "salsa20/salsa",
+    "scrypt",
+    "ssh",
+    "ssh/agent",
+    "ssh/terminal"
+  ]
+  revision = "027cca12c2d63e3d62b670d901e8a2c95854feec"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = ["context","context/ctxhttp","html","html/atom","webdav","webdav/internal/xml"]
-  revision = "0a9397675ba34b2845f758fe3cd68828369c6517"
+  packages = [
+    "context",
+    "context/ctxhttp",
+    "html",
+    "html/atom",
+    "http/httpguts",
+    "http2",
+    "http2/hpack",
+    "idna",
+    "publicsuffix",
+    "webdav",
+    "webdav/internal/xml",
+    "websocket"
+  ]
+  revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/oauth2"
-  packages = [".","google","internal","jws","jwt"]
-  revision = "bb50c06baba3d0c76f9d125c0719093e315b5b44"
+  packages = [
+    ".",
+    "google",
+    "internal",
+    "jws",
+    "jwt"
+  ]
+  revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
-  packages = ["unix","windows"]
-  revision = "314a259e304ff91bd6985da2a7149bbf91237993"
+  packages = [
+    "unix",
+    "windows"
+  ]
+  revision = "6c888cc515d3ed83fc103cf1d84468aad274b0a7"
 
 [[projects]]
-  branch = "master"
   name = "golang.org/x/text"
-  packages = ["internal/gen","internal/triegen","internal/ucd","transform","unicode/cldr","unicode/norm"]
-  revision = "1cbadb444a806fd9430d14ad08967ed91da4fa0a"
+  packages = [
+    "collate",
+    "collate/build",
+    "internal/colltab",
+    "internal/gen",
+    "internal/tag",
+    "internal/triegen",
+    "internal/ucd",
+    "language",
+    "secure/bidirule",
+    "transform",
+    "unicode/bidi",
+    "unicode/cldr",
+    "unicode/norm",
+    "unicode/rangetable"
+  ]
+  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
+  version = "v0.3.0"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/time"
   packages = ["rate"]
-  revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
+  revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
 
 [[projects]]
   branch = "master"
   name = "google.golang.org/api"
-  packages = ["drive/v2","gensupport","googleapi","googleapi/internal/uritemplates","storage/v1"]
-  revision = "906273f42cdebd65de3a53f30dd9e23de1b55ba9"
+  packages = [
+    "drive/v3",
+    "gensupport",
+    "googleapi",
+    "googleapi/internal/uritemplates",
+    "storage/v1"
+  ]
+  revision = "2eea9ba0a3d94f6ab46508083e299a00bbbc65f6"
 
 [[projects]]
   name = "google.golang.org/appengine"
-  packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","log","urlfetch"]
-  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
-  version = "v1.0.0"
+  packages = [
+    ".",
+    "internal",
+    "internal/app_identity",
+    "internal/base",
+    "internal/datastore",
+    "internal/log",
+    "internal/modules",
+    "internal/remote_api",
+    "internal/urlfetch",
+    "log",
+    "urlfetch"
+  ]
+  revision = "b1f26356af11148e710935ed1ac8a7f5702c7612"
+  version = "v1.1.0"
 
 [[projects]]
-  branch = "v2"
   name = "gopkg.in/yaml.v2"
   packages = ["."]
-  revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"
+  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
+  version = "v2.2.1"
 
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "4dfe75b3ebfff3e7b40e2983d8c014f9e4d98fed08c3fe454f1dc890589e6960"
+  inputs-digest = "898be2e0549915b0f877529a45db62ce6b9904e7ecf8e3fed48d768d429c32ce"
   solver-name = "gps-cdcl"
   solver-version = 1
153 Gopkg.toml
@@ -1,150 +1,15 @@
-
-## Gopkg.toml example (these lines may be deleted)
-
-## "required" lists a set of packages (not projects) that must be included in
-## Gopkg.lock. This list is merged with the set of packages imported by the current
-## project. Use it when your project needs a package it doesn't explicitly import -
-## including "main" packages.
-# required = ["github.com/user/thing/cmd/thing"]
-
-## "ignored" lists a set of packages (not projects) that are ignored when
-## dep statically analyzes source code. Ignored packages can be in this project,
-## or in a dependency.
-# ignored = ["github.com/user/project/badpkg"]
-
-## Dependencies define constraints on dependent projects. They are respected by
-## dep whether coming from the Gopkg.toml of the current project or a dependency.
-# [[constraint]]
-## Required: the root import path of the project being constrained.
-# name = "github.com/user/project"
-#
-## Recommended: the version constraint to enforce for the project.
-## Only one of "branch", "version" or "revision" can be specified.
-# version = "1.0.0"
-# branch = "master"
-# revision = "abc123"
-#
-## Optional: an alternate location (URL or import path) for the project's source.
-# source = "https://github.com/myfork/package.git"
-
-## Overrides have the same structure as [[constraint]], but supercede all
-## [[constraint]] declarations from all projects. Only the current project's
-## [[override]] are applied.
-##
-## Overrides are a sledgehammer. Use them only as a last resort.
-# [[override]]
-## Required: the root import path of the project being constrained.
-# name = "github.com/user/project"
-#
-## Optional: specifying a version constraint override will cause all other
-## constraints on this project to be ignored; only the overriden constraint
-## need be satisfied.
-## Again, only one of "branch", "version" or "revision" can be specified.
-# version = "1.0.0"
-# branch = "master"
-# revision = "abc123"
-#
-## Optional: specifying an alternate source location as an override will
-## enforce that the alternate location is used for that project, regardless of
-## what source location any dependent projects specify.
-# source = "https://github.com/myfork/package.git"
-
-[[constraint]]
+# pin this to master to pull in the macOS changes
+# can likely remove for 1.43
+[[override]]
   branch = "master"
-  name = "bazil.org/fuse"
+  name = "github.com/sevlyar/go-daemon"
 
-[[constraint]]
-  branch = "master"
-  name = "github.com/Unknwon/goconfig"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/VividCortex/ewma"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/aws/aws-sdk-go"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/ncw/go-acd"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/ncw/swift"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/pkg/errors"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/pkg/sftp"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/rfjakob/eme"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/skratchdot/open-golang"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/spf13/cobra"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/spf13/pflag"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/stacktic/dropbox"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/stretchr/testify"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/crypto"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/net"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/oauth2"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/sys"
-
-[[constraint]]
-  branch = "master"
-  name = "golang.org/x/text"
-
-[[constraint]]
-  branch = "master"
-  name = "google.golang.org/api"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/dropbox/dropbox-sdk-go-unofficial"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/yunify/qingstor-sdk-go"
-
-[[constraint]]
+# pin this to master to pull in the fix for linux/mips
+# can likely remove for 1.43
+[[override]]
   branch = "master"
   name = "github.com/coreos/bbolt"
 
-[[constraint]]
-  branch = "master"
-  name = "github.com/patrickmn/go-cache"
-
 [[constraint]]
-  branch = "master"
-  name = "github.com/okzk/sdnotify"
+  name = "github.com/Azure/azure-storage-blob-go"
+  version = "0.1.4"
@@ -1,17 +1,43 @@
-When filing an issue, please include the following information if possible as well as a description of the problem. Make sure you test with the latest beta of rclone.
+<!--
 
-If you've just got a question or aren't sure if you've found a bug then please use the [rclone forum](https://forum.rclone.org/) instead of filing an issue.
+Hi!
 
-> What is your rclone version (eg output from `rclone -V`)
+We understand you are having a problem with rclone or have an idea for an improvement - we want to help you with that!
 
-> Which OS you are using and how many bits (eg Windows 7, 64 bit)
+If you've just got a question or aren't sure if you've found a bug then please use the rclone forum
 
-> Which cloud storage system are you using? (eg Google Drive)
+    https://forum.rclone.org/
 
-> The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
+instead of filing an issue. We'll reply quickly and it won't increase our massive issue backlog.
 
-> A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
+If you think you might have found a bug, please can you try to replicate it with the latest beta?
+
+    https://beta.rclone.org/
+    https://rclone.org/downloads/
+
+If you can still replicate it with the latest beta, then please fill in the info below which makes our lives much easier. A log with -vv will make our day :-)
+
+If you have an idea for an improvement, then please search the old issues first and if you don't find your idea, make a new issue.
+
+Thanks
+
+The Rclone Developers
+
+-->
+
+#### What is the problem you are having with rclone?
+
+
+#### What is your rclone version (eg output from `rclone -V`)
+
+
+#### Which OS you are using and how many bits (eg Windows 7, 64 bit)
+
+
+#### Which cloud storage system are you using? (eg Google Drive)
+
+
+#### The command you were trying to run (eg `rclone copy /tmp remote:tmp`)
+
+
+#### A log from the command with the `-vv` flag (eg output from `rclone -vv copy /tmp remote:tmp`)
MAINTAINERS.md
@@ -6,6 +6,7 @@ Current active maintainers of rclone are
 * Stefan Breunig @breunigs
 * Ishuah Kariuki @ishuah
 * Remus Bunduc @remusb - cache subsystem maintainer
+* Fabian Möller @B4dM4n
 
 **This is a work in progress Draft**
2436 MANUAL.html (diff suppressed because it is too large)
3595 MANUAL.txt (diff suppressed because it is too large)
115 Makefile
@@ -1,11 +1,22 @@
-SHELL = /bin/bash
-TAG := $(shell echo `git describe --abbrev=8 --tags`-`git rev-parse --abbrev-ref HEAD` | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/; s/-\(HEAD\|master\)$$//')
+SHELL = bash
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
+TAG_BRANCH := -$(BRANCH)
+BRANCH_PATH := branch/
+ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
+	TAG_BRANCH :=
+	BRANCH_PATH :=
+endif
+TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
 LAST_TAG := $(shell git describe --tags --abbrev=0)
 NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f", $$_)')
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-GO_LATEST := $(findstring go1.9,$(GO_VERSION))
-BETA_URL := https://beta.rclone.org/$(TAG)/
+# Run full tests if go >= go1.9
+FULL_TESTS := $(shell go version | perl -lne 'print "go$$1.$$2" if /go(\d+)\.(\d+)/ && ($$1 > 1 || $$2 >= 9)')
+BETA_PATH := $(BRANCH_PATH)$(TAG)
+BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
+BETA_UPLOAD_ROOT := memstore:beta-rclone-org
+BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)
 # Pass in GOTAGS=xyz on the make command line to set build tags
 ifdef GOTAGS
 BUILDTAGS=-tags "$(GOTAGS)"
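To make the new branch handling concrete, here is a hypothetical set of values for a build of a feature branch a few commits past a tag (all values illustrative, derived from the rules above):

    BRANCH    = fix-fuse
    TAG       = v1.41-003-g1234abcd-fix-fuse
    BETA_PATH = branch/v1.41-003-g1234abcd-fix-fuse
    BETA_URL  = https://beta.rclone.org/branch/v1.41-003-g1234abcd-fix-fuse/

On master (or a detached HEAD) the ifeq empties TAG_BRANCH and BRANCH_PATH, so the tag carries no branch suffix and the beta upload has no branch/ prefix.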
@@ -20,11 +31,12 @@ rclone:
 
 vars:
 	@echo SHELL="'$(SHELL)'"
+	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
 	@echo LAST_TAG="'$(LAST_TAG)'"
 	@echo NEW_TAG="'$(NEW_TAG)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
-	@echo GO_LATEST="'$(GO_LATEST)'"
+	@echo FULL_TESTS="'$(FULL_TESTS)'"
 	@echo BETA_URL="'$(BETA_URL)'"
 
 version:
@@ -32,38 +44,53 @@ version:
 
 # Full suite of integration tests
 test: rclone
-	-go test $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
-	-cd fs && go run $(BUILDTAGS) test_all.go 2>&1 | tee test_all.log
+	go install github.com/ncw/rclone/fstest/test_all
+	-go test -v -count 1 $(BUILDTAGS) $(GO_FILES) 2>&1 | tee test.log
+	-test_all github.com/ncw/rclone/fs/operations github.com/ncw/rclone/fs/sync 2>&1 | tee fs/test_all.log
+	@echo "Written logs in test.log and fs/test_all.log"
 
 # Quick test
 quicktest:
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) $(GO_FILES)
-ifdef GO_LATEST
+ifdef FULL_TESTS
 	RCLONE_CONFIG="/notfound" go test $(BUILDTAGS) -cpu=2 -race $(GO_FILES)
 endif
 
 # Do source code quality checks
 check: rclone
-ifdef GO_LATEST
+ifdef FULL_TESTS
 	go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
 	errcheck $(BUILDTAGS) ./...
 	find . -name \*.go | grep -v /vendor/ | xargs goimports -d | grep . ; test $$? -eq 1
 	go list ./... | xargs -n1 golint | grep -E -v '(StorageUrl|CdnUrl)' ; test $$? -eq 1
 else
-	@echo Skipping tests as not on Go stable
+	@echo Skipping source quality tests as version of go too old
 endif
 
+gometalinter_install:
+	go get -u github.com/alecthomas/gometalinter
+	gometalinter --install --update
+
+# We aren't using gometalinter as the default linter yet because
+# 1. it doesn't support build tags: https://github.com/alecthomas/gometalinter/issues/275
+# 2. can't get -printfuncs working with the vet linter
+gometalinter:
+	gometalinter ./...
+
 # Get the build dependencies
 build_dep:
-ifdef GO_LATEST
+ifdef FULL_TESTS
 	go get -u github.com/kisielk/errcheck
 	go get -u golang.org/x/tools/cmd/goimports
 	go get -u github.com/golang/lint/golint
-	go get -u github.com/inconshreveable/mousetrap
-	go get -u github.com/tools/godep
 endif
 
 # Get the release dependencies
 release_dep:
+	go get -u github.com/goreleaser/nfpm/...
 	go get -u github.com/aktau/github-release
 
 # Update dependencies
 update:
 	go get -u github.com/golang/dep/cmd/dep
@@ -86,6 +113,9 @@ MANUAL.txt: MANUAL.md
 commanddocs: rclone
 	rclone gendocs docs/content/commands/
 
+rcdocs: rclone
+	bin/make_rc_docs.sh
+
 install: rclone
 	install -d ${DESTDIR}/usr/bin
 	install -t ${DESTDIR}/usr/bin ${GOPATH}/bin/rclone
@@ -94,7 +124,7 @@ clean:
 	go clean ./...
 	find . -name \*~ | xargs -r rm -f
 	rm -rf build docs/public
-	rm -f rclone fs/fs.test fs/test_all.log test.log
+	rm -f rclone fs/operations/operations.test fs/sync/sync.test fs/test_all.log test.log
 
 website:
 	cd docs && hugo
@@ -102,8 +132,22 @@ website:
 upload_website: website
 	rclone -v sync docs/public memstore:www-rclone-org
 
 tarball:
 	git archive -9 --format=tar.gz --prefix=rclone-$(TAG)/ -o build/rclone-$(TAG).tar.gz $(TAG)
 
+sign_upload:
+	cd build && md5sum rclone-v* | gpg --clearsign > MD5SUMS
+	cd build && sha1sum rclone-v* | gpg --clearsign > SHA1SUMS
+	cd build && sha256sum rclone-v* | gpg --clearsign > SHA256SUMS
+
+check_sign:
+	cd build && gpg --verify MD5SUMS && gpg --decrypt MD5SUMS | md5sum -c
+	cd build && gpg --verify SHA1SUMS && gpg --decrypt SHA1SUMS | sha1sum -c
+	cd build && gpg --verify SHA256SUMS && gpg --decrypt SHA256SUMS | sha256sum -c
+
 upload:
-	rclone -v copy build/ memstore:downloads-rclone-org
+	rclone -v copy --exclude '*current*' build/ memstore:downloads-rclone-org/$(TAG)
+	rclone -v copy --include '*current*' --include version.txt build/ memstore:downloads-rclone-org
 
 upload_github:
 	./bin/upload-github $(TAG)
@@ -119,28 +163,40 @@ beta:
 log_since_last_release:
 	git log $(LAST_TAG)..
 
 upload_beta:
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
 	@echo Beta release ready at $(BETA_URL)
 
 compile_all:
-ifdef GO_LATEST
+ifdef FULL_TESTS
 	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)β
 else
-	@echo Skipping compile all as not on Go stable
+	@echo Skipping compile all as version of go too old
 endif
 
+appveyor_upload:
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+ifndef BRANCH_PATH
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+endif
+	@echo Beta release ready at $(BETA_URL)
+
+BUILD_FLAGS := -exclude "^(windows|darwin)/"
+ifeq ($(TRAVIS_OS_NAME),osx)
+	BUILD_FLAGS := -include "^darwin/" -cgo
+endif
+
 travis_beta:
+ifeq ($(TRAVIS_OS_NAME),linux)
 	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt -exclude "^windows/" -parallel 8 $(BUILDTAGS) $(TAG)β
-	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ memstore:beta-rclone-org/$(TAG)
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ memstore:beta-rclone-org
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)β
+	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
+ifndef BRANCH_PATH
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+endif
 	@echo Beta release ready at $(BETA_URL)
 
 # Fetch the windows builds from appveyor
 fetch_windows:
-	rclone -v copy --include 'rclone-v*-windows-*.zip' memstore:beta-rclone-org/$(TAG) build/
+	rclone -v copy --include 'rclone-v*-windows-*.zip' $(BETA_UPLOAD) build/
 	-#cp -av build/rclone-v*-windows-386.zip build/rclone-current-windows-386.zip
 	-#cp -av build/rclone-v*-windows-amd64.zip build/rclone-current-windows-amd64.zip
 	md5sum build/rclone-*-windows-*.zip | sort
@@ -152,8 +208,8 @@ tag: doc
 	@echo "Old tag is $(LAST_TAG)"
 	@echo "New tag is $(NEW_TAG)"
 	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEW_TAG)" > docs/layouts/shortcodes/version.html
-	git tag $(NEW_TAG)
+	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
+	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo " * $(NEW_TAG) -" `date -I` >> docs/content/changelog.md
 	@git log $(LAST_TAG)..$(NEW_TAG) --oneline >> docs/content/changelog.md
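For reference, the `echo ... | gofmt` pipeline in the tag target above writes a `fs/version.go` of exactly this shape (version string illustrative):

    package fs

    // Version of rclone
    var Version = "v1.42"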
@@ -162,15 +218,12 @@ tag: doc
 	@echo "And finally run make retag before make cross etc"
 
 retag:
-	git tag -f $(LAST_TAG)
+	git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
 
 startdev:
 	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
 	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
 
-gen_tests:
-	cd fstest/fstests && go generate
-
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
README.md
@@ -25,8 +25,10 @@ Rclone is a command line program to sync files and directories to and from
   * Google Drive
   * HTTP
   * Hubic
+  * Mega
   * Microsoft Azure Blob Storage
   * Microsoft OneDrive
+  * OpenDrive
   * Openstack Swift / Rackspace cloud files / Memset Memstore / OVH / Oracle Cloud Storage
   * pCloud
   * QingStor
10 RELEASE.md
@@ -6,19 +6,24 @@ Making a release
   * git status - make sure everything is checked in
   * Check travis & appveyor builds are green
   * make check
-  * make test
+  * make test # see integration test server or run locally
   * make tag
   * edit docs/content/changelog.md
   * make doc
   * git status - to check for new man pages - git add them
   * git commit -a -v -m "Version v1.XX"
   * make retag
+  * make release_dep
+  * # Set the GOPATH for a current stable go compiler
   * make cross
   * git checkout docs/content/commands # to undo date changes in commands
   * git push --tags origin master
+  * git push --tags origin master:stable # update the stable branch for packager.io
+  * # Wait for the appveyor and travis builds to complete then fetch the windows binaries from appveyor
   * make fetch_windows
+  * make tarball
+  * make sign_upload
+  * make check_sign
   * make upload
   * make upload_website
   * make upload_github
@@ -26,10 +31,11 @@ Making a release
   * # announce with forum post, twitter post, G+ post
 
 Early in the next release cycle update the vendored dependencies
+  * Review any pinned packages in Gopkg.toml and remove if possible
   * make update
   * git status
   * git add new files
+  * carry forward any patches to vendor stuff
   * git commit -a -v
 
 Make the version number be just in a file?
@@ -1,73 +0,0 @@
// Test AmazonCloudDrive filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package amazonclouddrive_test

import (
	"testing"

	"github.com/ncw/rclone/amazonclouddrive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
File diff suppressed because it is too large
@@ -1,76 +0,0 @@
// Test AzureBlob filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests

// +build go1.7

package azureblob_test

import (
	"testing"

	"github.com/ncw/rclone/azureblob"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*azureblob.Object)(nil))
	fstests.RemoteName = "TestAzureBlob:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files"

// +build !go1.7

package azureblob
301 b2/api/types.go
@@ -1,301 +0,0 @@
package api

import (
	"fmt"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
)

// Error describes a B2 error response
type Error struct {
	Status  int    `json:"status"`  // The numeric HTTP status code. Always matches the status in the HTTP response.
	Code    string `json:"code"`    // A single-identifier code that identifies the error.
	Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}

// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
	return e.Status == 403 // 403 errors shouldn't be retried
}

var _ fs.Fataler = (*Error)(nil)

// Account describes a B2 account
type Account struct {
	ID string `json:"accountId"` // The identifier for the account.
}

// Bucket describes a B2 bucket
type Bucket struct {
	ID        string `json:"bucketId"`
	AccountID string `json:"accountId"`
	Name      string `json:"bucketName"`
	Type      string `json:"bucketType"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
	timestamp := (*time.Time)(t).UTC().UnixNano()
	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
	timestamp, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
	return nil
}
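Since a B2 Timestamp travels as a bare base-10 millisecond count, here is a minimal sketch of the round trip through the two methods above. The literal 1452802803026 is the example millisecond value from the fileInfo docs further down in this file; the b2/api import path is the one upload.go uses:

package main

import (
	"fmt"
	"log"

	"github.com/ncw/rclone/b2/api"
)

func main() {
	// B2 sends upload times as bare base-10 milliseconds since the epoch.
	var ts api.Timestamp
	if err := ts.UnmarshalJSON([]byte("1452802803026")); err != nil {
		log.Fatal(err)
	}
	// MarshalJSON emits the same millisecond count back out.
	out, err := ts.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out)) // 1452802803026
}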
const versionFormat = "-v2006-01-02-150405.000"

// AddVersion adds the timestamp as a version string into the filename passed in.
func (t Timestamp) AddVersion(remote string) string {
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	s := (time.Time)(t).Format(versionFormat)
	// Replace the '.' with a '-'
	s = strings.Replace(s, ".", "-", -1)
	return base + s + ext
}

// RemoveVersion removes the timestamp from a filename as a version string.
//
// It returns the new file name and a timestamp, or the old filename
// and a zero timestamp.
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
	newRemote = remote
	ext := path.Ext(remote)
	base := remote[:len(remote)-len(ext)]
	if len(base) < len(versionFormat) {
		return
	}
	versionStart := len(base) - len(versionFormat)
	// Check it ends in -xxx
	if base[len(base)-4] != '-' {
		return
	}
	// Replace with .xxx for parsing
	base = base[:len(base)-4] + "." + base[len(base)-3:]
	newT, err := time.Parse(versionFormat, base[versionStart:])
	if err != nil {
		return
	}
	return Timestamp(newT), base[:versionStart] + ext
}
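Concretely, AddVersion splices the formatted timestamp in front of the extension and swaps the fractional-second dot for a dash, and RemoveVersion inverts that. A worked sketch with a hypothetical file name and time:

package main

import (
	"fmt"
	"time"

	"github.com/ncw/rclone/b2/api"
)

func main() {
	// A file "data.txt" versioned at 2016-01-14 21:00:03.026 UTC.
	t := api.Timestamp(time.Date(2016, 1, 14, 21, 0, 3, 26*1e6, time.UTC))
	versioned := t.AddVersion("data.txt")
	fmt.Println(versioned) // data-v2016-01-14-210003-026.txt

	// RemoveVersion recovers the original name and the timestamp.
	got, plain := api.RemoveVersion(versioned)
	fmt.Println(plain, t.Equal(got)) // data.txt true
}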
// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
	return (time.Time)(t).IsZero()
}

// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
	if (time.Time)(t).IsZero() {
		return false
	}
	if (time.Time)(s).IsZero() {
		return false
	}
	return (time.Time)(t).Equal((time.Time)(s))
}

// File is info about a file
type File struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	Action          string            `json:"action"`          // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	Size            int64             `json:"size"`            // The number of bytes in the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1            string            `json:"contentSha1"`     // The SHA1 of the bytes stored in the file.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
	AccountID          string `json:"accountId"`          // The identifier for the account.
	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	APIURL             string `json:"apiUrl"`             // The base URL to use for all API calls except for uploading and downloading files.
	DownloadURL        string `json:"downloadUrl"`        // The base URL to use for downloading files.
}

// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
	Buckets []Bucket `json:"buckets"`
}

// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
	BucketID      string `json:"bucketId"`                // required - The bucket to look for file names in.
	StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this name will be returned.
	MaxFileCount  int    `json:"maxFileCount,omitempty"`  // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
	StartFileID   string `json:"startFileId,omitempty"`   // optional - What to pass in to startFileId for the next search to continue where this one left off.
	Prefix        string `json:"prefix,omitempty"`        // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
	Delimiter     string `json:"delimiter,omitempty"`     // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}

// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
	Files        []File  `json:"files"`        // An array of objects, each one describing one file.
	NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
	NextFileID   *string `json:"nextFileId"`   // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}

// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
	BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}

// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
	BucketID           string `json:"bucketId"`           // The unique ID of the bucket.
	UploadURL          string `json:"uploadUrl"`          // The URL that can be used to upload files to this bucket, see b2_upload_file.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}

// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	Action          string            `json:"action"`          // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	AccountID       string            `json:"accountId"`       // Your account ID.
	BucketID        string            `json:"bucketId"`        // The bucket that the file is in.
	Size            int64             `json:"contentLength"`   // The number of bytes stored in the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1            string            `json:"contentSha1"`     // The SHA1 of the bytes stored in the file.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
	AccountID string `json:"accountId"`
	Name      string `json:"bucketName"`
	Type      string `json:"bucketType"`
}

// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
	ID        string `json:"bucketId"`
	AccountID string `json:"accountId"`
}

// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
	ID   string `json:"fileId"`   // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
	Name string `json:"fileName"` // The name of this file.
}

// HideFileRequest is used to hide a file
type HideFileRequest struct {
	BucketID string `json:"bucketId"` // The bucket containing the file to hide.
	Name     string `json:"fileName"` // The name of the file to hide.
}

// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
	ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}

// StartLargeFileRequest (b2_start_large_file) prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
	BucketID    string            `json:"bucketId"`    // The ID of the bucket that the file will go in.
	Name        string            `json:"fileName"`    // The name of the file. See Files for requirements on file names.
	ContentType string            `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
	Info        map[string]string `json:"fileInfo"`    // A JSON object holding the name/value pairs for the custom file info.
}

// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
	ID              string            `json:"fileId"`          // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name            string            `json:"fileName"`        // The name of this file, which can be used with b2_download_file_by_name.
	AccountID       string            `json:"accountId"`       // The identifier for the account.
	BucketID        string            `json:"bucketId"`        // The unique ID of the bucket.
	ContentType     string            `json:"contentType"`     // The MIME type of the file.
	Info            map[string]string `json:"fileInfo"`        // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
	UploadTimestamp Timestamp         `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}

// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}

// GetUploadPartURLResponse is received from b2_get_upload_part_url
type GetUploadPartURLResponse struct {
	ID                 string `json:"fileId"`             // The unique identifier of the file being uploaded.
	UploadURL          string `json:"uploadUrl"`          // The URL that can be used to upload files to this bucket, see b2_upload_part.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
}

// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
	ID         string `json:"fileId"`        // The unique identifier of the file being uploaded.
	PartNumber int64  `json:"partNumber"`    // Which part this is (starting from 1)
	Size       int64  `json:"contentLength"` // The number of bytes stored in the file.
	SHA1       string `json:"contentSha1"`   // The SHA1 of the bytes stored in the file.
}

// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
	ID    string   `json:"fileId"`        // The unique identifier of the file being uploaded.
	SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of part 1 is the first string in the array, at index 0.
}

// CancelLargeFileRequest is passed to b2_cancel_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}

// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
	ID        string `json:"fileId"`    // The unique identifier of the file being uploaded.
	Name      string `json:"fileName"`  // The name of this file.
	AccountID string `json:"accountId"` // The identifier for the account.
	BucketID  string `json:"bucketId"`  // The unique ID of the bucket.
}
@@ -1,73 +0,0 @@
// Test B2 filesystem interface
//
// Automatically generated - DO NOT EDIT
// Regenerate with: make gen_tests
package b2_test

import (
	"testing"

	"github.com/ncw/rclone/b2"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

func TestSetup(t *testing.T) {
	fstests.NilObject = fs.Object((*b2.Object)(nil))
	fstests.RemoteName = "TestB2:"
}

// Generic tests for the Fs
func TestInit(t *testing.T) { fstests.TestInit(t) }
func TestFsString(t *testing.T) { fstests.TestFsString(t) }
func TestFsName(t *testing.T) { fstests.TestFsName(t) }
func TestFsRoot(t *testing.T) { fstests.TestFsRoot(t) }
func TestFsRmdirEmpty(t *testing.T) { fstests.TestFsRmdirEmpty(t) }
func TestFsRmdirNotFound(t *testing.T) { fstests.TestFsRmdirNotFound(t) }
func TestFsMkdir(t *testing.T) { fstests.TestFsMkdir(t) }
func TestFsMkdirRmdirSubdir(t *testing.T) { fstests.TestFsMkdirRmdirSubdir(t) }
func TestFsListEmpty(t *testing.T) { fstests.TestFsListEmpty(t) }
func TestFsListDirEmpty(t *testing.T) { fstests.TestFsListDirEmpty(t) }
func TestFsListRDirEmpty(t *testing.T) { fstests.TestFsListRDirEmpty(t) }
func TestFsNewObjectNotFound(t *testing.T) { fstests.TestFsNewObjectNotFound(t) }
func TestFsPutFile1(t *testing.T) { fstests.TestFsPutFile1(t) }
func TestFsPutError(t *testing.T) { fstests.TestFsPutError(t) }
func TestFsPutFile2(t *testing.T) { fstests.TestFsPutFile2(t) }
func TestFsUpdateFile1(t *testing.T) { fstests.TestFsUpdateFile1(t) }
func TestFsListDirFile2(t *testing.T) { fstests.TestFsListDirFile2(t) }
func TestFsListRDirFile2(t *testing.T) { fstests.TestFsListRDirFile2(t) }
func TestFsListDirRoot(t *testing.T) { fstests.TestFsListDirRoot(t) }
func TestFsListRDirRoot(t *testing.T) { fstests.TestFsListRDirRoot(t) }
func TestFsListSubdir(t *testing.T) { fstests.TestFsListSubdir(t) }
func TestFsListRSubdir(t *testing.T) { fstests.TestFsListRSubdir(t) }
func TestFsListLevel2(t *testing.T) { fstests.TestFsListLevel2(t) }
func TestFsListRLevel2(t *testing.T) { fstests.TestFsListRLevel2(t) }
func TestFsListFile1(t *testing.T) { fstests.TestFsListFile1(t) }
func TestFsNewObject(t *testing.T) { fstests.TestFsNewObject(t) }
func TestFsListFile1and2(t *testing.T) { fstests.TestFsListFile1and2(t) }
func TestFsNewObjectDir(t *testing.T) { fstests.TestFsNewObjectDir(t) }
func TestFsCopy(t *testing.T) { fstests.TestFsCopy(t) }
func TestFsMove(t *testing.T) { fstests.TestFsMove(t) }
func TestFsDirMove(t *testing.T) { fstests.TestFsDirMove(t) }
func TestFsRmdirFull(t *testing.T) { fstests.TestFsRmdirFull(t) }
func TestFsPrecision(t *testing.T) { fstests.TestFsPrecision(t) }
func TestFsDirChangeNotify(t *testing.T) { fstests.TestFsDirChangeNotify(t) }
func TestObjectString(t *testing.T) { fstests.TestObjectString(t) }
func TestObjectFs(t *testing.T) { fstests.TestObjectFs(t) }
func TestObjectRemote(t *testing.T) { fstests.TestObjectRemote(t) }
func TestObjectHashes(t *testing.T) { fstests.TestObjectHashes(t) }
func TestObjectModTime(t *testing.T) { fstests.TestObjectModTime(t) }
func TestObjectMimeType(t *testing.T) { fstests.TestObjectMimeType(t) }
func TestObjectSetModTime(t *testing.T) { fstests.TestObjectSetModTime(t) }
func TestObjectSize(t *testing.T) { fstests.TestObjectSize(t) }
func TestObjectOpen(t *testing.T) { fstests.TestObjectOpen(t) }
func TestObjectOpenSeek(t *testing.T) { fstests.TestObjectOpenSeek(t) }
func TestObjectPartialRead(t *testing.T) { fstests.TestObjectPartialRead(t) }
func TestObjectUpdate(t *testing.T) { fstests.TestObjectUpdate(t) }
func TestObjectStorable(t *testing.T) { fstests.TestObjectStorable(t) }
func TestFsIsFile(t *testing.T) { fstests.TestFsIsFile(t) }
func TestFsIsFileNotFound(t *testing.T) { fstests.TestFsIsFileNotFound(t) }
func TestObjectRemove(t *testing.T) { fstests.TestObjectRemove(t) }
func TestFsPutStream(t *testing.T) { fstests.TestFsPutStream(t) }
func TestObjectPurge(t *testing.T) { fstests.TestObjectPurge(t) }
func TestFinalise(t *testing.T) { fstests.TestFinalise(t) }
425 b2/upload.go
@@ -1,425 +0,0 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html

package b2

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"hash"
	"io"
	"strings"
	"sync"

	"github.com/ncw/rclone/b2/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/rest"
	"github.com/pkg/errors"
)

type hashAppendingReader struct {
	h         hash.Hash
	in        io.Reader
	hexSum    string
	hexReader io.Reader
}

// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
	if har.hexReader == nil {
		n, err := har.in.Read(b)
		if err == io.EOF {
			har.in = nil // allow GC
			err = nil    // allow reading hexSum before EOF

			har.hexSum = hex.EncodeToString(har.h.Sum(nil))
			har.hexReader = strings.NewReader(har.hexSum)
		}
		return n, err
	}
	return har.hexReader.Read(b)
}

// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
	return hex.EncodedLen(har.h.Size())
}

// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
	return har.hexSum
}

// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h hash.Hash) *hashAppendingReader {
	withHash := io.TeeReader(in, h)
	return &hashAppendingReader{h: h, in: withHash}
}
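This reader exists so the part SHA1 can be computed on the fly and streamed after the payload (B2's "hex_digits_at_end" convention used in transferChunk below). A minimal sketch of its behaviour, as if written inside package b2 (assumes imports of crypto/sha1, fmt, io/ioutil, log and strings):

in := newHashAppendingReader(strings.NewReader("hello"), sha1.New())
// Total bytes on the wire: the payload plus the 40 hex digits of its SHA1.
total := int64(len("hello")) + int64(in.AdditionalLength())
out, err := ioutil.ReadAll(in)
if err != nil {
	log.Fatal(err)
}
fmt.Println(total)       // 45
fmt.Println(string(out)) // helloaaf4c61ddcc5e8a2dabede0f3b482cd9aea9434d
fmt.Println(in.HexSum()) // available now that the input has EOF'd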
// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
	f        *Fs                             // parent Fs
	o        *Object                         // object being uploaded
	in       io.Reader                       // read the data from here
	id       string                          // ID of the file being uploaded
	size     int64                           // total size
	parts    int64                           // calculated number of parts, if known
	sha1s    []string                        // slice of SHA1s for each part
	uploadMu sync.Mutex                      // lock for upload variable
	uploads  []*api.GetUploadPartURLResponse // result of get upload URL calls
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
	remote := o.remote
	size := src.Size()
	parts := int64(0)
	sha1SliceSize := int64(maxParts)
	if size == -1 {
		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
	} else {
		parts = size / int64(chunkSize)
		if size%int64(chunkSize) != 0 {
			parts++
		}
		if parts > maxParts {
			return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
		}
		sha1SliceSize = parts
	}

	modTime := src.ModTime()
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID:    bucketID,
		Name:        o.fs.root + remote,
		ContentType: fs.MimeType(src),
		Info: map[string]string{
			timeKey: timeString(modTime),
		},
	}
	// Set the SHA1 if known
	if calculatedSha1, err := src.Hash(fs.HashSHA1); err == nil && calculatedSha1 != "" {
		request.Info[sha1Key] = calculatedSha1
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	up = &largeUpload{
		f:     f,
		o:     o,
		in:    in,
		id:    response.ID,
		size:  size,
		parts: parts,
		sha1s: make([]string, sha1SliceSize),
	}
	return up, nil
}

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
	up.uploadMu.Lock()
	defer up.uploadMu.Unlock()
	if len(up.uploads) == 0 {
		opts := rest.Opts{
			Method: "POST",
			Path:   "/b2_get_upload_part_url",
		}
		var request = api.GetUploadPartURLRequest{
			ID: up.id,
		}
		err := up.f.pacer.Call(func() (bool, error) {
			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
			return up.f.shouldRetry(resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get upload URL")
		}
	} else {
		upload, up.uploads = up.uploads[0], up.uploads[1:]
	}
	return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
	if upload == nil {
		return
	}
	up.uploadMu.Lock()
	up.uploads = append(up.uploads, upload)
	up.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
	up.uploadMu.Lock()
	up.uploads = nil
	up.uploadMu.Unlock()
}

// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

		// Get upload URL
		upload, err := up.getUploadURL()
		if err != nil {
			return false, err
		}

		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
		size := int64(len(body)) + int64(in.AdditionalLength())

		// Authorization
		//
		// An upload authorization token, from b2_get_upload_part_url.
		//
		// X-Bz-Part-Number
		//
		// A number from 1 to 10000. The parts uploaded for one file
		// must have contiguous numbers, starting with 1.
		//
		// Content-Length
		//
		// The number of bytes in the file being uploaded. Note that
		// this header is required; you cannot leave it out and just
		// use chunked encoding. The minimum size of every part but
		// the last one is 100MB.
		//
		// X-Bz-Content-Sha1
		//
		// The SHA1 checksum of this part of the file. B2 will
		// check this when the part is uploaded, to make sure that the
		// data arrived correctly. The same SHA1 checksum must be
		// passed to b2_finish_large_file.
		opts := rest.Opts{
			Method:  "POST",
			RootURL: upload.UploadURL,
			Body:    fs.AccountPart(up.o, in),
			ExtraHeaders: map[string]string{
				"Authorization":    upload.AuthorizationToken,
				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
				sha1Header:         "hex_digits_at_end",
			},
			ContentLength: &size,
		}

		var response api.UploadPartResponse

		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
		retry, err := up.f.shouldRetry(resp, err)
		// On retryable error clear PartUploadURL
		if retry {
			fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
			upload = nil
		}
		up.returnUploadURL(upload)
		up.sha1s[part-1] = in.HexSum()
		return retry, err
	})
	if err != nil {
		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
	} else {
		fs.Debugf(up.o, "Done sending chunk %d", part)
	}
	return err
}
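The header contract spelled out in the comment above is small enough to show directly. A hand-rolled sketch of the same part upload using only net/http — the URL and token are hypothetical placeholders for what b2_get_upload_part_url returns, and rclone itself goes through rest.Opts as above:

package main

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	"log"
	"net/http"
)

func main() {
	body := []byte("chunk contents")
	sum := sha1.Sum(body)

	// Hypothetical values from a b2_get_upload_part_url response.
	uploadURL := "https://pod-000-0000-00.backblaze.com/b2api/v1/b2_upload_part/..."
	token := "UPLOAD_AUTH_TOKEN"

	req, err := http.NewRequest("POST", uploadURL, bytes.NewReader(body))
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Authorization", token)
	req.Header.Set("X-Bz-Part-Number", "1") // parts are numbered contiguously from 1
	req.Header.Set("X-Bz-Content-Sha1", hex.EncodeToString(sum[:]))
	req.ContentLength = int64(len(body)) // required; chunked encoding is not allowed

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}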
// finish closes off the large upload
func (up *largeUpload) finish() error {
	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_finish_large_file",
	}
	var request = api.FinishLargeFileRequest{
		ID:    up.id,
		SHA1s: up.sha1s,
	}
	var response api.FileInfo
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	return up.o.decodeMetaDataFileInfo(&response)
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
	opts := rest.Opts{
		Method: "POST",
		Path:   "/b2_cancel_large_file",
	}
	var request = api.CancelLargeFileRequest{
		ID: up.id,
	}
	var response api.CancelLargeFileResponse
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
	})
	return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
	wg.Add(1)
	go func(part int64, buf []byte) {
		defer wg.Done()
		defer up.f.putUploadBlock(buf)
		err := up.transferChunk(part, buf)
		if err != nil {
			select {
			case errs <- err:
			default:
			}
		}
	}(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
		cancelErr := up.cancel()
		if cancelErr != nil {
			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
		}
		return err
	}
	return up.finish()
}

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
	errs := make(chan error, 1)
	hasMoreParts := true
	var wg sync.WaitGroup
	fs.AccountByPart(up.o) // Cancel whole file accounting before reading

	// Transfer initial chunk
	up.size = int64(len(initialUploadBlock))
	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)

outer:
	for part := int64(2); hasMoreParts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		// Get a block of memory
		buf := up.f.getUploadBlock()

		// Read the chunk
		n, err := io.ReadFull(up.in, buf)
		if err == io.ErrUnexpectedEOF {
			fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
			buf = buf[:n]
			hasMoreParts = false
			err = nil
		} else if err == io.EOF {
			fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
			up.f.putUploadBlock(buf)
			hasMoreParts = false
			err = nil
			break outer
		} else if err != nil {
			// other kinds of errors indicate failure
			up.f.putUploadBlock(buf)
			break outer
		}

		// Keep stats up to date
		up.parts = part
		up.size += int64(n)
		if part > maxParts {
			err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
			break outer
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
	}
	wg.Wait()
	up.sha1s = up.sha1s[:up.parts]

	return up.finishOrCancelOnError(err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
	remaining := up.size
	errs := make(chan error, 1)
	var wg sync.WaitGroup
	var err error
	fs.AccountByPart(up.o) // Cancel whole file accounting before reading
outer:
	for part := int64(1); part <= up.parts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		reqSize := remaining
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		}

		// Get a block of memory
		buf := up.f.getUploadBlock()[:reqSize]

		// Read the chunk
		_, err = io.ReadFull(up.in, buf)
		if err != nil {
			up.f.putUploadBlock(buf)
			break outer
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
		remaining -= reqSize
	}
	wg.Wait()

	return up.finishOrCancelOnError(err, errs)
}
45 backend/alias/alias.go Normal file
@@ -0,0 +1,45 @@
package alias

import (
	"errors"
	"path"
	"path/filepath"
	"strings"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
)

// Register with Fs
func init() {
	fsi := &fs.RegInfo{
		Name:        "alias",
		Description: "Alias for an existing remote",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name: "remote",
			Help: "Remote or path to alias.\nCan be \"myremote:path/to/dir\", \"myremote:bucket\", \"myremote:\" or \"/local/path\".",
		}},
	}
	fs.Register(fsi)
}

// NewFs constructs an Fs from the path.
//
// The returned Fs is the actual Fs, referenced by remote in the config
func NewFs(name, root string) (fs.Fs, error) {
	remote := config.FileGet(name, "remote")
	if remote == "" {
		return nil, errors.New("alias can't point to an empty remote - check the value of the remote setting")
	}
	if strings.HasPrefix(remote, name+":") {
		return nil, errors.New("can't point alias remote at itself - check the value of the remote setting")
	}
	fsInfo, configName, fsPath, err := fs.ParseRemote(remote)
	if err != nil {
		return nil, err
	}

	root = filepath.ToSlash(root)
	return fsInfo.NewFs(configName, path.Join(fsPath, root))
}
104 backend/alias/alias_internal_test.go Normal file
@@ -0,0 +1,104 @@
package alias

import (
	"fmt"
	"path"
	"path/filepath"
	"sort"
	"testing"

	_ "github.com/ncw/rclone/backend/local" // pull in test backend
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/stretchr/testify/require"
)

var (
	remoteName = "TestAlias"
)

func prepare(t *testing.T, root string) {
	config.LoadConfig()

	// Configure the remote
	config.FileSet(remoteName, "type", "alias")
	config.FileSet(remoteName, "remote", root)
}

func TestNewFS(t *testing.T) {
	type testEntry struct {
		remote string
		size   int64
		isDir  bool
	}
	for testi, test := range []struct {
		remoteRoot string
		fsRoot     string
		fsList     string
		wantOK     bool
		entries    []testEntry
	}{
		{"", "", "", true, []testEntry{
			{"four", -1, true},
			{"one%.txt", 6, false},
			{"three", -1, true},
			{"two.html", 7, false},
		}},
		{"", "four", "", true, []testEntry{
			{"five", -1, true},
			{"under four.txt", 9, false},
		}},
		{"", "", "four", true, []testEntry{
			{"four/five", -1, true},
			{"four/under four.txt", 9, false},
		}},
		{"four", "..", "", true, []testEntry{
			{"four", -1, true},
			{"one%.txt", 6, false},
			{"three", -1, true},
			{"two.html", 7, false},
		}},
		{"four", "../three", "", true, []testEntry{
			{"underthree.txt", 9, false},
		}},
	} {
		what := fmt.Sprintf("test %d remoteRoot=%q, fsRoot=%q, fsList=%q", testi, test.remoteRoot, test.fsRoot, test.fsList)

		remoteRoot, err := filepath.Abs(filepath.FromSlash(path.Join("test/files", test.remoteRoot)))
		require.NoError(t, err, what)
		prepare(t, remoteRoot)
		f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
		require.NoError(t, err, what)
		gotEntries, err := f.List(test.fsList)
		require.NoError(t, err, what)

		sort.Sort(gotEntries)

		require.Equal(t, len(test.entries), len(gotEntries), what)
		for i, gotEntry := range gotEntries {
			what := fmt.Sprintf("%s, entry=%d", what, i)
			wantEntry := test.entries[i]

			require.Equal(t, wantEntry.remote, gotEntry.Remote(), what)
			require.Equal(t, wantEntry.size, int64(gotEntry.Size()), what)
			_, isDir := gotEntry.(fs.Directory)
			require.Equal(t, wantEntry.isDir, isDir, what)
		}
	}
}

func TestNewFSNoRemote(t *testing.T) {
	prepare(t, "")
	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))

	require.Error(t, err)
	require.Nil(t, f)
}

func TestNewFSInvalidRemote(t *testing.T) {
	prepare(t, "not_existing_test_remote:")
	f, err := fs.NewFs(fmt.Sprintf("%s:", remoteName))

	require.Error(t, err)
	require.Nil(t, f)
}
1 backend/alias/test/files/four/five/underfive.txt Normal file
@@ -0,0 +1 @@
apple
29 backend/all/all.go Normal file
@@ -0,0 +1,29 @@
package all

import (
	// Active file systems
	_ "github.com/ncw/rclone/backend/alias"
	_ "github.com/ncw/rclone/backend/amazonclouddrive"
	_ "github.com/ncw/rclone/backend/azureblob"
	_ "github.com/ncw/rclone/backend/b2"
	_ "github.com/ncw/rclone/backend/box"
	_ "github.com/ncw/rclone/backend/cache"
	_ "github.com/ncw/rclone/backend/crypt"
	_ "github.com/ncw/rclone/backend/drive"
	_ "github.com/ncw/rclone/backend/dropbox"
	_ "github.com/ncw/rclone/backend/ftp"
	_ "github.com/ncw/rclone/backend/googlecloudstorage"
	_ "github.com/ncw/rclone/backend/http"
	_ "github.com/ncw/rclone/backend/hubic"
	_ "github.com/ncw/rclone/backend/local"
	_ "github.com/ncw/rclone/backend/mega"
	_ "github.com/ncw/rclone/backend/onedrive"
	_ "github.com/ncw/rclone/backend/opendrive"
	_ "github.com/ncw/rclone/backend/pcloud"
	_ "github.com/ncw/rclone/backend/qingstor"
	_ "github.com/ncw/rclone/backend/s3"
	_ "github.com/ncw/rclone/backend/sftp"
	_ "github.com/ncw/rclone/backend/swift"
	_ "github.com/ncw/rclone/backend/webdav"
	_ "github.com/ncw/rclone/backend/yandex"
)
@@ -18,16 +18,20 @@ import (
	"log"
	"net/http"
	"path"
	"regexp"
	"strings"
	"time"

	"github.com/ncw/go-acd"
	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"github.com/ncw/rclone/pacer"
	"github.com/ncw/rclone/rest"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/dircache"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
)
@@ -35,7 +39,6 @@ import (
const (
	folderKind      = "FOLDER"
	fileKind        = "FILE"
	assetKind       = "ASSET"
	statusAvailable = "AVAILABLE"
	timeFormat      = time.RFC3339 // 2014-03-07T22:31:12.173Z
	minSleep        = 20 * time.Millisecond
@@ -46,7 +49,7 @@ const (
var (
	// Flags
	tempLinkThreshold = fs.SizeSuffix(9 << 30) // Download files bigger than this via the tempLink
	uploadWaitPerGB   = fs.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
	uploadWaitPerGB   = flags.DurationP("acd-upload-wait-per-gb", "", 180*time.Second, "Additional time per GB to wait after a failed complete upload to see if it appears.")
	// Description of how to auth for this app
	acdConfig = &oauth2.Config{
		Scopes: []string{"clouddrive:read_all", "clouddrive:write"},
@@ -73,20 +76,20 @@ func init() {
			}
		},
		Options: []fs.Option{{
			Name: fs.ConfigClientID,
			Name: config.ConfigClientID,
			Help: "Amazon Application Client Id - required.",
		}, {
			Name: fs.ConfigClientSecret,
			Name: config.ConfigClientSecret,
			Help: "Amazon Application Client Secret - required.",
		}, {
			Name: fs.ConfigAuthURL,
			Name: config.ConfigAuthURL,
			Help: "Auth server URL - leave blank to use Amazon's.",
		}, {
			Name: fs.ConfigTokenURL,
			Name: config.ConfigTokenURL,
			Help: "Token server url - leave blank to use Amazon's.",
		}},
	})
	fs.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
	flags.VarP(&tempLinkThreshold, "acd-templink-threshold", "", "Files >= this size will be downloaded via their tempLink.")
}
|
||||
// Fs represents a remote acd server
|
||||
@@ -133,9 +136,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a acd path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an acd 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -171,7 +171,7 @@ func (f *Fs) shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
return true, err
|
||||
}
|
||||
}
|
||||
return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// If query parameters contain X-Amz-Algorithm remove Authorization header
|
||||
@@ -193,7 +193,7 @@ func filterRequest(req *http.Request) {
|
||||
// NewFs constructs an Fs from the path, container:path
|
||||
func NewFs(name, root string) (fs.Fs, error) {
|
||||
root = parsePath(root)
|
||||
baseClient := fs.Config.Client()
|
||||
baseClient := fshttp.NewClient(fs.Config)
|
||||
if do, ok := baseClient.Transport.(interface {
|
||||
SetRequestFilter(f func(req *http.Request))
|
||||
}); ok {
|
||||
@@ -212,7 +212,7 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
root: root,
|
||||
c: c,
|
||||
pacer: pacer.New().SetMinSleep(minSleep).SetPacer(pacer.AmazonCloudDrivePacer),
|
||||
noAuthClient: fs.Config.Client(),
|
||||
noAuthClient: fshttp.NewClient(fs.Config),
|
||||
}
|
||||
f.features = (&fs.Features{
|
||||
CaseInsensitive: true,
|
||||
@@ -472,7 +472,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
if iErr != nil {
|
||||
return nil, iErr
|
||||
}
|
||||
if fs.IsRetryError(err) {
|
||||
if fserrors.IsRetryError(err) {
|
||||
fs.Debugf(f, "Directory listing error for %q: %v - low level retry %d/%d", dir, err, tries, maxTries)
|
||||
continue
|
||||
}
|
||||
@@ -875,8 +875,8 @@ func (f *Fs) Precision() time.Duration {
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashMD5)
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.MD5)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
@@ -932,9 +932,9 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns the Md5sum of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
if t != fs.HashMD5 {
|
||||
return "", fs.ErrHashUnsupported
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if t != hash.MD5 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
if o.info.ContentProperties != nil && o.info.ContentProperties.Md5 != nil {
|
||||
return *o.info.ContentProperties.Md5, nil
|
||||
@@ -1201,6 +1201,128 @@ func (o *Object) MimeType() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// ChangeNotify calls the passed function with a path that has had changes.
// If the implementation uses polling, it should adhere to the given interval.
//
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
	checkpoint := config.FileGet(f.name, "checkpoint")

	quit := make(chan bool)
	go func() {
		for {
			checkpoint = f.changeNotifyRunner(notifyFunc, checkpoint)
			if err := config.SetValueAndSave(f.name, "checkpoint", checkpoint); err != nil {
				fs.Debugf(f, "Unable to save checkpoint: %v", err)
			}
			select {
			case <-quit:
				return
			case <-time.After(pollInterval):
			}
		}
	}()
	return quit
}

func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoint string) string {
	var err error
	var resp *http.Response
	var reachedEnd bool
	var csCount int
	var nodeCount int

	fs.Debugf(f, "Checking for changes on remote (Checkpoint %q)", checkpoint)
	err = f.pacer.CallNoRetry(func() (bool, error) {
		resp, err = f.c.Changes.GetChangesFunc(&acd.ChangesOptions{
			Checkpoint: checkpoint,
			IncludePurged: true,
		}, func(changeSet *acd.ChangeSet, err error) error {
			if err != nil {
				return err
			}

			type entryType struct {
				path string
				entryType fs.EntryType
			}
			var pathsToClear []entryType
			csCount++
			nodeCount += len(changeSet.Nodes)
			if changeSet.End {
				reachedEnd = true
			}
			if changeSet.Checkpoint != "" {
				checkpoint = changeSet.Checkpoint
			}
			for _, node := range changeSet.Nodes {
				if path, ok := f.dirCache.GetInv(*node.Id); ok {
					if node.IsFile() {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
					} else {
						pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryDirectory})
					}
					continue
				}

				if node.IsFile() {
					// translate the parent dir of this object
					if len(node.Parents) > 0 {
						if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
							// and append the drive file name to compute the full file name
							if len(path) > 0 {
								path = path + "/" + *node.Name
							} else {
								path = *node.Name
							}
							// this will now clear the actual file too
							pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})
						}
					} else { // a true root object that is changed
						pathsToClear = append(pathsToClear, entryType{path: *node.Name, entryType: fs.EntryObject})
					}
				}
			}

			visitedPaths := make(map[string]bool)
			for _, entry := range pathsToClear {
				if _, ok := visitedPaths[entry.path]; ok {
					continue
				}
				visitedPaths[entry.path] = true
				notifyFunc(entry.path, entry.entryType)
			}

			return nil
		})
		return false, err
	})
	fs.Debugf(f, "Got %d ChangeSets with %d Nodes", csCount, nodeCount)

	if err != nil && err != io.ErrUnexpectedEOF {
		fs.Debugf(f, "Failed to get Changes: %v", err)
		return checkpoint
	}

	if reachedEnd {
		reachedEnd = false
		fs.Debugf(f, "All changes were processed. Waiting for more.")
	} else if checkpoint == "" {
		fs.Debugf(f, "Did not get any checkpoint, something went wrong! %+v", resp)
	}
	return checkpoint
}
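A hedged usage sketch (not part of the diff): `f` is assumed to be an acd Fs obtained from NewFs, and the callback simply logs each invalidated path. Closing the returned channel stops the poller, per the comment above.

```go
quit := f.ChangeNotify(func(path string, entryType fs.EntryType) {
	fs.Debugf(nil, "change at %q (%v)", path, entryType)
}, time.Minute) // poll once a minute
defer close(quit) // stop being notified
```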

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	if o.info.Id == nil {
		return ""
	}
	return *o.info.Id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = (*Fs)(nil)
@@ -1209,6 +1331,8 @@ var (
	_ fs.Mover = (*Fs)(nil)
	_ fs.DirMover = (*Fs)(nil)
	_ fs.DirCacheFlusher = (*Fs)(nil)
	_ fs.ChangeNotifier = (*Fs)(nil)
	_ fs.Object = (*Object)(nil)
	_ fs.MimeTyper = &Object{}
	_ fs.IDer = &Object{}
)
backend/amazonclouddrive/amazonclouddrive_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
// Test AmazonCloudDrive filesystem interface

// +build acd

package amazonclouddrive_test

import (
	"testing"

	"github.com/ncw/rclone/backend/amazonclouddrive"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.NilObject = fs.Object((*amazonclouddrive.Object)(nil))
	fstests.RemoteName = "TestAmazonCloudDrive:"
	fstests.Run(t)
}
backend/azureblob/azureblob.go (new file, 1181 lines)
File diff suppressed because it is too large
backend/azureblob/azureblob_test.go (new file, 20 lines)
@@ -0,0 +1,20 @@
// Test AzureBlob filesystem interface

// +build !freebsd,!netbsd,!openbsd,!plan9,!solaris,go1.8

package azureblob_test

import (
	"testing"

	"github.com/ncw/rclone/backend/azureblob"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestAzureBlob:",
		NilObject: (*azureblob.Object)(nil),
	})
}
backend/azureblob/azureblob_unsupported.go (new file, 6 lines)
@@ -0,0 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build freebsd netbsd openbsd plan9 solaris !go1.8

package azureblob
backend/b2/api/types.go (new file, 301 lines)
@@ -0,0 +1,301 @@
package api

import (
	"fmt"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs/fserrors"
)

// Error describes a B2 error response
type Error struct {
	Status int `json:"status"` // The numeric HTTP status code. Always matches the status in the HTTP response.
	Code string `json:"code"` // A single-identifier code that identifies the error.
	Message string `json:"message"` // A human-readable message, in English, saying what went wrong.
}

// Error satisfies the error interface
func (e *Error) Error() string {
	return fmt.Sprintf("%s (%d %s)", e.Message, e.Status, e.Code)
}

// Fatal satisfies the Fatal interface
//
// It indicates which errors should be treated as fatal
func (e *Error) Fatal() bool {
	return e.Status == 403 // 403 errors shouldn't be retried
}

var _ fserrors.Fataler = (*Error)(nil)

// Account describes a B2 account
type Account struct {
	ID string `json:"accountId"` // The identifier for the account.
}

// Bucket describes a B2 bucket
type Bucket struct {
	ID string `json:"bucketId"`
	AccountID string `json:"accountId"`
	Name string `json:"bucketName"`
	Type string `json:"bucketType"`
}

// Timestamp is a UTC time when this file was uploaded. It is a base
// 10 number of milliseconds since midnight, January 1, 1970 UTC. This
// fits in a 64 bit integer such as the type "long" in the programming
// language Java. It is intended to be compatible with Java's time
// long. For example, it can be passed directly into the java call
// Date.setTime(long time).
type Timestamp time.Time

// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
	timestamp := (*time.Time)(t).UTC().UnixNano()
	return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp
func (t *Timestamp) UnmarshalJSON(data []byte) error {
	timestamp, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
	return nil
}
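To make the millisecond encoding concrete, here is a hedged, self-contained sketch (not part of the diff; the import path is the one introduced by this commit):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/ncw/rclone/backend/b2/api"
)

func main() {
	// 2016-01-14T20:20:03.026Z is 1452802803026 ms after the Unix epoch.
	t := api.Timestamp(time.Date(2016, 1, 14, 20, 20, 3, 26*int(time.Millisecond), time.UTC))
	out, _ := json.Marshal(&t)
	fmt.Println(string(out)) // 1452802803026 - a bare base 10 number, no quotes

	var back api.Timestamp
	_ = json.Unmarshal(out, &back)
	fmt.Println(time.Time(back).Equal(time.Time(t))) // true - round-trips at ms precision
}
```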
|
||||
const versionFormat = "-v2006-01-02-150405.000"
|
||||
|
||||
// AddVersion adds the timestamp as a version string into the filename passed in.
|
||||
func (t Timestamp) AddVersion(remote string) string {
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
s := (time.Time)(t).Format(versionFormat)
|
||||
// Replace the '.' with a '-'
|
||||
s = strings.Replace(s, ".", "-", -1)
|
||||
return base + s + ext
|
||||
}
|
||||
|
||||
// RemoveVersion removes the timestamp from a filename as a version string.
|
||||
//
|
||||
// It returns the new file name and a timestamp, or the old filename
|
||||
// and a zero timestamp.
|
||||
func RemoveVersion(remote string) (t Timestamp, newRemote string) {
|
||||
newRemote = remote
|
||||
ext := path.Ext(remote)
|
||||
base := remote[:len(remote)-len(ext)]
|
||||
if len(base) < len(versionFormat) {
|
||||
return
|
||||
}
|
||||
versionStart := len(base) - len(versionFormat)
|
||||
// Check it ends in -xxx
|
||||
if base[len(base)-4] != '-' {
|
||||
return
|
||||
}
|
||||
// Replace with .xxx for parsing
|
||||
base = base[:len(base)-4] + "." + base[len(base)-3:]
|
||||
newT, err := time.Parse(versionFormat, base[versionStart:])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return Timestamp(newT), base[:versionStart] + ext
|
||||
}
|
||||
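The version format above round-trips as follows; a hedged sketch (not part of the diff), reusing the imports from the previous example:

```go
t := api.Timestamp(time.Date(2018, 3, 1, 12, 0, 0, 0, time.UTC))
versioned := t.AddVersion("data/file.txt")
fmt.Println(versioned) // data/file-v2018-03-01-120000-000.txt

ts, plain := api.RemoveVersion(versioned)
fmt.Println(plain)                             // data/file.txt
fmt.Println(time.Time(ts).Equal(time.Time(t))) // true
```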

// IsZero returns true if the timestamp is uninitialised
func (t Timestamp) IsZero() bool {
	return (time.Time)(t).IsZero()
}

// Equal compares two timestamps
//
// If either is zero then it returns false
func (t Timestamp) Equal(s Timestamp) bool {
	if (time.Time)(t).IsZero() {
		return false
	}
	if (time.Time)(s).IsZero() {
		return false
	}
	return (time.Time)(t).Equal((time.Time)(s))
}

// File is info about a file
type File struct {
	ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
	Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	Size int64 `json:"size"` // The number of bytes in the file.
	UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
	ContentType string `json:"contentType"` // The MIME type of the file.
	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// AuthorizeAccountResponse is as returned from the b2_authorize_account call
type AuthorizeAccountResponse struct {
	AccountID string `json:"accountId"` // The identifier for the account.
	AuthorizationToken string `json:"authorizationToken"` // An authorization token to use with all calls, other than b2_authorize_account, that need an Authorization header.
	APIURL string `json:"apiUrl"` // The base URL to use for all API calls except for uploading and downloading files.
	DownloadURL string `json:"downloadUrl"` // The base URL to use for downloading files.
}

// ListBucketsResponse is as returned from the b2_list_buckets call
type ListBucketsResponse struct {
	Buckets []Bucket `json:"buckets"`
}

// ListFileNamesRequest is as passed to b2_list_file_names or b2_list_file_versions
type ListFileNamesRequest struct {
	BucketID string `json:"bucketId"` // required - The bucket to look for file names in.
	StartFileName string `json:"startFileName,omitempty"` // optional - The first file name to return. If there is a file with this name, it will be returned in the list. If not, the first file name after this one will be the first returned.
	MaxFileCount int `json:"maxFileCount,omitempty"` // optional - The maximum number of files to return from this call. The default value is 100, and the maximum allowed is 1000.
	StartFileID string `json:"startFileId,omitempty"` // optional - What to pass in to startFileId for the next search to continue where this one left off.
	Prefix string `json:"prefix,omitempty"` // optional - Files returned will be limited to those with the given prefix. Defaults to the empty string, which matches all files.
	Delimiter string `json:"delimiter,omitempty"` // Files returned will be limited to those within the top folder, or any one subfolder. Defaults to NULL. Folder names will also be returned. The delimiter character will be used to "break" file names into folders.
}

// ListFileNamesResponse is as received from b2_list_file_names or b2_list_file_versions
type ListFileNamesResponse struct {
	Files []File `json:"files"` // An array of objects, each one describing one file.
	NextFileName *string `json:"nextFileName"` // What to pass in to startFileName for the next search to continue where this one left off, or null if there are no more files.
	NextFileID *string `json:"nextFileId"` // What to pass in to startFileId for the next search to continue where this one left off, or null if there are no more files.
}

// GetUploadURLRequest is passed to b2_get_upload_url
type GetUploadURLRequest struct {
	BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
}

// GetUploadURLResponse is received from b2_get_upload_url
type GetUploadURLResponse struct {
	BucketID string `json:"bucketId"` // The unique ID of the bucket.
	UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_file.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}

// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
	ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
	Action string `json:"action"` // Either "upload" or "hide". "upload" means a file that was uploaded to B2 Cloud Storage. "hide" means a file version marking the file as hidden, so that it will not show up in b2_list_file_names. The result of b2_list_file_names will contain only "upload". The result of b2_list_file_versions may have both.
	AccountID string `json:"accountId"` // Your account ID.
	BucketID string `json:"bucketId"` // The bucket that the file is in.
	Size int64 `json:"contentLength"` // The number of bytes stored in the file.
	UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
	SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
	ContentType string `json:"contentType"` // The MIME type of the file.
	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
}

// CreateBucketRequest is used to create a bucket
type CreateBucketRequest struct {
	AccountID string `json:"accountId"`
	Name string `json:"bucketName"`
	Type string `json:"bucketType"`
}

// DeleteBucketRequest is used to delete a bucket
type DeleteBucketRequest struct {
	ID string `json:"bucketId"`
	AccountID string `json:"accountId"`
}

// DeleteFileRequest is used to delete a file version
type DeleteFileRequest struct {
	ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
	Name string `json:"fileName"` // The name of this file.
}

// HideFileRequest is used to hide a file
type HideFileRequest struct {
	BucketID string `json:"bucketId"` // The bucket containing the file to hide.
	Name string `json:"fileName"` // The name of the file to hide.
}

// GetFileInfoRequest is used to return a FileInfo struct with b2_get_file_info
type GetFileInfoRequest struct {
	ID string `json:"fileId"` // The ID of the file, as returned by b2_upload_file, b2_list_file_names, or b2_list_file_versions.
}

// StartLargeFileRequest (b2_start_large_file) Prepares for uploading the parts of a large file.
//
// If the original source of the file being uploaded has a last
// modified time concept, Backblaze recommends using
// src_last_modified_millis as the name, and a string holding the base
// 10 number of milliseconds since midnight, January 1, 1970
// UTC. This fits in a 64 bit integer such as the type "long" in the
// programming language Java. It is intended to be compatible with
// Java's time long. For example, it can be passed directly into the
// Java call Date.setTime(long time).
//
// If the caller knows the SHA1 of the entire large file being
// uploaded, Backblaze recommends using large_file_sha1 as the name,
// and a 40 byte hex string representing the SHA1.
//
// Example: { "src_last_modified_millis" : "1452802803026", "large_file_sha1" : "a3195dc1e7b46a2ff5da4b3c179175b75671e80d", "color": "blue" }
type StartLargeFileRequest struct {
	BucketID string `json:"bucketId"` // The ID of the bucket that the file will go in.
	Name string `json:"fileName"` // The name of the file. See Files for requirements on file names.
	ContentType string `json:"contentType"` // The MIME type of the content of the file, which will be returned in the Content-Type header when downloading the file. Use the Content-Type b2/x-auto to automatically set the stored Content-Type post upload. In the case where a file extension is absent or the lookup fails, the Content-Type is set to application/octet-stream.
	Info map[string]string `json:"fileInfo"` // A JSON object holding the name/value pairs for the custom file info.
}

// StartLargeFileResponse is the response to StartLargeFileRequest
type StartLargeFileResponse struct {
	ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
	Name string `json:"fileName"` // The name of this file, which can be used with b2_download_file_by_name.
	AccountID string `json:"accountId"` // The identifier for the account.
	BucketID string `json:"bucketId"` // The unique ID of the bucket.
	ContentType string `json:"contentType"` // The MIME type of the file.
	Info map[string]string `json:"fileInfo"` // The custom information that was uploaded with the file. This is a JSON object, holding the name/value pairs that were uploaded with the file.
	UploadTimestamp Timestamp `json:"uploadTimestamp"` // This is a UTC time when this file was uploaded.
}

// GetUploadPartURLRequest is passed to b2_get_upload_part_url
type GetUploadPartURLRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}

// GetUploadPartURLResponse is received from b2_get_upload_part_url
type GetUploadPartURLResponse struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
	UploadURL string `json:"uploadUrl"` // The URL that can be used to upload files to this bucket, see b2_upload_part.
	AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_part.
}

// UploadPartResponse is the response to b2_upload_part
type UploadPartResponse struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
	PartNumber int64 `json:"partNumber"` // Which part this is (starting from 1)
	Size int64 `json:"contentLength"` // The number of bytes stored in the file.
	SHA1 string `json:"contentSha1"` // The SHA1 of the bytes stored in the file.
}

// FinishLargeFileRequest is passed to b2_finish_large_file
//
// The response is a FileInfo object (with extra AccountID and BucketID fields which we ignore).
//
// Large files do not have a SHA1 checksum. The value will always be "none".
type FinishLargeFileRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
	SHA1s []string `json:"partSha1Array"` // A JSON array of hex SHA1 checksums of the parts of the large file. This is a double-check that the right parts were uploaded in the right order, and that none were missed. Note that the part numbers start at 1, and the SHA1 of part 1 is the first string in the array, at index 0.
}

// CancelLargeFileRequest is passed to b2_cancel_large_file
//
// The response is a CancelLargeFileResponse
type CancelLargeFileRequest struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
}

// CancelLargeFileResponse is the response to CancelLargeFileRequest
type CancelLargeFileResponse struct {
	ID string `json:"fileId"` // The unique identifier of the file being uploaded.
	Name string `json:"fileName"` // The name of this file.
	AccountID string `json:"accountId"` // The identifier for the account.
	BucketID string `json:"bucketId"` // The unique ID of the bucket.
}
@@ -4,7 +4,7 @@ import (
	"testing"
	"time"

	"github.com/ncw/rclone/b2/api"
	"github.com/ncw/rclone/backend/b2/api"
	"github.com/ncw/rclone/fstest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
@@ -9,7 +9,7 @@ import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"hash"
	gohash "hash"
	"io"
	"net/http"
	"path"
@@ -19,10 +19,17 @@ import (
	"sync"
	"time"

	"github.com/ncw/rclone/b2/api"
	"github.com/ncw/rclone/backend/b2/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/pacer"
	"github.com/ncw/rclone/rest"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fs/walk"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
)

@@ -48,9 +55,9 @@ var (
	minChunkSize = fs.SizeSuffix(5E6)
	chunkSize = fs.SizeSuffix(96 * 1024 * 1024)
	uploadCutoff = fs.SizeSuffix(200E6)
	b2TestMode = fs.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
	b2Versions = fs.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
	b2HardDelete = fs.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
	b2TestMode = flags.StringP("b2-test-mode", "", "", "A flag string for X-Bz-Test-Mode header.")
	b2Versions = flags.BoolP("b2-versions", "", false, "Include old versions in directory listings.")
	b2HardDelete = flags.BoolP("b2-hard-delete", "", false, "Permanently delete files on remote removal, otherwise hide files.")
	errNotWithVersions = errors.New("can't modify or delete files in --b2-versions mode")
)

@@ -72,8 +79,8 @@ func init() {
		},
		},
	})
	fs.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
	fs.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
	flags.VarP(&uploadCutoff, "b2-upload-cutoff", "", "Cutoff for switching to chunked upload")
	flags.VarP(&chunkSize, "b2-chunk-size", "", "Upload chunk size. Must fit in memory.")
}

// Fs represents a remote b2 server
@@ -186,7 +193,7 @@ func (f *Fs) shouldRetryNoReauth(resp *http.Response, err error) (bool, error) {
		}
		return true, err
	}
	return fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

// shouldRetry returns a boolean as to whether this resp and err
@@ -236,15 +243,15 @@ func NewFs(name, root string) (fs.Fs, error) {
	if err != nil {
		return nil, err
	}
	account := fs.ConfigFileGet(name, "account")
	account := config.FileGet(name, "account")
	if account == "" {
		return nil, errors.New("account not found")
	}
	key := fs.ConfigFileGet(name, "key")
	key := config.FileGet(name, "key")
	if key == "" {
		return nil, errors.New("key not found")
	}
	endpoint := fs.ConfigFileGet(name, "endpoint", defaultEndpoint)
	endpoint := config.FileGet(name, "endpoint", defaultEndpoint)
	f := &Fs{
		name: name,
		bucket: bucket,
@@ -252,7 +259,7 @@ func NewFs(name, root string) (fs.Fs, error) {
		account: account,
		key: key,
		endpoint: endpoint,
		srv: rest.NewClient(fs.Config.Client()).SetErrorHandler(errorHandler),
		srv: rest.NewClient(fshttp.NewClient(fs.Config)).SetErrorHandler(errorHandler),
		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
		bufferTokens: make(chan []byte, fs.Config.Transfers),
	}
@@ -544,6 +551,15 @@ func (f *Fs) itemToDirEntry(remote string, object *api.File, isDirectory bool, l
	return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketOKMu.Unlock()
	}
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	last := ""
@@ -560,6 +576,8 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return entries, nil
}

@@ -615,7 +633,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := fs.NewListRHelper(callback)
	list := walk.NewListRHelper(callback)
	last := ""
	err = f.list(dir, true, "", 0, *b2Versions, func(remote string, object *api.File, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory, &last)
@@ -627,6 +645,8 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

@@ -868,16 +888,16 @@ func (f *Fs) purge(oldOnly bool) error {
		go func() {
			defer wg.Done()
			for object := range toBeDeleted {
				fs.Stats.Checking(object.Name)
				accounting.Stats.Checking(object.Name)
				checkErr(f.deleteByID(object.ID, object.Name))
				fs.Stats.DoneChecking(object.Name)
				accounting.Stats.DoneChecking(object.Name)
			}
		}()
	}
	last := ""
	checkErr(f.list("", true, "", 0, true, func(remote string, object *api.File, isDirectory bool) error {
		if !isDirectory {
			fs.Stats.Checking(remote)
			accounting.Stats.Checking(remote)
			if oldOnly && last != remote {
				if object.Action == "hide" {
					fs.Debugf(remote, "Deleting current version (id %q) as it is a hide marker", object.ID)
@@ -890,7 +910,7 @@ func (f *Fs) purge(oldOnly bool) error {
					toBeDeleted <- object
				}
			}
			last = remote
			fs.Stats.DoneChecking(remote)
			accounting.Stats.DoneChecking(remote)
		}
		return nil
	}))
@@ -914,8 +934,8 @@ func (f *Fs) CleanUp() error {
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
	return fs.HashSet(fs.HashSHA1)
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.SHA1)
}

// ------------------------------------------------------------
@@ -939,9 +959,9 @@ func (o *Object) Remote() string {
}

// Hash returns the Sha-1 of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
	if t != fs.HashSHA1 {
		return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
	if t != hash.SHA1 {
		return "", hash.ErrUnsupported
	}
	if o.sha1 == "" {
		// Error is logged in readMetaData
@@ -1094,7 +1114,7 @@ type openFile struct {
	o *Object // Object we are reading for
	resp *http.Response // response of the GET
	body io.Reader // reading from here
	hash hash.Hash // currently accumulating SHA1
	hash gohash.Hash // currently accumulating SHA1
	bytes int64 // number of bytes read on this connection
	eof bool // whether we have read end of file
}
@@ -1279,7 +1299,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio

	modTime := src.ModTime()

	calculatedSha1, _ := src.Hash(fs.HashSHA1)
	calculatedSha1, _ := src.Hash(hash.SHA1)
	if calculatedSha1 == "" {
		calculatedSha1 = "hex_digits_at_end"
		har := newHashAppendingReader(in, sha1.New())
@@ -1402,6 +1422,11 @@ func (o *Object) MimeType() string {
	return o.mimeType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
	return o.id
}

// Check the interfaces are satisfied
var (
	_ fs.Fs = &Fs{}
@@ -1411,4 +1436,5 @@ var (
	_ fs.ListRer = &Fs{}
	_ fs.Object = &Object{}
	_ fs.MimeTyper = &Object{}
	_ fs.IDer = &Object{}
)
backend/b2/b2_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test B2 filesystem interface
package b2_test

import (
	"testing"

	"github.com/ncw/rclone/backend/b2"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestB2:",
		NilObject: (*b2.Object)(nil),
	})
}
backend/b2/upload.go (new file, 433 lines)
@@ -0,0 +1,433 @@
// Upload large files for b2
//
// Docs - https://www.backblaze.com/b2/docs/large_files.html

package b2

import (
	"bytes"
	"crypto/sha1"
	"encoding/hex"
	"fmt"
	gohash "hash"
	"io"
	"strings"
	"sync"

	"github.com/ncw/rclone/backend/b2/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/accounting"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
)

type hashAppendingReader struct {
	h gohash.Hash
	in io.Reader
	hexSum string
	hexReader io.Reader
}

// Read returns all bytes from the original reader, then the hex sum
// of what was read so far, then EOF.
func (har *hashAppendingReader) Read(b []byte) (int, error) {
	if har.hexReader == nil {
		n, err := har.in.Read(b)
		if err == io.EOF {
			har.in = nil // allow GC
			err = nil // allow reading hexSum before EOF

			har.hexSum = hex.EncodeToString(har.h.Sum(nil))
			har.hexReader = strings.NewReader(har.hexSum)
		}
		return n, err
	}
	return har.hexReader.Read(b)
}

// AdditionalLength returns how many bytes the appended hex sum will take up.
func (har *hashAppendingReader) AdditionalLength() int {
	return hex.EncodedLen(har.h.Size())
}

// HexSum returns the hash sum as hex. It's only available after the original
// reader has EOF'd. It's an empty string before that.
func (har *hashAppendingReader) HexSum() string {
	return har.hexSum
}

// newHashAppendingReader takes a Reader and a Hash and will append the hex sum
// after the original reader reaches EOF. The increased size depends on the
// given hash, which may be queried through AdditionalLength()
func newHashAppendingReader(in io.Reader, h gohash.Hash) *hashAppendingReader {
	withHash := io.TeeReader(in, h)
	return &hashAppendingReader{h: h, in: withHash}
}
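This reader is the mechanism behind the "hex_digits_at_end" upload trick used later in this file: the payload is streamed, and its SHA1 is appended as 40 hex characters. A hedged sketch, written as if inside package b2 (newHashAppendingReader is unexported); it assumes the usual "crypto/sha1", "io/ioutil", "strings" and "fmt" imports:

```go
in := newHashAppendingReader(strings.NewReader("hello"), sha1.New())
total := int64(len("hello")) + int64(in.AdditionalLength()) // 5 payload bytes + 40 hex digits

got, _ := ioutil.ReadAll(in) // payload, then the 40-char hex SHA1
fmt.Println(int64(len(got)) == total)       // true
fmt.Println(string(got[5:]) == in.HexSum()) // true - HexSum is set once the payload EOFs
```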

// largeUpload is used to control the upload of large files which need chunking
type largeUpload struct {
	f *Fs // parent Fs
	o *Object // object being uploaded
	in io.Reader // read the data from here
	wrap accounting.WrapFn // account parts being transferred
	id string // ID of the file being uploaded
	size int64 // total size
	parts int64 // calculated number of parts, if known
	sha1s []string // slice of SHA1s for each part
	uploadMu sync.Mutex // lock for upload variable
	uploads []*api.GetUploadPartURLResponse // result of get upload URL calls
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
	remote := o.remote
	size := src.Size()
	parts := int64(0)
	sha1SliceSize := int64(maxParts)
	if size == -1 {
		fs.Debugf(o, "Streaming upload with --b2-chunk-size %s allows uploads of up to %s and will fail only when that limit is reached.", fs.SizeSuffix(chunkSize), fs.SizeSuffix(maxParts*chunkSize))
	} else {
		parts = size / int64(chunkSize)
		if size%int64(chunkSize) != 0 {
			parts++
		}
		if parts > maxParts {
			return nil, errors.Errorf("%q too big (%d bytes) makes too many parts %d > %d - increase --b2-chunk-size", remote, size, parts, maxParts)
		}
		sha1SliceSize = parts
	}

	modTime := src.ModTime()
	opts := rest.Opts{
		Method: "POST",
		Path: "/b2_start_large_file",
	}
	bucketID, err := f.getBucketID()
	if err != nil {
		return nil, err
	}
	var request = api.StartLargeFileRequest{
		BucketID: bucketID,
		Name: o.fs.root + remote,
		ContentType: fs.MimeType(src),
		Info: map[string]string{
			timeKey: timeString(modTime),
		},
	}
	// Set the SHA1 if known
	if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
		request.Info[sha1Key] = calculatedSha1
	}
	var response api.StartLargeFileResponse
	err = f.pacer.Call(func() (bool, error) {
		resp, err := f.srv.CallJSON(&opts, &request, &response)
		return f.shouldRetry(resp, err)
	})
	if err != nil {
		return nil, err
	}
	// unwrap the accounting from the input, we use wrap to put it
	// back on after the buffering
	in, wrap := accounting.UnWrap(in)
	up = &largeUpload{
		f: f,
		o: o,
		in: in,
		wrap: wrap,
		id: response.ID,
		size: size,
		parts: parts,
		sha1s: make([]string, sha1SliceSize),
	}
	return up, nil
}
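The part arithmetic above is a ceiling division capped at maxParts (B2 allows at most 10,000 parts, per the header comments later in this file). For instance, a 1 GiB upload with the default 96 MiB --b2-chunk-size needs 11 parts; a minimal sketch of the same calculation (assumes an "fmt" import):

```go
size := int64(1 << 30)           // 1 GiB
chunk := int64(96 * 1024 * 1024) // the default chunkSize above
parts := size / chunk            // 10
if size%chunk != 0 {
	parts++ // 11 - the final short part carries the remainder
}
fmt.Println(parts) // 11
```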

// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
	up.uploadMu.Lock()
	defer up.uploadMu.Unlock()
	if len(up.uploads) == 0 {
		opts := rest.Opts{
			Method: "POST",
			Path: "/b2_get_upload_part_url",
		}
		var request = api.GetUploadPartURLRequest{
			ID: up.id,
		}
		err := up.f.pacer.Call(func() (bool, error) {
			resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
			return up.f.shouldRetry(resp, err)
		})
		if err != nil {
			return nil, errors.Wrap(err, "failed to get upload URL")
		}
	} else {
		upload, up.uploads = up.uploads[0], up.uploads[1:]
	}
	return upload, nil
}

// returnUploadURL returns the UploadURL to the cache
func (up *largeUpload) returnUploadURL(upload *api.GetUploadPartURLResponse) {
	if upload == nil {
		return
	}
	up.uploadMu.Lock()
	up.uploads = append(up.uploads, upload)
	up.uploadMu.Unlock()
}

// clearUploadURL clears the current UploadURL and the AuthorizationToken
func (up *largeUpload) clearUploadURL() {
	up.uploadMu.Lock()
	up.uploads = nil
	up.uploadMu.Unlock()
}

// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
	err := up.f.pacer.Call(func() (bool, error) {
		fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

		// Get upload URL
		upload, err := up.getUploadURL()
		if err != nil {
			return false, err
		}

		in := newHashAppendingReader(bytes.NewReader(body), sha1.New())
		size := int64(len(body)) + int64(in.AdditionalLength())

		// Authorization
		//
		// An upload authorization token, from b2_get_upload_part_url.
		//
		// X-Bz-Part-Number
		//
		// A number from 1 to 10000. The parts uploaded for one file
		// must have contiguous numbers, starting with 1.
		//
		// Content-Length
		//
		// The number of bytes in the file being uploaded. Note that
		// this header is required; you cannot leave it out and just
		// use chunked encoding. The minimum size of every part but
		// the last one is 100MB.
		//
		// X-Bz-Content-Sha1
		//
		// The SHA1 checksum of this part of the file. B2 will
		// check this when the part is uploaded, to make sure that the
		// data arrived correctly. The same SHA1 checksum must be
		// passed to b2_finish_large_file.
		opts := rest.Opts{
			Method: "POST",
			RootURL: upload.UploadURL,
			Body: up.wrap(in),
			ExtraHeaders: map[string]string{
				"Authorization": upload.AuthorizationToken,
				"X-Bz-Part-Number": fmt.Sprintf("%d", part),
				sha1Header: "hex_digits_at_end",
			},
			ContentLength: &size,
		}

		var response api.UploadPartResponse

		resp, err := up.f.srv.CallJSON(&opts, nil, &response)
		retry, err := up.f.shouldRetry(resp, err)
		if err != nil {
			fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
		}
		// On retryable error clear PartUploadURL
		if retry {
			fs.Debugf(up.o, "Clearing part upload URL because of error: %v", err)
			upload = nil
		}
		up.returnUploadURL(upload)
		up.sha1s[part-1] = in.HexSum()
		return retry, err
	})
	if err != nil {
		fs.Debugf(up.o, "Error sending chunk %d: %v", part, err)
	} else {
		fs.Debugf(up.o, "Done sending chunk %d", part)
	}
	return err
}

// finish closes off the large upload
func (up *largeUpload) finish() error {
	fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
	opts := rest.Opts{
		Method: "POST",
		Path: "/b2_finish_large_file",
	}
	var request = api.FinishLargeFileRequest{
		ID: up.id,
		SHA1s: up.sha1s,
	}
	var response api.FileInfo
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
	})
	if err != nil {
		return err
	}
	return up.o.decodeMetaDataFileInfo(&response)
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
	opts := rest.Opts{
		Method: "POST",
		Path: "/b2_cancel_large_file",
	}
	var request = api.CancelLargeFileRequest{
		ID: up.id,
	}
	var response api.CancelLargeFileResponse
	err := up.f.pacer.Call(func() (bool, error) {
		resp, err := up.f.srv.CallJSON(&opts, &request, &response)
		return up.f.shouldRetry(resp, err)
	})
	return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
	wg.Add(1)
	go func(part int64, buf []byte) {
		defer wg.Done()
		defer up.f.putUploadBlock(buf)
		err := up.transferChunk(part, buf)
		if err != nil {
			select {
			case errs <- err:
			default:
			}
		}
	}(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
	if err == nil {
		select {
		case err = <-errs:
		default:
		}
	}
	if err != nil {
		fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
		cancelErr := up.cancel()
		if cancelErr != nil {
			fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
		}
		return err
	}
	return up.finish()
}

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
	fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
	errs := make(chan error, 1)
	hasMoreParts := true
	var wg sync.WaitGroup

	// Transfer initial chunk
	up.size = int64(len(initialUploadBlock))
	up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)

outer:
	for part := int64(2); hasMoreParts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		// Get a block of memory
		buf := up.f.getUploadBlock()

		// Read the chunk
		var n int
		n, err = io.ReadFull(up.in, buf)
		if err == io.ErrUnexpectedEOF {
			fs.Debugf(up.o, "Read less than a full chunk, making this the last one.")
			buf = buf[:n]
			hasMoreParts = false
			err = nil
		} else if err == io.EOF {
			fs.Debugf(up.o, "Could not read any more bytes, previous chunk was the last.")
			up.f.putUploadBlock(buf)
			err = nil
			break outer
		} else if err != nil {
			// other kinds of errors indicate failure
			up.f.putUploadBlock(buf)
			break outer
		}

		// Keep stats up to date
		up.parts = part
		up.size += int64(n)
		if part > maxParts {
			err = errors.Errorf("%q too big (%d bytes so far) makes too many parts %d > %d - increase --b2-chunk-size", up.o, up.size, up.parts, maxParts)
			break outer
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
	}
	wg.Wait()
	up.sha1s = up.sha1s[:up.parts]

	return up.finishOrCancelOnError(err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
	fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
	remaining := up.size
	errs := make(chan error, 1)
	var wg sync.WaitGroup
	var err error
outer:
	for part := int64(1); part <= up.parts; part++ {
		// Check any errors
		select {
		case err = <-errs:
			break outer
		default:
		}

		reqSize := remaining
		if reqSize >= int64(chunkSize) {
			reqSize = int64(chunkSize)
		}

		// Get a block of memory
		buf := up.f.getUploadBlock()[:reqSize]

		// Read the chunk
		_, err = io.ReadFull(up.in, buf)
		if err != nil {
			up.f.putUploadBlock(buf)
			break outer
		}

		// Transfer the chunk
		up.managedTransferChunk(&wg, errs, part, buf)
		remaining -= reqSize
	}
	wg.Wait()

	return up.finishOrCancelOnError(err, errs)
}
backend/box/api/types.go (new file, 192 lines)
@@ -0,0 +1,192 @@
// Package api has type definitions for box
//
// Converted from the API docs with help from https://mholt.github.io/json-to-go/
package api

import (
	"encoding/json"
	"fmt"
	"time"
)

const (
	// 2017-05-03T07:26:10-07:00
	timeFormat = `"` + time.RFC3339 + `"`
)

// Time represents date and time information for the
// box API, by using RFC3339
type Time time.Time

// MarshalJSON turns a Time into JSON (in UTC)
func (t *Time) MarshalJSON() (out []byte, err error) {
	timeString := (*time.Time)(t).Format(timeFormat)
	return []byte(timeString), nil
}

// UnmarshalJSON turns JSON into a Time
func (t *Time) UnmarshalJSON(data []byte) error {
	newT, err := time.Parse(timeFormat, string(data))
	if err != nil {
		return err
	}
	*t = Time(newT)
	return nil
}

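Note the quirk in timeFormat above: the layout string embeds literal double quotes around RFC3339, so MarshalJSON can emit a quoted JSON string directly and UnmarshalJSON can parse the raw quoted bytes without stripping them first. A hedged sketch (not part of the diff; assumes the backend/box/api import plus "encoding/json" and "fmt"):

```go
var t api.Time
_ = t.UnmarshalJSON([]byte(`"2017-05-03T07:26:10-07:00"`)) // quotes are part of the layout

out, _ := json.Marshal(&t)
fmt.Println(string(out)) // "2017-05-03T07:26:10-07:00"
```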
// Error is returned from box when things go wrong
type Error struct {
	Type string `json:"type"`
	Status int `json:"status"`
	Code string `json:"code"`
	ContextInfo json.RawMessage
	HelpURL string `json:"help_url"`
	Message string `json:"message"`
	RequestID string `json:"request_id"`
}

// Error returns a string for the error and satisfies the error interface
func (e *Error) Error() string {
	out := fmt.Sprintf("Error %q (%d)", e.Code, e.Status)
	if e.Message != "" {
		out += ": " + e.Message
	}
	if e.ContextInfo != nil {
		out += fmt.Sprintf(" (%+v)", e.ContextInfo)
	}
	return out
}

// Check Error satisfies the error interface
var _ error = (*Error)(nil)

// ItemFields are the fields needed for FileInfo
var ItemFields = "type,id,sequence_id,etag,sha1,name,size,created_at,modified_at,content_created_at,content_modified_at,item_status"

// Types of things in Item
const (
	ItemTypeFolder = "folder"
	ItemTypeFile = "file"
	ItemStatusActive = "active"
	ItemStatusTrashed = "trashed"
	ItemStatusDeleted = "deleted"
)

// Item describes a folder or a file as returned by Get Folder Items and others
type Item struct {
	Type string `json:"type"`
	ID string `json:"id"`
	SequenceID string `json:"sequence_id"`
	Etag string `json:"etag"`
	SHA1 string `json:"sha1"`
	Name string `json:"name"`
	Size float64 `json:"size"` // box returns this in xEyy format for very large numbers - see #2261
	CreatedAt Time `json:"created_at"`
	ModifiedAt Time `json:"modified_at"`
	ContentCreatedAt Time `json:"content_created_at"`
	ContentModifiedAt Time `json:"content_modified_at"`
	ItemStatus string `json:"item_status"` // active, trashed if the file has been moved to the trash, and deleted if the file has been permanently deleted
}

// ModTime returns the modification time of the item
func (i *Item) ModTime() (t time.Time) {
	t = time.Time(i.ContentModifiedAt)
	if t.IsZero() {
		t = time.Time(i.ModifiedAt)
	}
	return t
}

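Size is declared float64 rather than int64 because box can return very large sizes in exponent notation, which encoding/json refuses to decode into an integer field (the #2261 reference in the struct comment); callers convert with int64(...), as the box.go change further below does. A hedged sketch (assumes the same api package plus "encoding/json" and "fmt"):

```go
var item api.Item
_ = json.Unmarshal([]byte(`{"size": 1.2E+12}`), &item) // would error if Size were int64
fmt.Println(int64(item.Size)) // 1200000000000
```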
// FolderItems is returned from the GetFolderItems call
type FolderItems struct {
	TotalCount int `json:"total_count"`
	Entries []Item `json:"entries"`
	Offset int `json:"offset"`
	Limit int `json:"limit"`
	Order []struct {
		By string `json:"by"`
		Direction string `json:"direction"`
	} `json:"order"`
}

// Parent defines the ID of the parent directory
type Parent struct {
	ID string `json:"id"`
}

// CreateFolder is the request for Create Folder
type CreateFolder struct {
	Name string `json:"name"`
	Parent Parent `json:"parent"`
}

// UploadFile is the request for Upload File
type UploadFile struct {
	Name string `json:"name"`
	Parent Parent `json:"parent"`
	ContentCreatedAt Time `json:"content_created_at"`
	ContentModifiedAt Time `json:"content_modified_at"`
}

// UpdateFileModTime is used in Update File Info
type UpdateFileModTime struct {
	ContentModifiedAt Time `json:"content_modified_at"`
}

// UpdateFileMove is the request for Upload File to change name and parent
type UpdateFileMove struct {
	Name string `json:"name"`
	Parent Parent `json:"parent"`
}

// CopyFile is the request for Copy File
type CopyFile struct {
	Name string `json:"name"`
	Parent Parent `json:"parent"`
}

// UploadSessionRequest is used in Create Upload Session
type UploadSessionRequest struct {
	FolderID string `json:"folder_id,omitempty"` // don't pass for update
	FileSize int64 `json:"file_size"`
	FileName string `json:"file_name,omitempty"` // optional for update
}

// UploadSessionResponse is returned from Create Upload Session
type UploadSessionResponse struct {
	TotalParts int `json:"total_parts"`
	PartSize int64 `json:"part_size"`
	SessionEndpoints struct {
		ListParts string `json:"list_parts"`
		Commit string `json:"commit"`
		UploadPart string `json:"upload_part"`
		Status string `json:"status"`
		Abort string `json:"abort"`
	} `json:"session_endpoints"`
	SessionExpiresAt Time `json:"session_expires_at"`
	ID string `json:"id"`
	Type string `json:"type"`
	NumPartsProcessed int `json:"num_parts_processed"`
}

// Part defines the return from the upload part call, which is passed to commit upload also
type Part struct {
	PartID string `json:"part_id"`
	Offset int `json:"offset"`
	Size int `json:"size"`
	Sha1 string `json:"sha1"`
}

// UploadPartResponse is returned from the upload part call
type UploadPartResponse struct {
	Part Part `json:"part"`
}

// CommitUpload is used in the Commit Upload call
type CommitUpload struct {
	Parts []Part `json:"parts"`
	Attributes struct {
		ContentCreatedAt Time `json:"content_created_at"`
		ContentModifiedAt Time `json:"content_modified_at"`
	} `json:"attributes"`
}
@@ -16,17 +16,21 @@ import (
	"net/http"
	"net/url"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/box/api"
	"github.com/ncw/rclone/dircache"
	"github.com/ncw/rclone/backend/box/api"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"github.com/ncw/rclone/pacer"
	"github.com/ncw/rclone/rest"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fserrors"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/dircache"
	"github.com/ncw/rclone/lib/oauthutil"
	"github.com/ncw/rclone/lib/pacer"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"
)
@@ -54,7 +58,7 @@ var (
			TokenURL: "https://app.box.com/api/oauth2/token",
		},
		ClientID: rcloneClientID,
		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL: oauthutil.RedirectURL,
	}
	uploadCutoff = fs.SizeSuffix(50 * 1024 * 1024)
@@ -73,14 +77,14 @@ func init() {
			}
		},
		Options: []fs.Option{{
			Name: fs.ConfigClientID,
			Name: config.ConfigClientID,
			Help: "Box App Client Id - leave blank normally.",
		}, {
			Name: fs.ConfigClientSecret,
			Name: config.ConfigClientSecret,
			Help: "Box App Client Secret - leave blank normally.",
		}},
	})
	fs.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
	flags.VarP(&uploadCutoff, "box-upload-cutoff", "", "Cutoff for switching to multipart upload")
}

// Fs represents a remote box
|
||||
@@ -130,9 +134,6 @@ func (f *Fs) Features() *fs.Features {
|
||||
return f.features
|
||||
}
|
||||
|
||||
// Pattern to match a box path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
// parsePath parses an box 'url'
|
||||
func parsePath(path string) (root string) {
|
||||
root = strings.Trim(path, "/")
|
||||
@@ -158,7 +159,7 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
|
||||
authRety = true
|
||||
fs.Debugf(nil, "Should retry: %v", err)
|
||||
}
|
||||
return authRety || fs.ShouldRetry(err) || fs.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
return authRety || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
|
||||
}
|
||||
|
||||
// substitute reserved characters for box
|
||||
@@ -825,8 +826,8 @@ func (f *Fs) DirCacheFlush() {
|
||||
}
|
||||
|
||||
// Hashes returns the supported hash sets.
|
||||
func (f *Fs) Hashes() fs.HashSet {
|
||||
return fs.HashSet(fs.HashSHA1)
|
||||
func (f *Fs) Hashes() hash.Set {
|
||||
return hash.Set(hash.SHA1)
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------
|
||||
@@ -855,9 +856,9 @@ func (o *Object) srvPath() string {
|
||||
}
|
||||
|
||||
// Hash returns the SHA-1 of an object returning a lowercase hex string
|
||||
func (o *Object) Hash(t fs.HashType) (string, error) {
|
||||
if t != fs.HashSHA1 {
|
||||
return "", fs.ErrHashUnsupported
|
||||
func (o *Object) Hash(t hash.Type) (string, error) {
|
||||
if t != hash.SHA1 {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
return o.sha1, nil
|
||||
}
|
||||
@@ -878,7 +879,7 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
|
||||
return errors.Wrapf(fs.ErrorNotAFile, "%q is %q", o.remote, info.Type)
|
||||
}
|
||||
o.hasMetaData = true
|
||||
o.size = info.Size
|
||||
o.size = int64(info.Size)
|
||||
o.sha1 = info.SHA1
|
||||
o.modTime = info.ModTime()
|
||||
o.id = info.ID
|
||||
@@ -955,6 +956,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
|
||||
if o.id == "" {
|
||||
return nil, errors.New("can't download - no id")
|
||||
}
|
||||
fs.FixRangeOption(options, o.size)
|
||||
var resp *http.Response
|
||||
opts := rest.Opts{
|
||||
Method: "GET",
|
||||
@@ -1046,6 +1048,11 @@ func (o *Object) Remove() error {
|
||||
return o.fs.deleteObject(o.id)
|
||||
}
|
||||
|
||||
// ID returns the ID of the Object if known, or "" if not
|
||||
func (o *Object) ID() string {
|
||||
return o.id
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1056,4 +1063,5 @@ var (
|
||||
_ fs.DirMover = (*Fs)(nil)
|
||||
_ fs.DirCacheFlusher = (*Fs)(nil)
|
||||
_ fs.Object = (*Object)(nil)
|
||||
_ fs.IDer = (*Object)(nil)
|
||||
)
|
||||
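
The hunks above track rclone's package split: config, flags, obscure, fserrors, and hash moved out of the monolithic fs package, and dircache, oauthutil, pacer, and rest moved under lib/. A minimal sketch of the post-refactor retry predicate, using only the two fserrors calls visible in the hunk; the retryErrorCodes list is an assumption for this sketch (box.go declares its own set elsewhere in the file, not shown here), and the expired-token special case is omitted.

package box

import (
	"net/http"

	"github.com/ncw/rclone/fs/fserrors"
)

// retryErrorCodes: assumed values for this sketch only.
var retryErrorCodes = []int{429, 500, 502, 503, 504}

// shouldRetry combines generic error classification with HTTP status checks,
// mirroring the migrated call shown in the hunk above.
func shouldRetry(resp *http.Response, err error) (bool, error) {
	return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}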
backend/box/box_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Box filesystem interface
package box_test

import (
"testing"

"github.com/ncw/rclone/backend/box"
"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestBox:",
NilObject: (*box.Object)(nil),
})
}
backend/box/upload.go (new file, 273 lines)
@@ -0,0 +1,273 @@
// multipart upload for box

package box

import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"strconv"
"sync"
"time"

"github.com/ncw/rclone/backend/box/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
)

// createUploadSession creates an upload session for the object
func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
RootURL: uploadURL,
}
request := api.UploadSessionRequest{
FileSize: size,
}
// If object has an ID then it is existing so create a new version
if o.id != "" {
opts.Path = "/files/" + o.id + "/upload_sessions"
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
request.FileName = replaceReservedChars(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
return shouldRetry(resp, err)
})
return
}

// sha1Digest produces a digest using sha1 as per RFC3230
func sha1Digest(digest []byte) string {
return "sha=" + base64.StdEncoding.EncodeToString(digest)
}

// uploadPart uploads a part in an upload session
func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
Method: "PUT",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
ContentType: "application/octet-stream",
ContentLength: &chunkSize,
ContentRange: fmt.Sprintf("bytes %d-%d/%d", offset, offset+chunkSize-1, totalSize),
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum[:]),
},
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
return nil, err
}
return response, nil
}

// commitUpload finishes an upload session
func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
RootURL: uploadURL,
ExtraHeaders: map[string]string{
"Digest": sha1Digest(sha1sum),
},
}
request := api.CommitUpload{
Parts: parts,
}
request.Attributes.ContentModifiedAt = api.Time(modTime)
request.Attributes.ContentCreatedAt = api.Time(modTime)
var body []byte
var resp *http.Response
maxTries := fs.Config.LowLevelRetries
const defaultDelay = 10
var tries int
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
body, err = rest.ReadBody(resp)
return shouldRetry(resp, err)
})
delay := defaultDelay
why := "unknown"
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
// the last part. Ignore this error and wait.
if boxErr, ok := err.(*api.Error); ok && boxErr.Code == "parts_mismatch" {
why = err.Error()
} else {
return nil, err
}
} else {
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated:
break outer
case http.StatusAccepted:
why = "not ready yet"
delayString := resp.Header.Get("Retry-After")
if delayString != "" {
delay, err = strconv.Atoi(delayString)
if err != nil {
fs.Debugf(o, "Couldn't decode Retry-After header %q: %v", delayString, err)
delay = defaultDelay
}
}
default:
return nil, errors.Errorf("unknown HTTP status return %q (%d)", resp.Status, resp.StatusCode)
}
}
fs.Debugf(o, "commit multipart upload failed %d/%d - trying again in %d seconds (%s)", tries+1, maxTries, delay, why)
time.Sleep(time.Duration(delay) * time.Second)
}
if tries >= maxTries {
return nil, errors.New("too many tries to commit multipart upload - increase --low-level-retries")
}
err = json.Unmarshal(body, &result)
if err != nil {
return nil, errors.Wrapf(err, "couldn't decode commit response: %q", body)
}
return result, nil
}

// abortUpload cancels an upload session
func (o *Object) abortUpload(SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
RootURL: uploadURL,
NoResponse: true,
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
resp, err = o.fs.srv.Call(&opts)
return shouldRetry(resp, err)
})
return err
}

// uploadMultipart uploads a file using multipart upload
func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
session, err := o.createUploadSession(leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
chunkSize := session.PartSize
fs.Debugf(o, "Multipart upload session started for %d parts of size %v", session.TotalParts, fs.SizeSuffix(chunkSize))

// Cancel the session if something went wrong
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
cancelErr := o.abortUpload(session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
}
}()

// unwrap the accounting from the input, we use wrap to put it
// back on after the buffering
in, wrap := accounting.UnWrap(in)

// Upload the chunks
remaining := size
position := int64(0)
parts := make([]api.Part, session.TotalParts)
hash := sha1.New()
errs := make(chan error, 1)
var wg sync.WaitGroup
outer:
for part := 0; part < session.TotalParts; part++ {
// Check any errors
select {
case err = <-errs:
break outer
default:
}

reqSize := remaining
if reqSize >= int64(chunkSize) {
reqSize = int64(chunkSize)
}

// Make a block of memory
buf := make([]byte, reqSize)

// Read the chunk
_, err = io.ReadFull(in, buf)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to read source")
break outer
}

// Make the global hash (must be done sequentially)
_, _ = hash.Write(buf)

// Transfer the chunk
wg.Add(1)
o.fs.uploadToken.Get()
go func(part int, position int64) {
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
case errs <- err:
default:
}
return
}
parts[part] = partResponse.Part
}(part, position)

// ready for next block
remaining -= chunkSize
position += chunkSize
}
wg.Wait()
if err == nil {
select {
case err = <-errs:
default:
}
}
if err != nil {
return err
}

// Finalise the upload session
result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}

if result.TotalCount != 1 || len(result.Entries) != 1 {
return errors.Errorf("multipart upload failed %v - not sure why", o)
}
return o.setMetaData(&result.Entries[0])
}
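
A standalone, runnable illustration (not from the rclone source) of the two headers uploadPart builds above: the RFC 3230 Digest value, which is the base64 of the raw SHA-1 rather than the usual hex form, and the inclusive Content-Range line. The chunk payload and sizes are invented example values.

package main

import (
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

// sha1Digest mirrors the helper above: "sha=" plus base64 of the raw digest.
func sha1Digest(digest []byte) string {
	return "sha=" + base64.StdEncoding.EncodeToString(digest)
}

func main() {
	chunk := []byte("example chunk payload")
	sum := sha1.Sum(chunk)

	offset := int64(0)
	totalSize := int64(len(chunk))
	chunkSize := int64(len(chunk))

	fmt.Println("Digest:", sha1Digest(sum[:]))
	// byte ranges are inclusive at both ends, hence the -1
	fmt.Printf("Content-Range: bytes %d-%d/%d\n", offset, offset+chunkSize-1, totalSize)
}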
backend/cache/cache.go (vendored, new file, 1574 lines): file diff suppressed because it is too large
backend/cache/cache_internal_test.go (vendored, new file, 1707 lines): file diff suppressed because it is too large
backend/cache/cache_mount_unix_test.go (vendored, new file, 78 lines)
@@ -0,0 +1,78 @@
// +build !plan9,!windows

package cache_test

import (
"os"
"testing"
"time"

"bazil.org/fuse"
fusefs "bazil.org/fuse/fs"
"github.com/ncw/rclone/cmd/mount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)

func (r *run) mountFs(t *testing.T, f fs.Fs) {
device := f.Name() + ":" + f.Root()
var options = []fuse.MountOption{
fuse.MaxReadahead(uint32(mountlib.MaxReadAhead)),
fuse.Subtype("rclone"),
fuse.FSName(device), fuse.VolumeName(device),
fuse.NoAppleDouble(),
fuse.NoAppleXattr(),
//fuse.AllowOther(),
}
err := os.MkdirAll(r.mntDir, os.ModePerm)
require.NoError(t, err)
c, err := fuse.Mount(r.mntDir, options...)
require.NoError(t, err)
filesys := mount.NewFS(f)
server := fusefs.New(c, nil)

// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
err := server.Serve(filesys)
closeErr := c.Close()
if err == nil {
err = closeErr
}
r.unmountRes <- err
}()

// check if the mount process has an error to report
<-c.Ready
require.NoError(t, c.MountError)

r.unmountFn = func() error {
// Shutdown the VFS
filesys.VFS.Shutdown()
return fuse.Unmount(r.mntDir)
}

r.vfs = filesys.VFS
r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
var err error

for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
backend/cache/cache_mount_windows_test.go (vendored, new file, 124 lines)
@@ -0,0 +1,124 @@
// +build windows

package cache_test

import (
"fmt"
"os"
"testing"
"time"

"github.com/billziss-gh/cgofuse/fuse"
"github.com/ncw/rclone/cmd/cmount"
"github.com/ncw/rclone/cmd/mountlib"
"github.com/ncw/rclone/fs"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)

// waitFor runs fn() until it returns true or the timeout expires
func waitFor(fn func() bool) (ok bool) {
const totalWait = 10 * time.Second
const individualWait = 10 * time.Millisecond
for i := 0; i < int(totalWait/individualWait); i++ {
ok = fn()
if ok {
return ok
}
time.Sleep(individualWait)
}
return false
}

func (r *run) mountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")

device := f.Name() + ":" + f.Root()
options := []string{
"-o", "fsname=" + device,
"-o", "subtype=rclone",
"-o", fmt.Sprintf("max_readahead=%d", mountlib.MaxReadAhead),
"-o", "uid=-1",
"-o", "gid=-1",
"-o", "allow_other",
// This causes FUSE to supply O_TRUNC with the Open
// call which is more efficient for cmount. However
// it does not work with cgofuse on Windows with
// WinFSP so cmount must work with or without it.
"-o", "atomic_o_trunc",
"--FileSystemName=rclone",
}

fsys := cmount.NewFS(f)
host := fuse.NewFileSystemHost(fsys)

// Serve the mount point in the background returning error to errChan
r.unmountRes = make(chan error, 1)
go func() {
var err error
ok := host.Mount(r.mntDir, options)
if !ok {
err = errors.New("mount failed")
}
r.unmountRes <- err
}()

// unmount
r.unmountFn = func() error {
// Shutdown the VFS
fsys.VFS.Shutdown()
if host.Unmount() {
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err != nil
}) {
t.Fatalf("mountpoint %q didn't disappear after unmount - continuing anyway", r.mntDir)
}
return nil
}
return errors.New("host unmount failed")
}

// Wait for the filesystem to become ready, checking the file
// system didn't blow up before starting
select {
case err := <-r.unmountRes:
require.NoError(t, err)
case <-time.After(time.Second * 3):
}

// Wait for the mount point to be available on Windows
// On Windows the Init signal comes slightly before the mount is ready
if !waitFor(func() bool {
_, err := os.Stat(r.mntDir)
return err == nil
}) {
t.Errorf("mountpoint %q didn't became available on mount", r.mntDir)
}

r.vfs = fsys.VFS
r.isMounted = true
}

func (r *run) unmountFs(t *testing.T, f fs.Fs) {
// FIXME implement cmount
t.Skip("windows not supported yet")
var err error

for i := 0; i < 4; i++ {
err = r.unmountFn()
if err != nil {
//log.Printf("signal to umount failed - retrying: %v", err)
time.Sleep(3 * time.Second)
continue
}
break
}
require.NoError(t, err)
err = <-r.unmountRes
require.NoError(t, err)
err = r.vfs.CleanUp()
require.NoError(t, err)
r.isMounted = false
}
backend/cache/cache_test.go (vendored, new file, 21 lines)
@@ -0,0 +1,21 @@
// Test Cache filesystem interface

// +build !plan9

package cache_test

import (
"testing"

"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/local"
"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestCache:",
NilObject: (*cache.Object)(nil),
})
}
@@ -1,6 +1,6 @@
// Build for cache for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 !go1.7
// +build plan9

package cache
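
For context on the hunk above: dropping !go1.7 leaves the stub gated only on plan9. The stub and the real implementation files carry inverse build constraints, so exactly one side compiles on any platform. A sketch of the two file headers side by side (file names as in this package):

// cache_unsupported.go: compiled only on plan9, keeps the package buildable there.
// +build plan9

package cache

// cache.go, handle.go, directory.go (shown in this diff): the inverse tag.
// +build !plan9

package cache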
backend/cache/cache_upload_test.go (vendored, new file, 455 lines)
@@ -0,0 +1,455 @@
// +build !plan9

package cache_test

import (
"math/rand"
"os"
"path"
"strconv"
"testing"
"time"

"fmt"

"github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/drive"
"github.com/ncw/rclone/fs"
"github.com/stretchr/testify/require"
)

func TestInternalUploadTempDirCreated(t *testing.T) {
id := fmt.Sprintf("tiutdc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id)})
defer runInstance.cleanupFs(t, rootFs, boltDb)

_, err := os.Stat(path.Join(runInstance.tmpUploadDir, id))
require.NoError(t, err)
}

func testInternalUploadQueueOneFile(t *testing.T, id string, rootFs fs.Fs, boltDb *cache.Persistent) {
// create some rand test data
testSize := int64(524288000)
testReader := runInstance.randomReader(t, testSize)
bu := runInstance.listenForBackgroundUpload(t, rootFs, "one")
runInstance.writeRemoteReader(t, rootFs, "one", testReader)
// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.NoError(t, err)

if runInstance.rootIsCrypt {
require.Equal(t, int64(524416032), ti.Size())
} else {
require.Equal(t, testSize, ti.Size())
}
de1, err := runInstance.list(t, rootFs, "")
require.NoError(t, err)
require.Len(t, de1, 1)

runInstance.completeBackgroundUpload(t, "one", bu)
// check if it was removed from temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))

// check if it can be read
data2, err := runInstance.readDataFromRemote(t, rootFs, "one", 0, int64(1024), false)
require.NoError(t, err)
require.Len(t, data2, 1024)
}

func TestInternalUploadQueueOneFileNoRest(t *testing.T) {
id := fmt.Sprintf("tiuqofnr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "0s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadQueueOneFileWithRest(t *testing.T) {
id := fmt.Sprintf("tiuqofwr%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1m"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

testInternalUploadQueueOneFile(t, id, rootFs, boltDb)
}

func TestInternalUploadMoveExistingFile(t *testing.T) {
id := fmt.Sprintf("tiumef%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "3s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)

// create some rand test data
testSize := int64(10485760)
testReader := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")

de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)

time.Sleep(time.Second * 5)
//_ = os.Remove(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
//require.NoError(t, err)

err = runInstance.dirMove(t, rootFs, "one/test", "second/test")
require.NoError(t, err)

// check if it can be read
de1, err = runInstance.list(t, rootFs, "second/test")
require.NoError(t, err)
require.Len(t, de1, 1)
}

func TestInternalUploadTempPathCleaned(t *testing.T) {
id := fmt.Sprintf("tiutpc%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("one")
require.NoError(t, err)
err = rootFs.Mkdir("one/test")
require.NoError(t, err)
err = rootFs.Mkdir("second")
require.NoError(t, err)

// create some rand test data
testSize := int64(1048576)
testReader := runInstance.randomReader(t, testSize)
testReader2 := runInstance.randomReader(t, testSize)
runInstance.writeObjectReader(t, rootFs, "one/test/data.bin", testReader)
runInstance.writeObjectReader(t, rootFs, "second/data.bin", testReader2)

runInstance.completeAllBackgroundUploads(t, rootFs, "one/test/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one/test")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "one")))
require.True(t, os.IsNotExist(err))
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second")))
require.False(t, os.IsNotExist(err))

runInstance.completeAllBackgroundUploads(t, rootFs, "second/data.bin")
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/data.bin")))
require.True(t, os.IsNotExist(err))

de1, err := runInstance.list(t, rootFs, "one/test")
require.NoError(t, err)
require.Len(t, de1, 1)

// check if it can be read
de1, err = runInstance.list(t, rootFs, "second")
require.NoError(t, err)
require.Len(t, de1, 1)
}

func TestInternalUploadQueueMoreFiles(t *testing.T) {
id := fmt.Sprintf("tiuqmf%v", time.Now().Unix())
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1s"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

err := rootFs.Mkdir("test")
require.NoError(t, err)
minSize := 5242880
maxSize := 10485760
totalFiles := 10
rand.Seed(time.Now().Unix())

lastFile := ""
for i := 0; i < totalFiles; i++ {
size := int64(rand.Intn(maxSize-minSize) + minSize)
testReader := runInstance.randomReader(t, size)
remote := "test/" + strconv.Itoa(i) + ".bin"
runInstance.writeRemoteReader(t, rootFs, remote, testReader)

// validate that it exists in temp fs
ti, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, remote)))
require.NoError(t, err)
require.Equal(t, size, runInstance.cleanSize(t, ti.Size()))

if runInstance.wrappedIsExternal && i < totalFiles-1 {
time.Sleep(time.Second * 3)
}
lastFile = remote
}

// check if cache lists all files, likely temp upload didn't finish yet
de1, err := runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)

// wait for background uploader to do its thing
runInstance.completeAllBackgroundUploads(t, rootFs, lastFile)

// retry until we have no more temp files and fail if they don't go down to 0
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test")))
require.True(t, os.IsNotExist(err))

// check if cache lists all files
de1, err = runInstance.list(t, rootFs, "test")
require.NoError(t, err)
require.Len(t, de1, totalFiles)
}

func TestInternalUploadTempFileOperations(t *testing.T) {
id := "tiutfo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

boltDb.PurgeTempUploads()

// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)

// test DirMove - allowed
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("second/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.NoError(t, err)
_, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.Error(t, err)
var started bool
started, err = boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "second/one")))
require.NoError(t, err)
require.False(t, started)
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}

// test Rmdir - allowed
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
require.Contains(t, err.Error(), "directory not empty")
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
started, err := boltDb.SearchPendingUpload(runInstance.encryptRemoteIfNeeded(t, path.Join(id, "test/one")))
require.False(t, started)
require.NoError(t, err)

// test Move/Rename -- allowed
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.NoError(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/second")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")
}

// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}

// test Remove -- allowed
err = runInstance.rm(t, rootFs, "test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.Error(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.Error(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// test Update -- allowed
firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
require.NoError(t, err)
obj2, err := rootFs.NewObject("test/one")
require.NoError(t, err)
data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
require.Equal(t, "one content updated", string(data2))
tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
if runInstance.rootIsCrypt {
require.Equal(t, int64(67), tmpInfo.Size())
} else {
require.Equal(t, int64(len(data2)), tmpInfo.Size())
}

// test SetModTime -- allowed
secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
require.NoError(t, err)
require.NotEqual(t, secondModTime, firstModTime)
require.NotEqual(t, time.Time{}, firstModTime)
require.NotEqual(t, time.Time{}, secondModTime)
}

func TestInternalUploadUploadingFileOperations(t *testing.T) {
id := "tiuufo"
rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true,
nil,
map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "1h"})
defer runInstance.cleanupFs(t, rootFs, boltDb)

boltDb.PurgeTempUploads()

// create some rand test data
runInstance.mkdir(t, rootFs, "test")
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// check if it can be read
data1, err := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data1)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)

err = boltDb.SetPendingUploadToStarted(runInstance.encryptRemoteIfNeeded(t, path.Join(rootFs.Root(), "test/one")))
require.NoError(t, err)

// test DirMove
err = runInstance.dirMove(t, rootFs, "test", "second")
if err != errNotSupported {
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "second/one")))
require.Error(t, err)
}

// test Rmdir
err = runInstance.rm(t, rootFs, "test")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)

// test Move/Rename
err = runInstance.move(t, rootFs, path.Join("test", "one"), path.Join("test", "second"))
if err != errNotSupported {
require.Error(t, err)
// try to read from it
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/second")
require.Error(t, err)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/second")))
require.Error(t, err)
}

// test Copy -- allowed
err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
if err != errNotSupported {
require.NoError(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
_, err = rootFs.NewObject("test/third")
require.NoError(t, err)
data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
require.NoError(t, err)
require.Equal(t, []byte("one content"), data2)
// validate that it exists in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/third")))
require.NoError(t, err)
}

// test Remove
err = runInstance.rm(t, rootFs, "test/one")
require.Error(t, err)
_, err = rootFs.NewObject("test/one")
require.NoError(t, err)
// validate that it doesn't exist in temp fs
_, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
require.NoError(t, err)
runInstance.writeRemoteString(t, rootFs, "test/one", "one content")

// test Update - this seems to work. Why? FIXME
//firstModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated", func() {
// data2 := runInstance.readDataFromRemote(t, rootFs, "test/one", 0, int64(len("one content updated")), true)
// require.Equal(t, "one content", string(data2))
//
// tmpInfo, err := os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
// require.NoError(t, err)
// if runInstance.rootIsCrypt {
// require.Equal(t, int64(67), tmpInfo.Size())
// } else {
// require.Equal(t, int64(len(data2)), tmpInfo.Size())
// }
//})
//require.Error(t, err)

// test SetModTime -- seems to work cause of previous
//secondModTime, err := runInstance.modTime(t, rootFs, "test/one")
//require.NoError(t, err)
//require.Equal(t, secondModTime, firstModTime)
//require.NotEqual(t, time.Time{}, firstModTime)
//require.NotEqual(t, time.Time{}, secondModTime)
}
backend/cache/directory.go (vendored, new file, 130 lines)
@@ -0,0 +1,130 @@
// +build !plan9

package cache

import (
"time"

"path"

"github.com/ncw/rclone/fs"
)

// Directory is a generic dir that stores basic information about it
type Directory struct {
fs.Directory `json:"-"`

CacheFs *Fs `json:"-"` // cache fs
Name string `json:"name"` // name of the directory
Dir string `json:"dir"` // abs path of the directory
CacheModTime int64 `json:"modTime"` // modification or creation time - IsZero for unknown
CacheSize int64 `json:"size"` // size of directory and contents or -1 if unknown

CacheItems int64 `json:"items"` // number of objects or -1 for unknown
CacheType string `json:"cacheType"` // object type
CacheTs *time.Time `json:",omitempty"`
}

// NewDirectory builds an empty dir which will be used to unmarshal data in it
func NewDirectory(f *Fs, remote string) *Directory {
cd := ShallowDirectory(f, remote)
t := time.Now()
cd.CacheTs = &t

return cd
}

// ShallowDirectory builds an empty dir which will be used to unmarshal data in it
func ShallowDirectory(f *Fs, remote string) *Directory {
var cd *Directory
fullRemote := cleanPath(path.Join(f.Root(), remote))

// build a new one
dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
cd = &Directory{
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: time.Now().UnixNano(),
CacheSize: 0,
CacheItems: 0,
CacheType: "Directory",
}

return cd
}

// DirectoryFromOriginal builds one from a generic fs.Directory
func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
var cd *Directory
fullRemote := path.Join(f.Root(), d.Remote())

dir := cleanPath(path.Dir(fullRemote))
name := cleanPath(path.Base(fullRemote))
t := time.Now()
cd = &Directory{
Directory: d,
CacheFs: f,
Name: name,
Dir: dir,
CacheModTime: d.ModTime().UnixNano(),
CacheSize: d.Size(),
CacheItems: d.Items(),
CacheType: "Directory",
CacheTs: &t,
}

return cd
}

// Fs returns its FS info
func (d *Directory) Fs() fs.Info {
return d.CacheFs
}

// String returns a human friendly name for this object
func (d *Directory) String() string {
if d == nil {
return "<nil>"
}
return d.Remote()
}

// Remote returns the remote path
func (d *Directory) Remote() string {
return d.CacheFs.cleanRootFromPath(d.abs())
}

// abs returns the absolute path to the dir
func (d *Directory) abs() string {
return cleanPath(path.Join(d.Dir, d.Name))
}

// parentRemote returns the absolute path parent remote
func (d *Directory) parentRemote() string {
absPath := d.abs()
if absPath == "" {
return ""
}
return cleanPath(path.Dir(absPath))
}

// ModTime returns the cached ModTime
func (d *Directory) ModTime() time.Time {
return time.Unix(0, d.CacheModTime)
}

// Size returns the cached Size
func (d *Directory) Size() int64 {
return d.CacheSize
}

// Items returns the cached Items
func (d *Directory) Items() int64 {
return d.CacheItems
}

var (
_ fs.Directory = (*Directory)(nil)
)
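
A standalone sketch (illustration only, with a simplified local type) of why Directory above mixes json-tagged fields with `json:"-"` exclusions: only the Cache* metadata survives a round trip through the persistent store, while the live fs.Directory and *Fs handles are dropped on encode and re-attached after decode.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// cachedDir mirrors the serializable subset of the Directory struct above.
type cachedDir struct {
	Name         string `json:"name"`
	Dir          string `json:"dir"`
	CacheModTime int64  `json:"modTime"`
	CacheSize    int64  `json:"size"`
	CacheItems   int64  `json:"items"`
	CacheType    string `json:"cacheType"`
}

func main() {
	in := cachedDir{
		Name: "photos", Dir: "media", CacheModTime: time.Now().UnixNano(),
		CacheSize: -1, CacheItems: -1, CacheType: "Directory",
	}
	blob, _ := json.Marshal(in) // what gets persisted

	var out cachedDir
	_ = json.Unmarshal(blob, &out) // what decoding recovers: metadata only
	fmt.Printf("%+v\n", out)
}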
backend/cache/handle.go (vendored, new file, 668 lines)
@@ -0,0 +1,668 @@
// +build !plan9

package cache

import (
"fmt"
"io"
"sync"
"time"

"path"
"runtime"
"strings"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/operations"
"github.com/pkg/errors"
)

var uploaderMap = make(map[string]*backgroundWriter)
var uploaderMapMx sync.Mutex

// initBackgroundUploader returns a single instance
func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {
// write lock to create one
uploaderMapMx.Lock()
defer uploaderMapMx.Unlock()
if b, ok := uploaderMap[fs.String()]; ok {
// if it was already started we close it so that it can be started again
if b.running {
b.close()
} else {
return b, nil
}
}

bb := newBackgroundWriter(fs)
uploaderMap[fs.String()] = bb
return uploaderMap[fs.String()], nil
}

// Handle is managing the read/write/seek operations on an open handle
type Handle struct {
cachedObject *Object
cfs *Fs
memory *Memory
preloadQueue chan int64
preloadOffset int64
offset int64
seenOffsets map[int64]bool
mu sync.Mutex
confirmReading chan bool

UseMemory bool
workers []*worker
closed bool
reading bool
}

// NewObjectHandle returns a new Handle for an existing Object
func NewObjectHandle(o *Object, cfs *Fs) *Handle {
r := &Handle{
cachedObject: o,
cfs: cfs,
offset: 0,
preloadOffset: -1, // -1 to trigger the first preload

UseMemory: cfs.chunkMemory,
reading: false,
}
r.seenOffsets = make(map[int64]bool)
r.memory = NewMemory(-1)

// create a larger buffer to queue up requests
r.preloadQueue = make(chan int64, r.cfs.totalWorkers*10)
r.confirmReading = make(chan bool)
r.startReadWorkers()
return r
}

// cacheFs is a convenience method to get the parent cache FS of the object's manager
func (r *Handle) cacheFs() *Fs {
return r.cfs
}

// storage is a convenience method to get the persistent storage of the object's manager
func (r *Handle) storage() *Persistent {
return r.cacheFs().cache
}

// String representation of this reader
func (r *Handle) String() string {
return r.cachedObject.abs()
}

// startReadWorkers will start the worker pool
func (r *Handle) startReadWorkers() {
if r.hasAtLeastOneWorker() {
return
}
totalWorkers := r.cacheFs().totalWorkers

if r.cacheFs().plexConnector.isConfigured() {
if !r.cacheFs().plexConnector.isConnected() {
err := r.cacheFs().plexConnector.authenticate()
if err != nil {
fs.Errorf(r, "failed to authenticate to Plex: %v", err)
}
}
if r.cacheFs().plexConnector.isConnected() {
totalWorkers = 1
}
}

r.scaleWorkers(totalWorkers)
}

// scaleWorkers will scale the worker pool up or down to the desired count
func (r *Handle) scaleWorkers(desired int) {
current := len(r.workers)
if current == desired {
return
}
if current > desired {
// scale in gracefully
for i := 0; i < current-desired; i++ {
r.preloadQueue <- -1
}
} else {
// scale out
for i := 0; i < desired-current; i++ {
w := &worker{
r: r,
ch: r.preloadQueue,
id: current + i,
}
go w.run()

r.workers = append(r.workers, w)
}
}
// ignore first scale out from 0
if current != 0 {
fs.Debugf(r, "scale workers to %v", desired)
}
}

func (r *Handle) confirmExternalReading() {
// if we have a max value of workers
// then we skip this step
if len(r.workers) > 1 ||
!r.cacheFs().plexConnector.isConfigured() {
return
}
if !r.cacheFs().plexConnector.isPlaying(r.cachedObject) {
return
}
fs.Infof(r, "confirmed reading by external reader")
r.scaleWorkers(r.cacheFs().totalMaxWorkers)
}

// queueOffset will send an offset to the workers if it's different from the last one
func (r *Handle) queueOffset(offset int64) {
if offset != r.preloadOffset {
// clean past in-memory chunks
if r.UseMemory {
go r.memory.CleanChunksByNeed(offset)
}
r.confirmExternalReading()
r.preloadOffset = offset

// clear the past seen chunks
// they will remain in our persistent storage but will be removed from transient
// so they need to be picked up by a worker
for k := range r.seenOffsets {
if k < offset {
r.seenOffsets[k] = false
}
}

for i := 0; i < len(r.workers); i++ {
o := r.preloadOffset + r.cacheFs().chunkSize*int64(i)
if o < 0 || o >= r.cachedObject.Size() {
continue
}
if v, ok := r.seenOffsets[o]; ok && v {
continue
}

r.seenOffsets[o] = true
r.preloadQueue <- o
}
}
}

func (r *Handle) hasAtLeastOneWorker() bool {
oneWorker := false
for i := 0; i < len(r.workers); i++ {
if r.workers[i].isRunning() {
oneWorker = true
}
}
return oneWorker
}

// getChunk is called by the FS to retrieve a specific chunk of known start and size from where it can find it
// it can be from transient or persistent cache
// it will also build the chunk from the cache's specific chunk boundaries and build the final desired chunk in a buffer
func (r *Handle) getChunk(chunkStart int64) ([]byte, error) {
var data []byte
var err error

// we calculate the modulus of the requested offset with the size of a chunk
offset := chunkStart % r.cacheFs().chunkSize

// we align the start offset of the first chunk to a likely chunk in the storage
chunkStart = chunkStart - offset
r.queueOffset(chunkStart)
found := false

if r.UseMemory {
data, err = r.memory.GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
}
}

if !found {
// we're gonna give the workers a chance to pickup the chunk
// and retry a couple of times
for i := 0; i < r.cacheFs().readRetries*8; i++ {
data, err = r.storage().GetChunk(r.cachedObject, chunkStart)
if err == nil {
found = true
break
}

fs.Debugf(r, "%v: chunk retry storage: %v", chunkStart, i)
time.Sleep(time.Millisecond * 500)
}
}

// not found in ram or
// the worker didn't managed to download the chunk in time so we abort and close the stream
if err != nil || len(data) == 0 || !found {
if !r.hasAtLeastOneWorker() {
fs.Errorf(r, "out of workers")
return nil, io.ErrUnexpectedEOF
}

return nil, errors.Errorf("chunk not found %v", chunkStart)
}

// first chunk will be aligned with the start
if offset > 0 {
if offset > int64(len(data)) {
fs.Errorf(r, "unexpected conditions during reading. current position: %v, current chunk position: %v, current chunk size: %v, offset: %v, chunk size: %v, file size: %v",
r.offset, chunkStart, len(data), offset, r.cacheFs().chunkSize, r.cachedObject.Size())
return nil, io.ErrUnexpectedEOF
}
data = data[int(offset):]
}

return data, nil
}

// Read a chunk from storage or len(p)
func (r *Handle) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
var buf []byte

// first reading
if !r.reading {
r.reading = true
}
// reached EOF
if r.offset >= r.cachedObject.Size() {
return 0, io.EOF
}
currentOffset := r.offset
buf, err = r.getChunk(currentOffset)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(r, "(%v/%v) error (%v) response", currentOffset, r.cachedObject.Size(), err)
}
if len(buf) == 0 && err != io.ErrUnexpectedEOF {
return 0, io.EOF
}
readSize := copy(p, buf)
newOffset := currentOffset + int64(readSize)
r.offset = newOffset

return readSize, err
}

// Close will tell the workers to stop
func (r *Handle) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
if r.closed {
return errors.New("file already closed")
}

close(r.preloadQueue)
r.closed = true
// wait for workers to complete their jobs before returning
waitCount := 3
for i := 0; i < len(r.workers); i++ {
waitIdx := 0
for r.workers[i].isRunning() && waitIdx < waitCount {
time.Sleep(time.Second)
waitIdx++
}
}
r.memory.db.Flush()

fs.Debugf(r, "cache reader closed %v", r.offset)
return nil
}

// Seek will move the current offset based on whence and instruct the workers to move there too
func (r *Handle) Seek(offset int64, whence int) (int64, error) {
r.mu.Lock()
defer r.mu.Unlock()

var err error
switch whence {
case io.SeekStart:
fs.Debugf(r, "moving offset set from %v to %v", r.offset, offset)
r.offset = offset
case io.SeekCurrent:
fs.Debugf(r, "moving offset cur from %v to %v", r.offset, r.offset+offset)
r.offset += offset
case io.SeekEnd:
fs.Debugf(r, "moving offset end (%v) from %v to %v", r.cachedObject.Size(), r.offset, r.cachedObject.Size()+offset)
r.offset = r.cachedObject.Size() + offset
default:
err = errors.Errorf("cache: unimplemented seek whence %v", whence)
}

chunkStart := r.offset - (r.offset % r.cacheFs().chunkSize)
if chunkStart >= r.cacheFs().chunkSize {
chunkStart = chunkStart - r.cacheFs().chunkSize
}
r.queueOffset(chunkStart)

return r.offset, err
}

type worker struct {
r *Handle
ch <-chan int64
rc io.ReadCloser
id int
running bool
mu sync.Mutex
}

// String is a representation of this worker
func (w *worker) String() string {
return fmt.Sprintf("worker-%v <%v>", w.id, w.r.cachedObject.Name)
}

// reader will return a reader depending on the capabilities of the source reader:
// - if it supports seeking it will seek to the desired offset and return the same reader
// - if it doesn't support seeking it will close a possible existing one and open at the desired offset
// - if there's no reader associated with this worker, it will create one
func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error) {
var err error
r := w.rc
if w.rc == nil {
r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
})
if err != nil {
return nil, err
}
return r, nil
}

if !closeOpen {
if do, ok := r.(fs.RangeSeeker); ok {
_, err = do.RangeSeek(offset, io.SeekStart, end-offset)
return r, err
} else if do, ok := r.(io.Seeker); ok {
_, err = do.Seek(offset, io.SeekStart)
return r, err
}
}

_ = w.rc.Close()
return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
if err != nil {
return nil, err
}
return r, nil
})
}

func (w *worker) isRunning() bool {
w.mu.Lock()
defer w.mu.Unlock()
return w.running
}

func (w *worker) setRunning(f bool) {
w.mu.Lock()
defer w.mu.Unlock()
w.running = f
}

// run is the main loop for the worker which receives offsets to preload
func (w *worker) run() {
var err error
var data []byte
defer w.setRunning(false)
defer func() {
if w.rc != nil {
_ = w.rc.Close()
w.setRunning(false)
}
}()

for {
chunkStart, open := <-w.ch
w.setRunning(true)
if chunkStart < 0 || !open {
break
}

// skip if it exists
if w.r.UseMemory {
if w.r.memory.HasChunk(w.r.cachedObject, chunkStart) {
continue
}

// add it in ram if it's in the persistent storage
data, err = w.r.storage().GetChunk(w.r.cachedObject, chunkStart)
if err == nil {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
} else {
continue
}
}
} else {
if w.r.storage().HasChunk(w.r.cachedObject, chunkStart) {
continue
}
}

chunkEnd := chunkStart + w.r.cacheFs().chunkSize
// TODO: Remove this comment if it proves to be reliable for #1896
//if chunkEnd > w.r.cachedObject.Size() {
// chunkEnd = w.r.cachedObject.Size()
//}

w.download(chunkStart, chunkEnd, 0)
}
}

func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
var err error
var data []byte

// stop retries
if retry >= w.r.cacheFs().readRetries {
return
}
// back-off between retries
if retry > 0 {
time.Sleep(time.Second * time.Duration(retry))
}

closeOpen := false
if retry > 0 {
closeOpen = true
}
w.rc, err = w.reader(chunkStart, chunkEnd, closeOpen)
// we seem to be getting only errors so we abort
if err != nil {
fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
w.download(chunkStart, chunkEnd, retry+1)
return
}

data = make([]byte, chunkEnd-chunkStart)
var sourceRead int
sourceRead, err = io.ReadFull(w.rc, data)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
err = w.r.cachedObject.refreshFromSource(true)
if err != nil {
fs.Errorf(w, "%v", err)
}
w.download(chunkStart, chunkEnd, retry+1)
return
}
data = data[:sourceRead] // reslice to remove extra garbage
if err == io.ErrUnexpectedEOF {
fs.Debugf(w, "partial downloaded chunk %v", fs.SizeSuffix(chunkStart))
} else {
fs.Debugf(w, "downloaded chunk %v", chunkStart)
}

if w.r.UseMemory {
err = w.r.memory.AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in ram %v: %v", chunkStart, err)
}
}

err = w.r.storage().AddChunk(w.r.cachedObject.abs(), data, chunkStart)
if err != nil {
fs.Errorf(w, "failed caching chunk in storage %v: %v", chunkStart, err)
}
}

const (
// BackgroundUploadStarted is a state for a temp file that has started upload
BackgroundUploadStarted = iota
// BackgroundUploadCompleted is a state for a temp file that has completed upload
BackgroundUploadCompleted
// BackgroundUploadError is a state for a temp file that has an error upload
BackgroundUploadError
)

// BackgroundUploadState is an entity that maps to an existing file which is stored on the temp fs
type BackgroundUploadState struct {
Remote string
Status int
Error error
}

type backgroundWriter struct {
fs *Fs
stateCh chan int
|
||||
running bool
|
||||
notifyCh chan BackgroundUploadState
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func newBackgroundWriter(f *Fs) *backgroundWriter {
|
||||
b := &backgroundWriter{
|
||||
fs: f,
|
||||
stateCh: make(chan int),
|
||||
notifyCh: make(chan BackgroundUploadState),
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) close() {
|
||||
b.stateCh <- 2
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.running = false
|
||||
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) pause() {
|
||||
b.stateCh <- 1
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) play() {
|
||||
b.stateCh <- 0
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) isRunning() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.running
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) notify(remote string, status int, err error) {
|
||||
state := BackgroundUploadState{
|
||||
Remote: remote,
|
||||
Status: status,
|
||||
Error: err,
|
||||
}
|
||||
select {
|
||||
case b.notifyCh <- state:
|
||||
fs.Debugf(remote, "notified background upload state: %v", state.Status)
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (b *backgroundWriter) run() {
|
||||
state := 0
|
||||
for {
|
||||
b.mu.Lock()
|
||||
b.running = true
|
||||
b.mu.Unlock()
|
||||
select {
|
||||
case s := <-b.stateCh:
|
||||
state = s
|
||||
default:
|
||||
//
|
||||
}
|
||||
switch state {
|
||||
case 1:
|
||||
runtime.Gosched()
|
||||
time.Sleep(time.Millisecond * 500)
|
||||
continue
|
||||
case 2:
|
||||
return
|
||||
}
|
||||
|
||||
absPath, err := b.fs.cache.getPendingUpload(b.fs.Root(), b.fs.tempWriteWait)
|
||||
if err != nil || absPath == "" || !b.fs.isRootInPath(absPath) {
|
||||
time.Sleep(time.Second)
|
||||
continue
|
||||
}
|
||||
|
||||
remote := b.fs.cleanRootFromPath(absPath)
|
||||
b.notify(remote, BackgroundUploadStarted, nil)
|
||||
fs.Infof(remote, "background upload: started upload")
|
||||
err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
|
||||
if err != nil {
|
||||
b.notify(remote, BackgroundUploadError, err)
|
||||
_ = b.fs.cache.rollbackPendingUpload(absPath)
|
||||
fs.Errorf(remote, "background upload: %v", err)
|
||||
continue
|
||||
}
|
||||
// clean empty dirs up to root
|
||||
thisDir := cleanPath(path.Dir(remote))
|
||||
for thisDir != "" {
|
||||
thisList, err := b.fs.tempFs.List(thisDir)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(thisList) > 0 {
|
||||
break
|
||||
}
|
||||
err = b.fs.tempFs.Rmdir(thisDir)
|
||||
fs.Debugf(thisDir, "cleaned from temp path")
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
thisDir = cleanPath(path.Dir(thisDir))
|
||||
}
|
||||
fs.Infof(remote, "background upload: uploaded entry")
|
||||
err = b.fs.cache.removePendingUpload(absPath)
|
||||
if err != nil && !strings.Contains(err.Error(), "pending upload not found") {
|
||||
fs.Errorf(remote, "background upload: %v", err)
|
||||
}
|
||||
parentCd := NewDirectory(b.fs, cleanPath(path.Dir(remote)))
|
||||
err = b.fs.cache.ExpireDir(parentCd)
|
||||
if err != nil {
|
||||
fs.Errorf(parentCd, "background upload: cache expire error: %v", err)
|
||||
}
|
||||
b.fs.notifyChangeUpstream(remote, fs.EntryObject)
|
||||
fs.Infof(remote, "finished background upload")
|
||||
b.notify(remote, BackgroundUploadCompleted, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ io.ReadCloser = (*Handle)(nil)
|
||||
_ io.Seeker = (*Handle)(nil)
|
||||
)
|
||||
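Editorial note on the chunk arithmetic in Seek above: the handle rounds the seek target down to a chunk boundary, then queues the preload one whole chunk earlier so the chunk just before the target is also warm. A minimal sketch of that arithmetic (the 5 MiB chunkSize here is illustrative only; the real value comes from the cache remote's configuration):

	// a minimal sketch, assuming an illustrative chunkSize of 5 MiB
	const chunkSize = 5 * 1024 * 1024

	func queueTarget(offset int64) int64 {
		chunkStart := offset - (offset % chunkSize) // round down to a chunk boundary
		if chunkStart >= chunkSize {
			chunkStart -= chunkSize // start preloading one chunk early
		}
		return chunkStart
	}

	// e.g. offset = 12 MiB rounds down to 10 MiB, and preloading is queued from 5 MiB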
backend/cache/object.go (new file, 358 lines)
@@ -0,0 +1,358 @@
// +build !plan9

package cache

import (
	"io"
	"path"
	"sync"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
)

const (
	objectInCache       = "Object"
	objectPendingUpload = "TempObject"
)

// Object is a generic file like object that stores basic information about it
type Object struct {
	fs.Object `json:"-"`

	ParentFs      fs.Fs                `json:"-"`        // parent fs
	CacheFs       *Fs                  `json:"-"`        // cache fs
	Name          string               `json:"name"`     // name of the object
	Dir           string               `json:"dir"`      // abs path of the object
	CacheModTime  int64                `json:"modTime"`  // modification or creation time - IsZero for unknown
	CacheSize     int64                `json:"size"`     // size of directory and contents or -1 if unknown
	CacheStorable bool                 `json:"storable"` // says whether this object can be stored
	CacheType     string               `json:"cacheType"`
	CacheTs       time.Time            `json:"cacheTs"`
	CacheHashes   map[hash.Type]string // all supported hashes cached

	refreshMutex sync.Mutex
}

// NewObject builds one from a remote path
func NewObject(f *Fs, remote string) *Object {
	fullRemote := path.Join(f.Root(), remote)
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.tempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co := &Object{
		ParentFs:      parentFs,
		CacheFs:       f,
		Name:          cleanPath(name),
		Dir:           cleanPath(dir),
		CacheModTime:  time.Now().UnixNano(),
		CacheSize:     0,
		CacheStorable: false,
		CacheType:     cacheType,
		CacheTs:       time.Now(),
	}
	return co
}

// ObjectFromOriginal builds one from a generic fs.Object
func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
	var co *Object
	fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
	dir, name := path.Split(fullRemote)

	cacheType := objectInCache
	parentFs := f.UnWrap()
	if f.tempWritePath != "" {
		_, err := f.cache.SearchPendingUpload(fullRemote)
		if err == nil { // queued for upload
			cacheType = objectPendingUpload
			parentFs = f.tempFs
			fs.Debugf(fullRemote, "pending upload found")
		}
	}

	co = &Object{
		ParentFs:  parentFs,
		CacheFs:   f,
		Name:      cleanPath(name),
		Dir:       cleanPath(dir),
		CacheType: cacheType,
		CacheTs:   time.Now(),
	}
	co.updateData(o)
	return co
}

func (o *Object) updateData(source fs.Object) {
	o.Object = source
	o.CacheModTime = source.ModTime().UnixNano()
	o.CacheSize = source.Size()
	o.CacheStorable = source.Storable()
	o.CacheTs = time.Now()
	o.CacheHashes = make(map[hash.Type]string)
}

// Fs returns its FS info
func (o *Object) Fs() fs.Info {
	return o.CacheFs
}

// String returns a human friendly name for this object
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.Remote()
}

// Remote returns the remote path
func (o *Object) Remote() string {
	p := path.Join(o.Dir, o.Name)
	return o.CacheFs.cleanRootFromPath(p)
}

// abs returns the absolute path to the object
func (o *Object) abs() string {
	return path.Join(o.Dir, o.Name)
}

// ModTime returns the cached ModTime
func (o *Object) ModTime() time.Time {
	_ = o.refresh()
	return time.Unix(0, o.CacheModTime)
}

// Size returns the cached Size
func (o *Object) Size() int64 {
	_ = o.refresh()
	return o.CacheSize
}

// Storable returns the cached Storable
func (o *Object) Storable() bool {
	_ = o.refresh()
	return o.CacheStorable
}

// refresh will check if the object info is expired and request the info from source if it is
// all these conditions must be true to ignore a refresh
// 1. cache ts didn't expire yet
// 2. is not pending a notification from the wrapped fs
func (o *Object) refresh() error {
	isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
	isExpired := time.Now().After(o.CacheTs.Add(o.CacheFs.fileAge))
	if !isExpired && !isNotified {
		return nil
	}

	return o.refreshFromSource(true)
}

// refreshFromSource requests the original FS for the object in case it comes from a cached entry
func (o *Object) refreshFromSource(force bool) error {
	o.refreshMutex.Lock()
	defer o.refreshMutex.Unlock()
	var err error
	var liveObject fs.Object

	if o.Object != nil && !force {
		return nil
	}
	if o.isTempFile() {
		liveObject, err = o.ParentFs.NewObject(o.Remote())
		err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
	} else {
		liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
		err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
	}
	if err != nil {
		fs.Errorf(o, "error refreshing object: %v", err)
		return err
	}
	o.updateData(liveObject)
	o.persist()

	return nil
}

// SetModTime sets the ModTime of this object
func (o *Object) SetModTime(t time.Time) error {
	if err := o.refreshFromSource(false); err != nil {
		return err
	}

	err := o.Object.SetModTime(t)
	if err != nil {
		return err
	}

	o.CacheModTime = t.UnixNano()
	o.persist()
	fs.Debugf(o, "updated ModTime: %v", t)

	return nil
}

// Open is used to request a specific part of the file using fs.RangeOption
func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
	if err := o.refreshFromSource(true); err != nil {
		return nil, err
	}

	var err error
	cacheReader := NewObjectHandle(o, o.CacheFs)
	var offset, limit int64 = 0, -1
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.Size())
		}
		_, err = cacheReader.Seek(offset, io.SeekStart)
		if err != nil {
			return nil, err
		}
	}

	return readers.NewLimitedReadCloser(cacheReader, limit), nil
}

// Update will change the object data
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	if err := o.refreshFromSource(false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.tempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't update", o)
		}
	}
	fs.Debugf(o, "updating object contents with size %v", src.Size())

	// FIXME use reliable upload
	err := o.Object.Update(in, src, options...)
	if err != nil {
		fs.Errorf(o, "error updating source: %v", err)
		return err
	}

	// deleting cached chunks and info to be replaced with new ones
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

	o.CacheModTime = src.ModTime().UnixNano()
	o.CacheSize = src.Size()
	o.CacheHashes = make(map[hash.Type]string)
	o.CacheTs = time.Now()
	o.persist()

	return nil
}

// Remove deletes the object from both the cache and the source
func (o *Object) Remove() error {
	if err := o.refreshFromSource(false); err != nil {
		return err
	}
	// pause background uploads if active
	if o.CacheFs.tempWritePath != "" {
		o.CacheFs.backgroundRunner.pause()
		defer o.CacheFs.backgroundRunner.play()
		// don't allow started uploads
		if o.isTempFile() && o.tempFileStartedUpload() {
			return errors.Errorf("%v is currently uploading, can't delete", o)
		}
	}
	err := o.Object.Remove()
	if err != nil {
		return err
	}

	fs.Debugf(o, "removing object")
	_ = o.CacheFs.cache.RemoveObject(o.abs())
	_ = o.CacheFs.cache.removePendingUpload(o.abs())
	parentCd := NewDirectory(o.CacheFs, cleanPath(path.Dir(o.Remote())))
	_ = o.CacheFs.cache.ExpireDir(parentCd)
	// advertise to ChangeNotify if wrapped doesn't do that
	o.CacheFs.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

	return nil
}

// Hash requests a hash of the object and stores in the cache
// since it might or might not be called, this is lazy loaded
func (o *Object) Hash(ht hash.Type) (string, error) {
	_ = o.refresh()
	if o.CacheHashes == nil {
		o.CacheHashes = make(map[hash.Type]string)
	}

	cachedHash, found := o.CacheHashes[ht]
	if found {
		return cachedHash, nil
	}
	if err := o.refreshFromSource(false); err != nil {
		return "", err
	}
	liveHash, err := o.Object.Hash(ht)
	if err != nil {
		return "", err
	}
	o.CacheHashes[ht] = liveHash

	o.persist()
	fs.Debugf(o, "object hash cached: %v", liveHash)

	return liveHash, nil
}

// persist adds this object to the persistent cache
func (o *Object) persist() *Object {
	err := o.CacheFs.cache.AddObject(o)
	if err != nil {
		fs.Errorf(o, "failed to cache object: %v", err)
	}
	return o
}

func (o *Object) isTempFile() bool {
	_, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		o.CacheType = objectInCache
		return false
	}

	o.CacheType = objectPendingUpload
	return true
}

func (o *Object) tempFileStartedUpload() bool {
	started, err := o.CacheFs.cache.SearchPendingUpload(o.abs())
	if err != nil {
		return false
	}
	return started
}

var (
	_ fs.Object = (*Object)(nil)
)
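Object.Open above turns fs.SeekOption/fs.RangeOption into an (offset, limit) pair before seeking the cache reader. As a hedged sketch of the range semantics it relies on (modelled on HTTP Range requests; decodeRange is a hypothetical stand-in written for illustration, not rclone's actual fs.RangeOption.Decode):

	// a sketch of mapping an HTTP-style range onto (offset, limit);
	// limit == -1 means "read to the end"
	func decodeRange(start, end, size int64) (offset, limit int64) {
		switch {
		case start >= 0 && end >= 0: // "bytes=start-end": read end-start+1 bytes
			return start, end - start + 1
		case start >= 0: // "bytes=start-": read from start to the end
			return start, -1
		case end >= 0: // "bytes=-end": read the trailing end bytes
			return size - end, -1
		}
		return 0, -1
	}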
backend/cache/plex.go (new file, 282 lines)
@@ -0,0 +1,282 @@
// +build !plan9

package cache

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"strings"
	"time"

	"sync"

	"bytes"
	"io/ioutil"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/patrickmn/go-cache"
	"golang.org/x/net/websocket"
)

const (
	// defPlexLoginURL is the default URL for Plex login
	defPlexLoginURL        = "https://plex.tv/users/sign_in.json"
	defPlexNotificationURL = "%s/:/websockets/notifications?X-Plex-Token=%s"
)

// PlaySessionStateNotification is part of the API response of Plex
type PlaySessionStateNotification struct {
	SessionKey       string `json:"sessionKey"`
	GUID             string `json:"guid"`
	Key              string `json:"key"`
	ViewOffset       int64  `json:"viewOffset"`
	State            string `json:"state"`
	TranscodeSession string `json:"transcodeSession"`
}

// NotificationContainer is part of the API response of Plex
type NotificationContainer struct {
	Type             string                         `json:"type"`
	Size             int                            `json:"size"`
	PlaySessionState []PlaySessionStateNotification `json:"PlaySessionStateNotification"`
}

// PlexNotification is part of the API response of Plex
type PlexNotification struct {
	Container NotificationContainer `json:"NotificationContainer"`
}

// plexConnector manages the cache integration with Plex
type plexConnector struct {
	url        *url.URL
	username   string
	password   string
	token      string
	f          *Fs
	mu         sync.Mutex
	running    bool
	runningMu  sync.Mutex
	stateCache *cache.Cache
}

// newPlexConnector connects to a Plex server and generates a token
func newPlexConnector(f *Fs, plexURL, username, password string) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:          f,
		url:        u,
		username:   username,
		password:   password,
		token:      "",
		stateCache: cache.New(time.Hour, time.Minute),
	}

	return pc, nil
}

// newPlexConnectorWithToken connects to a Plex server using a previously generated token
func newPlexConnectorWithToken(f *Fs, plexURL, token string) (*plexConnector, error) {
	u, err := url.ParseRequestURI(strings.TrimRight(plexURL, "/"))
	if err != nil {
		return nil, err
	}

	pc := &plexConnector{
		f:          f,
		url:        u,
		token:      token,
		stateCache: cache.New(time.Hour, time.Minute),
	}
	pc.listenWebsocket()

	return pc, nil
}

func (p *plexConnector) closeWebsocket() {
	p.runningMu.Lock()
	defer p.runningMu.Unlock()
	fs.Infof("plex", "stopped Plex watcher")
	p.running = false
}

func (p *plexConnector) listenWebsocket() {
	p.runningMu.Lock()
	defer p.runningMu.Unlock()

	u := strings.Replace(p.url.String(), "http://", "ws://", 1)
	u = strings.Replace(u, "https://", "wss://", 1)
	conn, err := websocket.Dial(fmt.Sprintf(defPlexNotificationURL, strings.TrimRight(u, "/"), p.token),
		"", "http://localhost")
	if err != nil {
		fs.Errorf("plex", "%v", err)
		return
	}

	p.running = true
	go func() {
		for {
			if !p.isConnected() {
				break
			}

			notif := &PlexNotification{}
			err := websocket.JSON.Receive(conn, notif)
			if err != nil {
				fs.Debugf("plex", "%v", err)
				p.closeWebsocket()
				break
			}
			// we're only interested in play events
			if notif.Container.Type == "playing" {
				// we loop through each of them
				for _, v := range notif.Container.PlaySessionState {
					// event type of playing
					if v.State == "playing" {
						// if it's not cached get the details and cache them
						if _, found := p.stateCache.Get(v.Key); !found {
							req, err := http.NewRequest("GET", fmt.Sprintf("%s%s", p.url.String(), v.Key), nil)
							if err != nil {
								continue
							}
							p.fillDefaultHeaders(req)
							resp, err := http.DefaultClient.Do(req)
							if err != nil {
								continue
							}
							var data []byte
							data, err = ioutil.ReadAll(resp.Body)
							if err != nil {
								continue
							}
							p.stateCache.Set(v.Key, data, cache.DefaultExpiration)
						}
					} else if v.State == "stopped" {
						p.stateCache.Delete(v.Key)
					}
				}
			}
		}
	}()
}

// fillDefaultHeaders will add common headers to requests
func (p *plexConnector) fillDefaultHeaders(req *http.Request) {
	req.Header.Add("X-Plex-Client-Identifier", fmt.Sprintf("rclone (%v)", p.f.String()))
	req.Header.Add("X-Plex-Product", fmt.Sprintf("rclone (%v)", p.f.Name()))
	req.Header.Add("X-Plex-Version", fs.Version)
	req.Header.Add("Accept", "application/json")
	if p.token != "" {
		req.Header.Add("X-Plex-Token", p.token)
	}
}

// authenticate will generate a token based on a username/password
func (p *plexConnector) authenticate() error {
	p.mu.Lock()
	defer p.mu.Unlock()

	form := url.Values{}
	form.Set("user[login]", p.username)
	form.Add("user[password]", p.password)
	req, err := http.NewRequest("POST", defPlexLoginURL, strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	p.fillDefaultHeaders(req)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	var data map[string]interface{}
	err = json.NewDecoder(resp.Body).Decode(&data)
	if err != nil {
		return fmt.Errorf("failed to obtain token: %v", err)
	}
	tokenGen, ok := get(data, "user", "authToken")
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	token, ok := tokenGen.(string)
	if !ok {
		return fmt.Errorf("failed to obtain token: %v", data)
	}
	p.token = token
	if p.token != "" {
		config.FileSet(p.f.Name(), "plex_token", p.token)
		config.SaveConfig()
		fs.Infof(p.f.Name(), "Connected to Plex server: %v", p.url.String())
	}
	p.listenWebsocket()

	return nil
}

// isConnected checks if this rclone is authenticated to Plex
func (p *plexConnector) isConnected() bool {
	p.runningMu.Lock()
	defer p.runningMu.Unlock()
	return p.running
}

// isConfigured checks if this rclone is configured to use a Plex server
func (p *plexConnector) isConfigured() bool {
	return p.url != nil
}

func (p *plexConnector) isPlaying(co *Object) bool {
	var err error
	if !p.isConnected() {
		p.listenWebsocket()
	}

	remote := co.Remote()
	if cr, yes := p.f.isWrappedByCrypt(); yes {
		remote, err = cr.DecryptFileName(co.Remote())
		if err != nil {
			fs.Debugf("plex", "can not decrypt wrapped file: %v", err)
			return false
		}
	}

	isPlaying := false
	for _, v := range p.stateCache.Items() {
		if bytes.Contains(v.Object.([]byte), []byte(remote)) {
			isPlaying = true
			break
		}
	}

	return isPlaying
}

// adapted from: https://stackoverflow.com/a/28878037 (credit)
func get(m interface{}, path ...interface{}) (interface{}, bool) {
	for _, p := range path {
		switch idx := p.(type) {
		case string:
			if mm, ok := m.(map[string]interface{}); ok {
				if val, found := mm[idx]; found {
					m = val
					continue
				}
			}
			return nil, false
		case int:
			if mm, ok := m.([]interface{}); ok {
				if len(mm) > idx {
					m = mm[idx]
					continue
				}
			}
			return nil, false
		}
	}
	return m, true
}
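For illustration, the get helper above walks a decoded JSON document with alternating map keys and slice indices; authenticate uses it as get(data, "user", "authToken"). A small usage sketch (encoding/json and fmt are already imported by this file):

	var data map[string]interface{}
	_ = json.Unmarshal([]byte(`{"user":{"authToken":"abc123"}}`), &data)
	if v, ok := get(data, "user", "authToken"); ok {
		token, _ := v.(string)
		fmt.Println(token) // prints "abc123"
	}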
@@ -1,4 +1,4 @@
-// +build !plan9,go1.7
+// +build !plan9
 
 package cache
 
@@ -14,8 +14,6 @@ import (
 
 // Memory is a wrapper of transient storage for a go-cache store
 type Memory struct {
-	ChunkStorage
-
 	db *cache.Cache
 }
backend/cache/storage_persistent.go (new file, 1099 lines)
File diff suppressed because it is too large
@@ -13,7 +13,9 @@ import (
 	"sync"
 	"unicode/utf8"
 
-	"github.com/ncw/rclone/crypt/pkcs7"
+	"github.com/ncw/rclone/backend/crypt/pkcs7"
+	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/accounting"
 	"github.com/pkg/errors"
 
 	"golang.org/x/crypto/nacl/secretbox"
@@ -63,10 +65,11 @@ type ReadSeekCloser interface {
 	io.Reader
 	io.Seeker
 	io.Closer
+	fs.RangeSeeker
 }
 
-// OpenAtOffset opens the file handle at the offset given
-type OpenAtOffset func(offset int64) (io.ReadCloser, error)
+// OpenRangeSeek opens the file handle at the offset with the limit given
+type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
 
 // Cipher is used to swap out the encryption implementations
 type Cipher interface {
@@ -83,11 +86,13 @@ type Cipher interface {
 	// DecryptData
 	DecryptData(io.ReadCloser) (io.ReadCloser, error)
 	// DecryptDataSeek decrypt at a given position
-	DecryptDataSeek(open OpenAtOffset, offset int64) (ReadSeekCloser, error)
+	DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
 	// EncryptedSize calculates the size of the data when encrypted
 	EncryptedSize(int64) int64
 	// DecryptedSize calculates the size of the data when decrypted
 	DecryptedSize(int64) (int64, error)
+	// NameEncryptionMode returns the used mode for name handling
+	NameEncryptionMode() NameEncryptionMode
 }
 
 // NameEncryptionMode is the type of file name encryption in use
@@ -544,6 +549,10 @@ func (c *cipher) DecryptDirName(in string) (string, error) {
 	return c.decryptFileName(in)
 }
 
+func (c *cipher) NameEncryptionMode() NameEncryptionMode {
+	return c.mode
+}
+
 // nonce is an NACL secretbox nonce
 type nonce [fileNonceSize]byte
 
@@ -691,11 +700,12 @@ func (fh *encrypter) finish(err error) (int, error) {
 
 // Encrypt data encrypts the data stream
 func (c *cipher) EncryptData(in io.Reader) (io.Reader, error) {
+	in, wrap := accounting.UnWrap(in) // unwrap the accounting off the Reader
 	out, err := c.newEncrypter(in, nil)
 	if err != nil {
 		return nil, err
 	}
-	return out, nil
+	return wrap(out), nil // and wrap the accounting back on
}
 
 // decrypter decrypts an io.ReaderCloser on the fly
@@ -710,7 +720,8 @@ type decrypter struct {
 	bufIndex     int
 	bufSize      int
 	err          error
-	open         OpenAtOffset
+	limit        int64 // limit of bytes to read, -1 for unlimited
+	open         OpenRangeSeek
 }
 
 // newDecrypter creates a new file handle decrypting on the fly
@@ -720,6 +731,7 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 		c:       c,
 		buf:     c.getBlock(),
 		readBuf: c.getBlock(),
+		limit:   -1,
 	}
 	// Read file header (magic + nonce)
 	readBuf := fh.readBuf[:fileHeaderSize]
@@ -741,9 +753,24 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
 }
 
 // newDecrypterSeek creates a new file handle decrypting on the fly
-func (c *cipher) newDecrypterSeek(open OpenAtOffset, offset int64) (fh *decrypter, err error) {
-	// Open initially with no seek
-	rc, err := open(0)
+func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
+	var rc io.ReadCloser
+	doRangeSeek := false
+	setLimit := false
+	if offset == 0 && limit < 0 {
+		// If no offset or limit then open whole file
+		rc, err = open(0, -1)
+	} else if offset == 0 {
+		// If no offset open the header + limit worth of the file
+		_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
+		rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
+		setLimit = true
+	} else {
+		// Otherwise just read the header to start with
+		rc, err = open(0, int64(fileHeaderSize))
+		doRangeSeek = true
+	}
 	if err != nil {
 		return nil, err
 	}
@@ -752,14 +779,17 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
 	if err != nil {
 		return nil, err
 	}
-	fh.open = open // will be called by fh.Seek
-	if offset != 0 {
-		_, err = fh.Seek(offset, 0)
+	fh.open = open // will be called by fh.RangeSeek
+	if doRangeSeek {
+		_, err = fh.RangeSeek(offset, io.SeekStart, limit)
 		if err != nil {
 			_ = fh.Close()
 			return nil, err
 		}
 	}
+	if setLimit {
+		fh.limit = limit
+	}
 	return fh, nil
 }
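A note on the OpenRangeSeek contract in newDecrypterSeek above: the callback is first asked for the header region only (or the whole file), and later re-invoked by RangeSeek with a block-aligned offset and limit. A minimal in-memory callback satisfying that contract, shaped like the one the tests further down use (ciphertext here is an assumed byte slice holding the whole encrypted file):

	// a minimal sketch, assuming ciphertext []byte holds the encrypted file
	open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
		end := int64(len(ciphertext))
		if underlyingLimit >= 0 && underlyingOffset+underlyingLimit < end {
			end = underlyingOffset + underlyingLimit // honour the requested limit
		}
		return ioutil.NopCloser(bytes.NewReader(ciphertext[underlyingOffset:end])), nil
	}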
@@ -812,20 +842,73 @@ func (fh *decrypter) Read(p []byte) (n int, err error) {
 			return 0, fh.finish(err)
 		}
 	}
-	n = copy(p, fh.buf[fh.bufIndex:fh.bufSize])
+	toCopy := fh.bufSize - fh.bufIndex
+	if fh.limit >= 0 && fh.limit < int64(toCopy) {
+		toCopy = int(fh.limit)
+	}
+	n = copy(p, fh.buf[fh.bufIndex:fh.bufIndex+toCopy])
 	fh.bufIndex += n
+	if fh.limit >= 0 {
+		fh.limit -= int64(n)
+		if fh.limit == 0 {
+			return n, fh.finish(io.EOF)
+		}
+	}
 	return n, nil
 }
 
-// Seek as per io.Seeker
-func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
+// calculateUnderlying converts an (offset, limit) in a crypted file
+// into an (underlyingOffset, underlyingLimit) for the underlying
+// file.
+//
+// It also returns number of bytes to discard after reading the first
+// block and number of blocks this is from the start so the nonce can
+// be incremented.
+func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit, discard, blocks int64) {
+	// blocks we need to seek, plus bytes we need to discard
+	blocks, discard = offset/blockDataSize, offset%blockDataSize
+
+	// Offset in underlying stream we need to seek
+	underlyingOffset = int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
+
+	// work out how many blocks we need to read
+	underlyingLimit = int64(-1)
+	if limit >= 0 {
+		// bytes to read beyond the first block
+		bytesToRead := limit - (blockDataSize - discard)
+
+		// Read the first block
+		blocksToRead := int64(1)
+
+		if bytesToRead > 0 {
+			// Blocks that need to be read plus left over blocks
+			extraBlocksToRead, endBytes := bytesToRead/blockDataSize, bytesToRead%blockDataSize
+			if endBytes != 0 {
+				// If left over bytes must read another block
+				extraBlocksToRead++
+			}
+			blocksToRead += extraBlocksToRead
+		}
+
+		// Must read a whole number of blocks
+		underlyingLimit = blocksToRead * (blockHeaderSize + blockDataSize)
+	}
+	return
+}
+
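A worked example of calculateUnderlying above, assuming crypt's usual constants (fileHeaderSize = 32, blockHeaderSize = 16, blockDataSize = 64 KiB, so one underlying block is 65552 bytes): for offset = 65537 and limit = 100, blocks = 1 and discard = 1, giving underlyingOffset = 32 + 1*65552 = 65584; bytesToRead = 100 - (65536 - 1) is negative, so only the first block is needed and underlyingLimit = 1 * 65552 = 65552. This matches the {blockDataSize + 1, 100, ...} rows in the test tables below.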
+// RangeSeek behaves like a call to Seek(offset int64, whence
+// int) with the output wrapped in an io.LimitedReader
+// limiting the total length to limit.
+//
+// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
+func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
 	fh.mu.Lock()
 	defer fh.mu.Unlock()
 
 	if fh.open == nil {
 		return 0, fh.finish(errors.New("can't seek - not initialised with newDecrypterSeek"))
 	}
-	if whence != 0 {
+	if whence != io.SeekStart {
 		return 0, fh.finish(errors.New("can only seek from the start"))
 	}
 
@@ -836,20 +919,16 @@
 		return 0, fh.err
 	}
 
-	// blocks we need to seek, plus bytes we need to discard
-	blocks, discard := offset/blockDataSize, offset%blockDataSize
-
-	// Offset in underlying stream we need to seek
-	underlyingOffset := int64(fileHeaderSize) + blocks*(blockHeaderSize+blockDataSize)
+	underlyingOffset, underlyingLimit, discard, blocks := calculateUnderlying(offset, limit)
 
 	// Move the nonce on the correct number of blocks from the start
 	fh.nonce = fh.initialNonce
 	fh.nonce.add(uint64(blocks))
 
 	// Can we seek underlying stream directly?
-	if do, ok := fh.rc.(io.Seeker); ok {
+	if do, ok := fh.rc.(fs.RangeSeeker); ok {
 		// Seek underlying stream directly
-		_, err := do.Seek(underlyingOffset, 0)
+		_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
 		if err != nil {
 			return 0, fh.finish(err)
 		}
@@ -859,9 +938,9 @@
 		fh.rc = nil
 
 		// Re-open the underlying object with the offset given
-		rc, err := fh.open(underlyingOffset)
+		rc, err := fh.open(underlyingOffset, underlyingLimit)
 		if err != nil {
-			return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset"))
+			return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
 		}
 
 		// Set the file handle
@@ -880,9 +959,17 @@
 	}
 	fh.bufIndex = int(discard)
 
+	// Set the limit
+	fh.limit = limit
+
 	return offset, nil
 }
 
+// Seek implements the io.Seeker interface
+func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
+	return fh.RangeSeek(offset, whence, -1)
+}
+
 // finish sets the final error and tidies up
 func (fh *decrypter) finish(err error) error {
 	if fh.err != nil {
@@ -954,8 +1041,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
 // The open function must return a ReadCloser opened to the offset supplied
 //
 // You must use this form of DecryptData if you might want to Seek the file handle
-func (c *cipher) DecryptDataSeek(open OpenAtOffset, offset int64) (ReadSeekCloser, error) {
-	out, err := c.newDecrypterSeek(open, offset)
+func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
+	out, err := c.newDecrypterSeek(open, offset, limit)
 	if err != nil {
 		return nil, err
 	}
@@ -992,7 +1079,9 @@ func (c *cipher) DecryptedSize(size int64) (int64, error) {
 
 // check interfaces
 var (
-	_ Cipher        = (*cipher)(nil)
-	_ io.ReadCloser = (*decrypter)(nil)
-	_ io.Reader     = (*encrypter)(nil)
+	_ Cipher         = (*cipher)(nil)
+	_ io.ReadCloser  = (*decrypter)(nil)
+	_ io.Seeker      = (*decrypter)(nil)
+	_ fs.RangeSeeker = (*decrypter)(nil)
+	_ io.Reader      = (*encrypter)(nil)
 )
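Putting RangeSeek and the limited Read together, a caller reads an arbitrary plaintext range roughly as below (a usage sketch against the Cipher interface above; c and open are assumed to be a configured cipher and an OpenRangeSeek callback):

	// read 100 decrypted bytes starting at plaintext offset 1000
	rsc, err := c.DecryptDataSeek(open, 0, -1)
	if err != nil {
		return err
	}
	defer func() { _ = rsc.Close() }()
	if _, err := rsc.RangeSeek(1000, io.SeekStart, 100); err != nil {
		return err
	}
	buf := make([]byte, 100)
	n, err := io.ReadFull(rsc, buf) // Read hands back io.EOF once the 100-byte limit is consumed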
@@ -9,7 +9,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/ncw/rclone/crypt/pkcs7"
+	"github.com/ncw/rclone/backend/crypt/pkcs7"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -935,7 +935,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 	assert.Equal(t, int64(16), n)
 }
 
-func TestNewDecrypterSeek(t *testing.T) {
+func TestNewDecrypterSeekLimit(t *testing.T) {
 	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 	c.cryptoRand = &zeroes{} // nudge the crypto rand generator
@@ -956,42 +956,188 @@
 		127, 128, 129, 255, 256, 257, 511, 512, 513, 1023, 1024, 1025, 2047, 2048, 2049,
 		4095, 4096, 4097, 8191, 8192, 8193, 16383, 16384, 16385, 32767, 32768, 32769,
 		65535, 65536, 65537, 131071, 131072, 131073, dataSize - 1, dataSize}
+	limits := []int{-1, 0, 1, 65535, 65536, 65537, 131071, 131072, 131073}
 
 	// Open stream with a seek of underlyingOffset
-	open := func(underlyingOffset int64) (io.ReadCloser, error) {
-		return ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):])), nil
+	var reader io.ReadCloser
+	open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+		end := len(ciphertext)
+		if underlyingLimit >= 0 {
+			end = int(underlyingOffset + underlyingLimit)
+			if end > len(ciphertext) {
+				end = len(ciphertext)
+			}
+		}
+		reader = ioutil.NopCloser(bytes.NewBuffer(ciphertext[int(underlyingOffset):end]))
+		return reader, nil
 	}
 
-	inBlock := make([]byte, 1024)
+	inBlock := make([]byte, dataSize)
 
 	// Check the seek worked by reading a block and checking it
 	// against what it should be
-	check := func(rc ReadSeekCloser, offset int) {
+	check := func(rc io.Reader, offset, limit int) {
 		n, err := io.ReadFull(rc, inBlock)
 		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
 			require.NoError(t, err)
 		}
 		seekedDecrypted := inBlock[:n]
 
-		require.Equal(t, plaintext[offset:offset+n], seekedDecrypted)
+		what := fmt.Sprintf("offset = %d, limit = %d", offset, limit)
+		if limit >= 0 {
+			assert.Equal(t, limit, n, what)
+		}
+		require.Equal(t, plaintext[offset:offset+n], seekedDecrypted, what)
+
+		// We should have completely emptied the reader at this point
+		n, err = reader.Read(inBlock)
+		assert.Equal(t, io.EOF, err)
+		assert.Equal(t, 0, n)
 	}
 
 	// Now try decoding it with a open/seek
 	for _, offset := range trials {
-		rc, err := c.DecryptDataSeek(open, int64(offset))
-		assert.NoError(t, err)
+		for _, limit := range limits {
+			if offset+limit > len(plaintext) {
+				continue
+			}
+			rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
+			assert.NoError(t, err)
 
-		check(rc, offset)
+			check(rc, offset, limit)
+		}
 	}
 
-	// Now try decoding it with a single open and lots of seeks
-	rc, err := c.DecryptDataSeek(open, 0)
+	// Try decoding it with a single open and lots of seeks
+	fh, err := c.DecryptDataSeek(open, 0, -1)
 	assert.NoError(t, err)
 	for _, offset := range trials {
-		_, err := rc.Seek(int64(offset), 0)
-		assert.NoError(t, err)
+		for _, limit := range limits {
+			if offset+limit > len(plaintext) {
+				continue
+			}
+			_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
+			assert.NoError(t, err)
 
-		check(rc, offset)
+			check(fh, offset, limit)
+		}
 	}
+
+	// Do some checks on the open callback
+	for _, test := range []struct {
+		offset, limit         int64
+		wantOffset, wantLimit int64
+	}{
+		// unlimited
+		{0, -1, int64(fileHeaderSize), -1},
+		{1, -1, int64(fileHeaderSize), -1},
+		{blockDataSize - 1, -1, int64(fileHeaderSize), -1},
+		{blockDataSize, -1, int64(fileHeaderSize) + blockSize, -1},
+		{blockDataSize + 1, -1, int64(fileHeaderSize) + blockSize, -1},
+		// limit=1
+		{0, 1, int64(fileHeaderSize), blockSize},
+		{1, 1, int64(fileHeaderSize), blockSize},
+		{blockDataSize - 1, 1, int64(fileHeaderSize), blockSize},
+		{blockDataSize, 1, int64(fileHeaderSize) + blockSize, blockSize},
+		{blockDataSize + 1, 1, int64(fileHeaderSize) + blockSize, blockSize},
+		// limit=100
+		{0, 100, int64(fileHeaderSize), blockSize},
+		{1, 100, int64(fileHeaderSize), blockSize},
+		{blockDataSize - 1, 100, int64(fileHeaderSize), 2 * blockSize},
+		{blockDataSize, 100, int64(fileHeaderSize) + blockSize, blockSize},
+		{blockDataSize + 1, 100, int64(fileHeaderSize) + blockSize, blockSize},
+		// limit=blockDataSize-1
+		{0, blockDataSize - 1, int64(fileHeaderSize), blockSize},
+		{1, blockDataSize - 1, int64(fileHeaderSize), blockSize},
+		{blockDataSize - 1, blockDataSize - 1, int64(fileHeaderSize), 2 * blockSize},
+		{blockDataSize, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize},
+		{blockDataSize + 1, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize},
+		// limit=blockDataSize
+		{0, blockDataSize, int64(fileHeaderSize), blockSize},
+		{1, blockDataSize, int64(fileHeaderSize), 2 * blockSize},
+		{blockDataSize - 1, blockDataSize, int64(fileHeaderSize), 2 * blockSize},
+		{blockDataSize, blockDataSize, int64(fileHeaderSize) + blockSize, blockSize},
+		{blockDataSize + 1, blockDataSize, int64(fileHeaderSize) + blockSize, 2 * blockSize},
+		// limit=blockDataSize+1
+		{0, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize},
+		{1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize},
+		{blockDataSize - 1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize},
+		{blockDataSize, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize},
+		{blockDataSize + 1, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize},
+	} {
+		what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
+		callCount := 0
+		testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+			switch callCount {
+			case 0:
+				assert.Equal(t, int64(0), underlyingOffset, what)
+				assert.Equal(t, int64(-1), underlyingLimit, what)
+			case 1:
+				assert.Equal(t, test.wantOffset, underlyingOffset, what)
+				assert.Equal(t, test.wantLimit, underlyingLimit, what)
+			default:
+				t.Errorf("Too many calls %d for %s", callCount+1, what)
+			}
+			callCount++
+			return open(underlyingOffset, underlyingLimit)
+		}
+		fh, err := c.DecryptDataSeek(testOpen, 0, -1)
+		assert.NoError(t, err)
+		gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
+		assert.NoError(t, err)
+		assert.Equal(t, gotOffset, test.offset)
+	}
+}
+
+func TestDecrypterCalculateUnderlying(t *testing.T) {
+	for _, test := range []struct {
+		offset, limit           int64
+		wantOffset, wantLimit   int64
+		wantDiscard, wantBlocks int64
+	}{
+		// unlimited
+		{0, -1, int64(fileHeaderSize), -1, 0, 0},
+		{1, -1, int64(fileHeaderSize), -1, 1, 0},
+		{blockDataSize - 1, -1, int64(fileHeaderSize), -1, blockDataSize - 1, 0},
+		{blockDataSize, -1, int64(fileHeaderSize) + blockSize, -1, 0, 1},
+		{blockDataSize + 1, -1, int64(fileHeaderSize) + blockSize, -1, 1, 1},
+		// limit=1
+		{0, 1, int64(fileHeaderSize), blockSize, 0, 0},
+		{1, 1, int64(fileHeaderSize), blockSize, 1, 0},
+		{blockDataSize - 1, 1, int64(fileHeaderSize), blockSize, blockDataSize - 1, 0},
+		{blockDataSize, 1, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
+		{blockDataSize + 1, 1, int64(fileHeaderSize) + blockSize, blockSize, 1, 1},
+		// limit=100
+		{0, 100, int64(fileHeaderSize), blockSize, 0, 0},
+		{1, 100, int64(fileHeaderSize), blockSize, 1, 0},
+		{blockDataSize - 1, 100, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
+		{blockDataSize, 100, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
+		{blockDataSize + 1, 100, int64(fileHeaderSize) + blockSize, blockSize, 1, 1},
+		// limit=blockDataSize-1
+		{0, blockDataSize - 1, int64(fileHeaderSize), blockSize, 0, 0},
+		{1, blockDataSize - 1, int64(fileHeaderSize), blockSize, 1, 0},
+		{blockDataSize - 1, blockDataSize - 1, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
+		{blockDataSize, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
+		{blockDataSize + 1, blockDataSize - 1, int64(fileHeaderSize) + blockSize, blockSize, 1, 1},
+		// limit=blockDataSize
+		{0, blockDataSize, int64(fileHeaderSize), blockSize, 0, 0},
+		{1, blockDataSize, int64(fileHeaderSize), 2 * blockSize, 1, 0},
+		{blockDataSize - 1, blockDataSize, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
+		{blockDataSize, blockDataSize, int64(fileHeaderSize) + blockSize, blockSize, 0, 1},
+		{blockDataSize + 1, blockDataSize, int64(fileHeaderSize) + blockSize, 2 * blockSize, 1, 1},
+		// limit=blockDataSize+1
+		{0, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize, 0, 0},
+		{1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize, 1, 0},
+		{blockDataSize - 1, blockDataSize + 1, int64(fileHeaderSize), 2 * blockSize, blockDataSize - 1, 0},
+		{blockDataSize, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize, 0, 1},
+		{blockDataSize + 1, blockDataSize + 1, int64(fileHeaderSize) + blockSize, 2 * blockSize, 1, 1},
+	} {
+		what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
+		underlyingOffset, underlyingLimit, discard, blocks := calculateUnderlying(test.offset, test.limit)
+		assert.Equal(t, test.wantOffset, underlyingOffset, what)
+		assert.Equal(t, test.wantLimit, underlyingLimit, what)
+		assert.Equal(t, test.wantDiscard, discard, what)
+		assert.Equal(t, test.wantBlocks, blocks, what)
+	}
+}
@@ -10,13 +10,17 @@ import (
 	"time"
 
 	"github.com/ncw/rclone/fs"
+	"github.com/ncw/rclone/fs/config"
+	"github.com/ncw/rclone/fs/config/flags"
+	"github.com/ncw/rclone/fs/config/obscure"
+	"github.com/ncw/rclone/fs/hash"
 	"github.com/pkg/errors"
 )
 
 // Globals
 var (
 	// Flags
-	cryptShowMapping = fs.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
+	cryptShowMapping = flags.BoolP("crypt-show-mapping", "", false, "For all files listed show how the names encrypt.")
 )
 
 // Register with Fs
@@ -69,27 +73,27 @@ func init() {
 	})
 }
 
-// NewFs constructs an Fs from the path, container:path
-func NewFs(name, rpath string) (fs.Fs, error) {
-	mode, err := NewNameEncryptionMode(fs.ConfigFileGet(name, "filename_encryption", "standard"))
+// NewCipher constructs a Cipher for the given config name
+func NewCipher(name string) (Cipher, error) {
+	mode, err := NewNameEncryptionMode(config.FileGet(name, "filename_encryption", "standard"))
 	if err != nil {
 		return nil, err
 	}
-	dirNameEncrypt, err := strconv.ParseBool(fs.ConfigFileGet(name, "directory_name_encryption", "true"))
+	dirNameEncrypt, err := strconv.ParseBool(config.FileGet(name, "directory_name_encryption", "true"))
 	if err != nil {
 		return nil, err
 	}
-	password := fs.ConfigFileGet(name, "password", "")
+	password := config.FileGet(name, "password", "")
 	if password == "" {
 		return nil, errors.New("password not set in config file")
 	}
-	password, err = fs.Reveal(password)
+	password, err = obscure.Reveal(password)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to decrypt password")
 	}
-	salt := fs.ConfigFileGet(name, "password2", "")
+	salt := config.FileGet(name, "password2", "")
 	if salt != "" {
-		salt, err = fs.Reveal(salt)
+		salt, err = obscure.Reveal(salt)
 		if err != nil {
 			return nil, errors.Wrap(err, "failed to decrypt password2")
 		}
@@ -98,7 +102,16 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to make cipher")
 	}
-	remote := fs.ConfigFileGet(name, "remote")
+	return cipher, nil
+}
+
+// NewFs constructs an Fs from the path, container:path
+func NewFs(name, rpath string) (fs.Fs, error) {
+	cipher, err := NewCipher(name)
+	if err != nil {
+		return nil, err
+	}
+	remote := config.FileGet(name, "remote")
 	if strings.HasPrefix(remote, name+":") {
 		return nil, errors.New("can't point crypt remote at itself - check the value of the remote setting")
 	}
@@ -118,12 +131,11 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 		name:   name,
 		root:   rpath,
 		cipher: cipher,
-		mode:   mode,
 	}
 	// the features here are ones we could support, and they are
 	// ANDed with the ones from wrappedFs
 	f.features = (&fs.Features{
-		CaseInsensitive:         mode == NameEncryptionOff,
+		CaseInsensitive:         cipher.NameEncryptionMode() == NameEncryptionOff,
 		DuplicateFiles:          true,
 		ReadMimeType:            false, // MimeTypes not supported with crypt
 		WriteMimeType:           false,
@@ -131,18 +143,18 @@ func NewFs(name, rpath string) (fs.Fs, error) {
 		CanHaveEmptyDirectories: true,
 	}).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
 
-	doDirChangeNotify := wrappedFs.Features().DirChangeNotify
-	if doDirChangeNotify != nil {
-		f.features.DirChangeNotify = func(notifyFunc func(string), pollInterval time.Duration) chan bool {
-			wrappedNotifyFunc := func(path string) {
+	doChangeNotify := wrappedFs.Features().ChangeNotify
+	if doChangeNotify != nil {
+		f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval time.Duration) chan bool {
+			wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
 				decrypted, err := f.DecryptFileName(path)
 				if err != nil {
-					fs.Logf(f, "DirChangeNotify was unable to decrypt %q: %s", path, err)
+					fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
 					return
 				}
-				notifyFunc(decrypted)
+				notifyFunc(decrypted, entryType)
 			}
-			return doDirChangeNotify(wrappedNotifyFunc, pollInterval)
+			return doChangeNotify(wrappedNotifyFunc, pollInterval)
 		}
 	}
 
@@ -156,7 +168,6 @@ type Fs struct {
 	root     string
 	features *fs.Features // optional features
 	cipher   Cipher
-	mode     NameEncryptionMode
 }
 
 // Name of the remote (as passed into NewFs)
@@ -279,14 +290,48 @@ type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
 // put implements Put or PutStream
 func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+	// Encrypt the data into wrappedIn
 	wrappedIn, err := f.cipher.EncryptData(in)
 	if err != nil {
 		return nil, err
 	}
+
+	// Find a hash the destination supports to compute a hash of
+	// the encrypted data
+	ht := f.Fs.Hashes().GetOne()
+	var hasher *hash.MultiHasher
+	if ht != hash.None {
+		hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(ht))
+		if err != nil {
+			return nil, err
+		}
+		wrappedIn = io.TeeReader(wrappedIn, hasher)
+	}
+
+	// Transfer the data
 	o, err := put(wrappedIn, f.newObjectInfo(src), options...)
 	if err != nil {
 		return nil, err
 	}
+
+	// Check the hashes of the encrypted data if we were comparing them
+	if ht != hash.None && hasher != nil {
+		srcHash := hasher.Sums()[ht]
+		var dstHash string
+		dstHash, err = o.Hash(ht)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to read destination hash")
+		}
+		if srcHash != "" && dstHash != "" && srcHash != dstHash {
+			// remove object
+			err = o.Remove()
+			if err != nil {
+				fs.Errorf(o, "Failed to remove corrupted object: %v", err)
+			}
+			return nil, errors.Errorf("corrupted on transfer: %v crypted hash differ %q vs %q", ht, srcHash, dstHash)
+		}
+	}
+
 	return f.newObject(o), nil
 }
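A design note on the new put above: crypt cannot compare plaintext checksums with the wrapped remote, so it tees the encrypted stream through a hasher chosen from the destination's supported set and compares encrypted hashes after the transfer, removing the object on mismatch. Corruption in transit is caught at the cost of hashing the stream once more.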
@@ -305,8 +350,8 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
    return fs.HashSet(fs.HashNone)
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.None)
}

// Mkdir makes the directory (container, bucket)
@@ -440,11 +485,25 @@ func (f *Fs) CleanUp() error {
    return do()
}

// About gets quota information from the Fs
func (f *Fs) About() (*fs.Usage, error) {
    do := f.Fs.Features().About
    if do == nil {
        return nil, errors.New("About not supported")
    }
    return do()
}

// UnWrap returns the Fs that this Fs is wrapping
func (f *Fs) UnWrap() fs.Fs {
    return f.Fs
}

// EncryptFileName returns an encrypted file name
func (f *Fs) EncryptFileName(fileName string) string {
    return f.cipher.EncryptFileName(fileName)
}

// DecryptFileName returns a decrypted file name
func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
    return f.cipher.DecryptFileName(encryptedFileName)
@@ -454,13 +513,19 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
// src with it, and calculates the hash given by HashType on the fly
//
// Note that we break lots of encapsulation in this function.
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash string, err error) {
func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
    // Read the nonce - opening the file is sufficient to read the nonce in
    in, err := o.Open()
    // use a limited read so we only read the header
    in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
    if err != nil {
        return "", errors.Wrap(err, "failed to read nonce")
        return "", errors.Wrap(err, "failed to open object to read nonce")
    }
    nonce := in.(*decrypter).nonce
    d, err := f.cipher.(*cipher).newDecrypter(in)
    if err != nil {
        _ = in.Close()
        return "", errors.Wrap(err, "failed to open object to read nonce")
    }
    nonce := d.nonce
    // fs.Debugf(o, "Read nonce % 2x", nonce)

    // Check nonce isn't all zeros
@@ -474,8 +539,8 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash s
        fs.Errorf(o, "empty nonce read")
    }

    // Close in once we have read the nonce
    err = in.Close()
    // Close d (and hence in) once we have read the nonce
    err = d.Close()
    if err != nil {
        return "", errors.Wrap(err, "failed to close nonce read")
    }
@@ -494,7 +559,10 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType fs.HashType) (hash s
    }

    // pipe into hash
    m := fs.NewMultiHasher()
    m, err := hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
    if err != nil {
        return "", errors.Wrap(err, "failed to make hasher")
    }
    _, err = io.Copy(m, out)
    if err != nil {
        return "", errors.Wrap(err, "failed to hash data")
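The rewritten ComputeHash no longer streams the whole object just to recover the nonce: the open is bounded with fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1}, so only the file header travels over the wire. The same idea in miniature, with io.LimitReader standing in for the ranged Open and a made-up header size:

package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
)

const fileHeaderSize = 32 // hypothetical header length, not crypt's real constant

func main() {
    obj := bytes.NewReader(make([]byte, 1<<20)) // a "large object"
    // Only the first fileHeaderSize bytes are ever read.
    header, err := ioutil.ReadAll(io.LimitReader(obj, fileHeaderSize))
    if err != nil {
        panic(err)
    }
    fmt.Println("read", len(header), "bytes instead of the whole object")
}
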
@@ -553,8 +621,8 @@ func (o *Object) Size() int64 {

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(hash fs.HashType) (string, error) {
    return "", nil
func (o *Object) Hash(ht hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

// UnWrap returns the wrapped Object
@@ -564,38 +632,48 @@ func (o *Object) UnWrap() fs.Object {

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
    var offset int64
    var openOptions []fs.OpenOption
    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
            offset = x.Offset
        case *fs.RangeOption:
            offset, limit = x.Decode(o.Size())
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
            }
            // pass on Options to underlying open if appropriate
            openOptions = append(openOptions, option)
        }
    }
    rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset int64) (io.ReadCloser, error) {
        if underlyingOffset == 0 {
    rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
        if underlyingOffset == 0 && underlyingLimit < 0 {
            // Open with no seek
            return o.Object.Open()
            return o.Object.Open(openOptions...)
        }
        // Open stream with a seek of underlyingOffset
        return o.Object.Open(&fs.SeekOption{Offset: underlyingOffset})
    }, offset)
        // Open stream with a range of underlyingOffset, underlyingLimit
        end := int64(-1)
        if underlyingLimit >= 0 {
            end = underlyingOffset + underlyingLimit - 1
            if end >= o.Object.Size() {
                end = -1
            }
        }
        newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
        return o.Object.Open(newOpenOptions...)
    }, offset, limit)
    if err != nil {
        return nil, err
    }
    return rc, err
    return rc, nil
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    wrappedIn, err := o.f.cipher.EncryptData(in)
    if err != nil {
        return err
    update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
        return o.Object, o.Object.Update(in, src, options...)
    }
    return o.Object.Update(wrappedIn, o.f.newObjectInfo(src))
    _, err := o.f.put(in, src, options, update)
    return err
}

// newDir returns a dir with the Name decrypted
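Both branches of the new Open lean on an (offset, limit) convention, where limit == -1 means "read to the end". An illustrative decode of a byte range into that pair (a sketch of the semantics only, not fs.RangeOption's actual implementation):

package main

import "fmt"

type rangeOpt struct {
    Start, End int64 // End == -1 means open-ended
}

// decode converts the range into (offset, limit) for a file of the given size.
func (r rangeOpt) decode(size int64) (offset, limit int64) {
    offset, limit = r.Start, int64(-1)
    if r.End >= 0 {
        limit = r.End - r.Start + 1
        if r.Start+limit > size {
            limit = size - r.Start // clamp to the object size
        }
    }
    return offset, limit
}

func main() {
    fmt.Println(rangeOpt{Start: 10, End: 19}.decode(100)) // 10 10
    fmt.Println(rangeOpt{Start: 10, End: -1}.decode(100)) // 10 -1
}
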
@@ -647,22 +725,24 @@ func (o *ObjectInfo) Size() int64 {

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *ObjectInfo) Hash(hash fs.HashType) (string, error) {
func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
    return "", nil
}

// Check the interfaces are satisfied
var (
    _ fs.Fs = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    _ fs.Copier = (*Fs)(nil)
    _ fs.Mover = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.PutUncheckeder = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.CleanUpper = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
    _ fs.ListRer = (*Fs)(nil)
    _ fs.ObjectInfo = (*ObjectInfo)(nil)
    _ fs.Object = (*Object)(nil)
    _ fs.Fs = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    _ fs.Copier = (*Fs)(nil)
    _ fs.Mover = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.PutUncheckeder = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.CleanUpper = (*Fs)(nil)
    _ fs.UnWrapper = (*Fs)(nil)
    _ fs.ListRer = (*Fs)(nil)
    _ fs.Abouter = (*Fs)(nil)
    _ fs.ObjectInfo = (*ObjectInfo)(nil)
    _ fs.Object = (*Object)(nil)
    _ fs.ObjectUnWrapper = (*Object)(nil)
)

62 backend/crypt/crypt_test.go Normal file
@@ -0,0 +1,62 @@
// Test Crypt filesystem interface
package crypt_test

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/ncw/rclone/backend/crypt"
    _ "github.com/ncw/rclone/backend/local"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestStandard runs integration tests against the remote
func TestStandard(t *testing.T) {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
    name := "TestCrypt"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*crypt.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "crypt"},
            {Name: name, Key: "remote", Value: tempdir},
            {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
            {Name: name, Key: "filename_encryption", Value: "standard"},
        },
    })
}

// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-off")
    name := "TestCrypt2"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*crypt.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "crypt"},
            {Name: name, Key: "remote", Value: tempdir},
            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
            {Name: name, Key: "filename_encryption", Value: "off"},
        },
    })
}

// TestObfuscate runs integration tests against the remote
func TestObfuscate(t *testing.T) {
    tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-obfuscate")
    name := "TestCrypt3"
    fstests.Run(t, &fstests.Opt{
        RemoteName: name + ":",
        NilObject:  (*crypt.Object)(nil),
        ExtraConfig: []fstests.ExtraConfigItem{
            {Name: name, Key: "type", Value: "crypt"},
            {Name: name, Key: "remote", Value: tempdir},
            {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
            {Name: name, Key: "filename_encryption", Value: "obfuscate"},
        },
        SkipBadWindowsCharacters: true,
    })
}

1739 backend/drive/drive.go Normal file
File diff suppressed because it is too large

112 backend/drive/drive_internal_test.go Normal file
@@ -0,0 +1,112 @@
package drive

import (
    "encoding/json"
    "testing"

    "google.golang.org/api/drive/v3"

    "github.com/pkg/errors"
    "github.com/stretchr/testify/assert"
)

const exampleExportFormats = `{
    "application/vnd.google-apps.document": [
        "application/rtf",
        "application/vnd.oasis.opendocument.text",
        "text/html",
        "application/pdf",
        "application/epub+zip",
        "application/zip",
        "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
        "text/plain"
    ],
    "application/vnd.google-apps.spreadsheet": [
        "application/x-vnd.oasis.opendocument.spreadsheet",
        "text/tab-separated-values",
        "application/pdf",
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
        "text/csv",
        "application/zip",
        "application/vnd.oasis.opendocument.spreadsheet"
    ],
    "application/vnd.google-apps.jam": [
        "application/pdf"
    ],
    "application/vnd.google-apps.script": [
        "application/vnd.google-apps.script+json"
    ],
    "application/vnd.google-apps.presentation": [
        "application/vnd.oasis.opendocument.presentation",
        "application/pdf",
        "application/vnd.openxmlformats-officedocument.presentationml.presentation",
        "text/plain"
    ],
    "application/vnd.google-apps.form": [
        "application/zip"
    ],
    "application/vnd.google-apps.drawing": [
        "image/svg+xml",
        "image/png",
        "application/pdf",
        "image/jpeg"
    ]
}`

var exportFormats map[string][]string

// Load the example export formats into exportFormats for testing
func TestInternalLoadExampleExportFormats(t *testing.T) {
    assert.NoError(t, json.Unmarshal([]byte(exampleExportFormats), &exportFormats))
}

func TestInternalParseExtensions(t *testing.T) {
    for _, test := range []struct {
        in      string
        want    []string
        wantErr error
    }{
        {"doc", []string{"doc"}, nil},
        {" docx ,XLSX, pptx,svg", []string{"docx", "xlsx", "pptx", "svg"}, nil},
        {"docx,svg,Docx", []string{"docx", "svg"}, nil},
        {"docx,potato,docx", []string{"docx"}, errors.New(`couldn't find mime type for extension "potato"`)},
    } {
        f := new(Fs)
        gotErr := f.parseExtensions(test.in)
        if test.wantErr == nil {
            assert.NoError(t, gotErr)
        } else {
            assert.EqualError(t, gotErr, test.wantErr.Error())
        }
        assert.Equal(t, test.want, f.extensions)
    }

    // Test it is appending
    f := new(Fs)
    assert.Nil(t, f.parseExtensions("docx,svg"))
    assert.Nil(t, f.parseExtensions("docx,svg,xlsx"))
    assert.Equal(t, []string{"docx", "svg", "xlsx"}, f.extensions)

}

func TestInternalFindExportFormat(t *testing.T) {
    item := new(drive.File)
    item.MimeType = "application/vnd.google-apps.document"
    for _, test := range []struct {
        extensions    []string
        wantExtension string
        wantMimeType  string
    }{
        {[]string{}, "", ""},
        {[]string{"pdf"}, "pdf", "application/pdf"},
        {[]string{"pdf", "rtf", "xls"}, "pdf", "application/pdf"},
        {[]string{"xls", "rtf", "pdf"}, "rtf", "application/rtf"},
        {[]string{"xls", "csv", "svg"}, "", ""},
    } {
        f := new(Fs)
        f.extensions = test.extensions
        gotExtension, gotMimeType := f.findExportFormat("file", exportFormats[item.MimeType])
        assert.Equal(t, test.wantExtension, gotExtension)
        assert.Equal(t, test.wantMimeType, gotMimeType)
    }
}

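TestInternalFindExportFormat pins down the preference rule: walk the configured extension list in order and pick the first one whose MIME type appears in the document's export list. A sketch of that rule with a tiny made-up extension-to-MIME table (not the backend's real lookup):

package main

import "fmt"

var extToMime = map[string]string{
    "pdf": "application/pdf",
    "rtf": "application/rtf",
    "xls": "application/vnd.ms-excel",
}

// findExportFormat returns the first preferred extension the document
// can actually be exported to, or ("", "") if none match.
func findExportFormat(extensions, exportMimeTypes []string) (ext, mimeType string) {
    for _, e := range extensions {
        want := extToMime[e]
        for _, mt := range exportMimeTypes {
            if want != "" && mt == want {
                return e, mt
            }
        }
    }
    return "", ""
}

func main() {
    docFormats := []string{"application/rtf", "application/pdf"}
    // "xls" is not exportable, so "rtf" wins, matching the test table above.
    fmt.Println(findExportFormat([]string{"xls", "rtf", "pdf"}, docFormats))
}
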
17 backend/drive/drive_test.go Normal file
@@ -0,0 +1,17 @@
// Test Drive filesystem interface
package drive_test

import (
    "testing"

    "github.com/ncw/rclone/backend/drive"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDrive:",
        NilObject:  (*drive.Object)(nil),
    })
}

249 backend/drive/upload.go Normal file
@@ -0,0 +1,249 @@
// Upload for drive
//
// Docs
// Resumable upload: https://developers.google.com/drive/web/manage-uploads#resumable
// Best practices: https://developers.google.com/drive/web/manage-uploads#best-practices
// Files insert: https://developers.google.com/drive/v2/reference/files/insert
// Files update: https://developers.google.com/drive/v2/reference/files/update
//
// This contains code adapted from google.golang.org/api (C) the GO AUTHORS

package drive

import (
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "regexp"
    "strconv"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
    "google.golang.org/api/drive/v3"
    "google.golang.org/api/googleapi"
)

const (
    // statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
    statusResumeIncomplete = 308
)

// resumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type resumableUpload struct {
    f      *Fs
    remote string
    // URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
    URI string
    // Media is the object being uploaded.
    Media io.Reader
    // MediaType defines the media type, e.g. "image/jpeg".
    MediaType string
    // ContentLength is the full size of the object being uploaded.
    ContentLength int64
    // Return value
    ret *drive.File
}

// Upload the io.Reader in of size bytes with contentType and info
func (f *Fs) Upload(in io.Reader, size int64, contentType string, fileID string, info *drive.File, remote string) (*drive.File, error) {
    params := make(url.Values)
    params.Set("alt", "json")
    params.Set("uploadType", "resumable")
    params.Set("fields", partialFields)
    if f.isTeamDrive {
        params.Set("supportsTeamDrives", "true")
    }
    if *driveKeepRevisionForever {
        params.Set("keepRevisionForever", "true")
    }
    urls := "https://www.googleapis.com/upload/drive/v3/files"
    method := "POST"
    if fileID != "" {
        params.Set("setModifiedDate", "true")
        urls += "/{fileId}"
        method = "PATCH"
    }
    urls += "?" + params.Encode()
    var res *http.Response
    var err error
    err = f.pacer.Call(func() (bool, error) {
        var body io.Reader
        body, err = googleapi.WithoutDataWrapper.JSONReader(info)
        if err != nil {
            return false, err
        }
        var req *http.Request
        req, err = http.NewRequest(method, urls, body)
        if err != nil {
            return false, err
        }
        googleapi.Expand(req.URL, map[string]string{
            "fileId": fileID,
        })
        req.Header.Set("Content-Type", "application/json; charset=UTF-8")
        req.Header.Set("X-Upload-Content-Type", contentType)
        req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
        res, err = f.client.Do(req)
        if err == nil {
            defer googleapi.CloseBody(res)
            err = googleapi.CheckResponse(res)
        }
        return shouldRetry(err)
    })
    if err != nil {
        return nil, err
    }
    loc := res.Header.Get("Location")
    rx := &resumableUpload{
        f:             f,
        remote:        remote,
        URI:           loc,
        Media:         in,
        MediaType:     contentType,
        ContentLength: size,
    }
    return rx.Upload()
}

// Make an http.Request for the range passed in
func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
    req, _ := http.NewRequest("POST", rx.URI, body)
    req.ContentLength = reqSize
    if reqSize != 0 {
        req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
    } else {
        req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
    }
    req.Header.Set("Content-Type", rx.MediaType)
    return req
}

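makeRequest's Content-Range headers follow RFC 7233 byte-range syntax: "bytes start-(start+size-1)/total" for a data chunk, and "bytes */total" for a zero-byte status probe. A quick check of the arithmetic:

package main

import "fmt"

// contentRange reproduces the header format used above.
func contentRange(start, reqSize, total int64) string {
    if reqSize != 0 {
        return fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, total)
    }
    return fmt.Sprintf("bytes */%v", total)
}

func main() {
    fmt.Println(contentRange(0, 8<<20, 20<<20))     // bytes 0-8388607/20971520
    fmt.Println(contentRange(8<<20, 8<<20, 20<<20)) // bytes 8388608-16777215/20971520
    fmt.Println(contentRange(0, 0, 20<<20))         // bytes */20971520 (status probe)
}
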
// rangeRE matches the transfer status response from the server. $1 is
// the last byte index uploaded.
var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)

// Query drive for the amount transferred so far
//
// If error is nil, then start should be valid
func (rx *resumableUpload) transferStatus() (start int64, err error) {
    req := rx.makeRequest(0, nil, 0)
    res, err := rx.f.client.Do(req)
    if err != nil {
        return 0, err
    }
    defer googleapi.CloseBody(res)
    if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
        return rx.ContentLength, nil
    }
    if res.StatusCode != statusResumeIncomplete {
        err = googleapi.CheckResponse(res)
        if err != nil {
            return 0, err
        }
        return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
    }
    Range := res.Header.Get("Range")
    if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
        start, err = strconv.ParseInt(m[1], 10, 64)
        if err == nil {
            return start, nil
        }
    }
    return 0, errors.Errorf("unable to parse range %q", Range)
}

// Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
    _, _ = chunk.Seek(0, io.SeekStart)
    req := rx.makeRequest(start, chunk, chunkSize)
    res, err := rx.f.client.Do(req)
    if err != nil {
        return 599, err
    }
    defer googleapi.CloseBody(res)
    if res.StatusCode == statusResumeIncomplete {
        return res.StatusCode, nil
    }
    err = googleapi.CheckResponse(res)
    if err != nil {
        return res.StatusCode, err
    }

    // When the entire file upload is complete, the server
    // responds with an HTTP 201 Created along with any metadata
    // associated with this resource. If this request had been
    // updating an existing entity rather than creating a new one,
    // the HTTP response code for a completed upload would have
    // been 200 OK.
    //
    // So parse the response out of the body. We aren't expecting
    // any other 2xx codes, so we parse it unconditionally on
    // StatusCode
    if err = json.NewDecoder(res.Body).Decode(&rx.ret); err != nil {
        return 598, err
    }

    return res.StatusCode, nil
}

// Upload uploads the chunks from the input
// It retries each chunk using the pacer and --low-level-retries
func (rx *resumableUpload) Upload() (*drive.File, error) {
    start := int64(0)
    var StatusCode int
    var err error
    buf := make([]byte, int(chunkSize))
    for start < rx.ContentLength {
        reqSize := rx.ContentLength - start
        if reqSize >= int64(chunkSize) {
            reqSize = int64(chunkSize)
        }
        chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)

        // Transfer the chunk
        err = rx.f.pacer.Call(func() (bool, error) {
            fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
            StatusCode, err = rx.transferChunk(start, chunk, reqSize)
            again, err := shouldRetry(err)
            if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
                again = false
                err = nil
            }
            return again, err
        })
        if err != nil {
            return nil, err
        }

        start += reqSize
    }
    // Resume or retry uploads that fail due to connection interruptions or
    // any 5xx errors, including:
    //
    //   500 Internal Server Error
    //   502 Bad Gateway
    //   503 Service Unavailable
    //   504 Gateway Timeout
    //
    // Use an exponential backoff strategy if any 5xx server error is
    // returned when resuming or retrying upload requests. These errors can
    // occur if a server is getting overloaded. Exponential backoff can help
    // alleviate these kinds of problems during periods of high volume of
    // requests or heavy network traffic. Other kinds of requests should not
    // be handled by exponential backoff but you can still retry a number of
    // them. When retrying these requests, limit the number of times you
    // retry them. For example your code could limit to ten retries or less
    // before reporting an error.
    //
    // Handle 404 Not Found errors when doing resumable uploads by starting
    // the entire upload over from the beginning.
    if rx.ret == nil {
        return nil, fserrors.RetryErrorf("Incomplete upload - retry, last error %d", StatusCode)
    }
    return rx.ret, nil
}

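The long comment block above is Google's retry guidance; in this code the pacer plus --low-level-retries supply the actual loop. As a standalone illustration of the exponential backoff it describes (a hypothetical helper, not rclone's pacer):

package main

import (
    "errors"
    "fmt"
    "time"
)

// retryWithBackoff retries op up to maxTries, doubling the delay each time.
func retryWithBackoff(maxTries int, op func() error) error {
    delay := 100 * time.Millisecond
    var err error
    for try := 1; try <= maxTries; try++ {
        if err = op(); err == nil {
            return nil
        }
        time.Sleep(delay)
        delay *= 2 // exponential backoff between attempts
    }
    return err
}

func main() {
    tries := 0
    err := retryWithBackoff(5, func() error {
        tries++
        if tries < 3 {
            return errors.New("503 Service Unavailable") // simulated 5xx
        }
        return nil
    })
    fmt.Println("tries:", tries, "err:", err)
}
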
@@ -5,7 +5,7 @@ import (
    "fmt"
    "testing"

    "github.com/ncw/rclone/dropbox/dbhash"
    "github.com/ncw/rclone/backend/dropbox/dbhash"
    "github.com/stretchr/testify/assert"
)

@@ -22,7 +22,6 @@ of path_display and all will be well.
*/

import (
    "crypto/md5"
    "fmt"
    "io"
    "log"
@@ -32,10 +31,19 @@ import (
    "time"

    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/common"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/files"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
    "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/oauthutil"
    "github.com/ncw/rclone/pacer"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/oauthutil"
    "github.com/ncw/rclone/lib/pacer"
    "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
    "golang.org/x/oauth2"
)
@@ -59,7 +67,7 @@ var (
    // },
    Endpoint:     dropbox.OAuthEndpoint(""),
    ClientID:     rcloneClientID,
    ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
    ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
    RedirectURL:  oauthutil.RedirectLocalhostURL,
}
// A regexp matching path names for files Dropbox ignores
@@ -105,25 +113,28 @@ func init() {
        }
    },
    Options: []fs.Option{{
        Name: "app_key",
        Help: "Dropbox App Key - leave blank normally.",
        Name: config.ConfigClientID,
        Help: "Dropbox App Client Id - leave blank normally.",
    }, {
        Name: "app_secret",
        Help: "Dropbox App Secret - leave blank normally.",
        Name: config.ConfigClientSecret,
        Help: "Dropbox App Client Secret - leave blank normally.",
    }},
    })
    fs.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
    flags.VarP(&uploadChunkSize, "dropbox-chunk-size", "", fmt.Sprintf("Upload chunk size. Max %v.", maxUploadChunkSize))
}

// Fs represents a remote dropbox server
type Fs struct {
    name           string       // name of this remote
    root           string       // the path we are working on
    features       *fs.Features // optional features
    srv            files.Client // the connection to the dropbox server
    slashRoot      string       // root with "/" prefix, lowercase
    slashRootSlash string       // root with "/" prefix and postfix, lowercase
    pacer          *pacer.Pacer // To pace the API calls
    name           string         // name of this remote
    root           string         // the path we are working on
    features       *fs.Features   // optional features
    srv            files.Client   // the connection to the dropbox server
    sharing        sharing.Client // as above, but for generating sharing links
    users          users.Client   // as above, but for accessing user information
    slashRoot      string         // root with "/" prefix, lowercase
    slashRootSlash string         // root with "/" prefix and postfix, lowercase
    pacer          *pacer.Pacer   // To pace the API calls
    ns             string         // The namespace we are using or "" for none
}

// Object describes a dropbox object
@@ -170,7 +181,7 @@ func shouldRetry(err error) (bool, error) {
    if strings.Contains(baseErrString, "too_many_write_operations") || strings.Contains(baseErrString, "too_many_requests") {
        return true, err
    }
    return fs.ShouldRetry(err), err
    return fserrors.ShouldRetry(err), err
}

// NewFs constructs an Fs from the path, container:path
@@ -181,11 +192,11 @@ func NewFs(name, root string) (fs.Fs, error) {

    // Convert the old token if it exists. The old token was
    // just a string, the new one is a JSON blob
    oldToken := strings.TrimSpace(fs.ConfigFileGet(name, fs.ConfigToken))
    oldToken := strings.TrimSpace(config.FileGet(name, config.ConfigToken))
    if oldToken != "" && oldToken[0] != '{' {
        fs.Infof(name, "Converting token to new format")
        newToken := fmt.Sprintf(`{"access_token":"%s","token_type":"bearer","expiry":"0001-01-01T00:00:00Z"}`, oldToken)
        err := fs.ConfigSetValueAndSave(name, fs.ConfigToken, newToken)
        err := config.SetValueAndSave(name, config.ConfigToken, newToken)
        if err != nil {
            return nil, errors.Wrap(err, "NewFS convert token")
        }
@@ -193,20 +204,21 @@ func NewFs(name, root string) (fs.Fs, error) {

    oAuthClient, _, err := oauthutil.NewClient(name, dropboxConfig)
    if err != nil {
        log.Fatalf("Failed to configure dropbox: %v", err)
        return nil, errors.Wrap(err, "failed to configure dropbox")
    }

    config := dropbox.Config{
        LogLevel: dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
        Client:   oAuthClient,    // maybe???
    }
    srv := files.New(config)

    f := &Fs{
        name:  name,
        srv:   srv,
        pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
    }
    config := dropbox.Config{
        LogLevel:        dropbox.LogOff, // logging in the SDK: LogOff, LogDebug, LogInfo
        Client:          oAuthClient,    // maybe???
        HeaderGenerator: f.headerGenerator,
    }
    f.srv = files.New(config)
    f.sharing = sharing.New(config)
    f.users = users.New(config)
    f.features = (&fs.Features{
        CaseInsensitive: true,
        ReadMimeType:    true,
@@ -214,6 +226,27 @@ func NewFs(name, root string) (fs.Fs, error) {
    }).Fill(f)
    f.setRoot(root)

    // If root starts with / then use the actual root
    if strings.HasPrefix(root, "/") {
        var acc *users.FullAccount
        err = f.pacer.Call(func() (bool, error) {
            acc, err = f.users.GetCurrentAccount()
            return shouldRetry(err)
        })
        if err != nil {
            return nil, errors.Wrap(err, "get current account failed")
        }
        switch x := acc.RootInfo.(type) {
        case *common.TeamRootInfo:
            f.ns = x.RootNamespaceId
        case *common.UserRootInfo:
            f.ns = x.RootNamespaceId
        default:
            return nil, errors.Errorf("unknown RootInfo type %v %T", acc.RootInfo, acc.RootInfo)
        }
        fs.Debugf(f, "Using root namespace %q", f.ns)
    }

    // See if the root is actually an object
    _, err = f.getFileMetadata(f.slashRoot)
    if err == nil {
@@ -228,14 +261,22 @@ func NewFs(name, root string) (fs.Fs, error) {
    return f, nil
}

// headerGenerator for dropbox sdk
func (f *Fs) headerGenerator(hostType string, style string, namespace string, route string) map[string]string {
    if f.ns == "" {
        return map[string]string{}
    }
    return map[string]string{
        "Dropbox-API-Path-Root": `{".tag": "namespace_id", "namespace_id": "` + f.ns + `"}`,
    }
}

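The Dropbox-API-Path-Root value built above is a small JSON document selecting the namespace. Building the same value with encoding/json makes the shape explicit (illustrative only; the backend concatenates the string directly, and the namespace id here is made up):

package main

import (
    "encoding/json"
    "fmt"
)

func main() {
    pathRoot := map[string]string{
        ".tag":         "namespace_id",
        "namespace_id": "1234567890", // a made-up namespace id
    }
    b, err := json.Marshal(pathRoot)
    if err != nil {
        panic(err)
    }
    fmt.Println("Dropbox-API-Path-Root:", string(b))
}
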
// Sets root in f
func (f *Fs) setRoot(root string) {
    f.root = strings.Trim(root, "/")
    lowerCaseRoot := strings.ToLower(f.root)

    f.slashRoot = "/" + lowerCaseRoot
    f.slashRoot = "/" + f.root
    f.slashRootSlash = f.slashRoot
    if lowerCaseRoot != "" {
    if f.root != "" {
        f.slashRootSlash += "/"
    }
}
@@ -408,21 +449,6 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    return entries, nil
}

// A read closer which doesn't close the input
type readCloser struct {
    in io.Reader
}

// Read bytes from the object - see io.Reader
func (rc *readCloser) Read(p []byte) (n int, err error) {
    return rc.in.Read(p)
}

// Dummy close function
func (rc *readCloser) Close() error {
    return nil
}

// Put the object
//
// Copy the reader in to the new object which is returned
@@ -631,6 +657,52 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
    return dstObj, nil
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
func (f *Fs) PublicLink(remote string) (link string, err error) {
    absPath := "/" + path.Join(f.Root(), remote)
    fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
    createArg := sharing.CreateSharedLinkWithSettingsArg{
        Path: absPath,
    }
    var linkRes sharing.IsSharedLinkMetadata
    err = f.pacer.Call(func() (bool, error) {
        linkRes, err = f.sharing.CreateSharedLinkWithSettings(&createArg)
        return shouldRetry(err)
    })

    if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
        fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
        listArg := sharing.ListSharedLinksArg{
            Path:       absPath,
            DirectOnly: true,
        }
        var listRes *sharing.ListSharedLinksResult
        err = f.pacer.Call(func() (bool, error) {
            listRes, err = f.sharing.ListSharedLinks(&listArg)
            return shouldRetry(err)
        })
        if err != nil {
            return
        }
        if len(listRes.Links) == 0 {
            err = errors.New("Dropbox says the sharing link already exists, but list came back empty")
            return
        }
        linkRes = listRes.Links[0]
    }
    if err == nil {
        switch res := linkRes.(type) {
        case *sharing.FileLinkMetadata:
            link = res.Url
        case *sharing.FolderLinkMetadata:
            link = res.Url
        default:
            err = fmt.Errorf("Don't know how to extract link, response has unknown format: %T", res)
        }
    }
    return
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
//
@@ -674,9 +746,36 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
    return nil
}

// About gets quota information
func (f *Fs) About() (usage *fs.Usage, err error) {
    var q *users.SpaceUsage
    err = f.pacer.Call(func() (bool, error) {
        q, err = f.users.GetSpaceUsage()
        return shouldRetry(err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "about failed")
    }
    var total uint64
    if q.Allocation != nil {
        if q.Allocation.Individual != nil {
            total += q.Allocation.Individual.Allocated
        }
        if q.Allocation.Team != nil {
            total += q.Allocation.Team.Allocated
        }
    }
    usage = &fs.Usage{
        Total: fs.NewUsageValue(int64(total)),          // quota of bytes that can be used
        Used:  fs.NewUsageValue(int64(q.Used)),         // bytes in use
        Free:  fs.NewUsageValue(int64(total - q.Used)), // bytes which can be uploaded before reaching the quota
    }
    return usage, nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
    return fs.HashSet(fs.HashDropbox)
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.Dropbox)
}

// ------------------------------------------------------------
@@ -700,9 +799,9 @@ func (o *Object) Remote() string {
}

// Hash returns the dropbox special hash
func (o *Object) Hash(t fs.HashType) (string, error) {
    if t != fs.HashDropbox {
        return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
    if t != hash.Dropbox {
        return "", hash.ErrUnsupported
    }
    err := o.readMetaData()
    if err != nil {
@@ -749,20 +848,6 @@ func (o *Object) remotePath() string {
    return o.fs.slashRootSlash + o.remote
}

// Returns the key for the metadata database for a given path
func metadataKey(path string) string {
    // NB File system is case insensitive
    path = strings.ToLower(path)
    hash := md5.New()
    _, _ = hash.Write([]byte(path))
    return fmt.Sprintf("%x", hash.Sum(nil))
}

// Returns the key for the metadata database
func (o *Object) metadataKey() string {
    return metadataKey(o.remotePath())
}

// readMetaData gets the info if it hasn't already been fetched
func (o *Object) readMetaData() (err error) {
    if !o.modTime.IsZero() {
@@ -813,7 +898,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
    case files.DownloadAPIError:
        // Don't attempt to retry copyright violation errors
        if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
            return nil, fs.NoRetryError(err)
            return nil, fserrors.NoRetryError(err)
        }
    }

@@ -831,7 +916,7 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
    if size != -1 {
        chunks = int(size/chunkSize) + 1
    }
    in := fs.NewCountingReader(in0)
    in := readers.NewCountingReader(in0)
    buf := make([]byte, int(chunkSize))

    fmtChunk := func(cur int, last bool) {
@@ -847,10 +932,10 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
    // write the first chunk
    fmtChunk(1, false)
    var res *files.UploadSessionStartResult
    chunk := fs.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
    chunk := readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, 0); err != nil {
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        res, err = o.fs.srv.UploadSessionStart(&files.UploadSessionStartArg{}, chunk)
@@ -883,10 +968,10 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
        }
        cursor.Offset = in.BytesRead()
        fmtChunk(currentChunk, false)
        chunk = fs.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
        chunk = readers.NewRepeatableLimitReaderBuffer(in, buf, chunkSize)
        err = o.fs.pacer.Call(func() (bool, error) {
            // seek to the start in case this is a retry
            if _, err = chunk.Seek(0, 0); err != nil {
            if _, err = chunk.Seek(0, io.SeekStart); err != nil {
                return false, nil
            }
            err = o.fs.srv.UploadSessionAppendV2(&appendArg, chunk)
@@ -906,10 +991,10 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
        Commit: commitInfo,
    }
    fmtChunk(currentChunk, true)
    chunk = fs.NewRepeatableReaderBuffer(in, buf)
    chunk = readers.NewRepeatableReaderBuffer(in, buf)
    err = o.fs.pacer.Call(func() (bool, error) {
        // seek to the start in case this is a retry
        if _, err = chunk.Seek(0, 0); err != nil {
        if _, err = chunk.Seek(0, io.SeekStart); err != nil {
            return false, nil
        }
        entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
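Each chunk above is wrapped in a repeatable reader so a failed attempt can Seek(0, io.SeekStart) and resend identical bytes. A stripped-down sketch of that idea, buffering one chunk into a seekable bytes.Reader (not the actual readers.NewRepeatableLimitReaderBuffer implementation):

package main

import (
    "bytes"
    "fmt"
    "io"
    "strings"
)

// nextChunk reads up to n bytes from in into a seekable, re-readable chunk.
func nextChunk(in io.Reader, n int64) (io.ReadSeeker, int64, error) {
    buf := make([]byte, n)
    read, err := io.ReadFull(in, buf)
    if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
        return nil, 0, err
    }
    return bytes.NewReader(buf[:read]), int64(read), nil
}

func main() {
    in := strings.NewReader("hello chunked world")
    chunk, size, _ := nextChunk(in, 8)
    for attempt := 1; attempt <= 2; attempt++ {
        _, _ = chunk.Seek(0, io.SeekStart) // a retry resends the same bytes
        b := make([]byte, size)
        _, _ = io.ReadFull(chunk, b)
        fmt.Printf("attempt %d sent %q\n", attempt, b)
    }
}
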
@@ -966,11 +1051,13 @@ func (o *Object) Remove() (err error) {

// Check the interfaces are satisfied
var (
    _ fs.Fs = (*Fs)(nil)
    _ fs.Copier = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Mover = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.Object = (*Object)(nil)
    _ fs.Fs = (*Fs)(nil)
    _ fs.Copier = (*Fs)(nil)
    _ fs.Purger = (*Fs)(nil)
    _ fs.PutStreamer = (*Fs)(nil)
    _ fs.Mover = (*Fs)(nil)
    _ fs.PublicLinker = (*Fs)(nil)
    _ fs.DirMover = (*Fs)(nil)
    _ fs.Abouter = (*Fs)(nil)
    _ fs.Object = (*Object)(nil)
)

17 backend/dropbox/dropbox_test.go Normal file
@@ -0,0 +1,17 @@
// Test Dropbox filesystem interface
package dropbox_test

import (
    "testing"

    "github.com/ncw/rclone/backend/dropbox"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestDropbox:",
        NilObject:  (*dropbox.Object)(nil),
    })
}

@@ -13,6 +13,10 @@ import (

    "github.com/jlaffaye/ftp"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
)

@@ -160,33 +164,33 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
func NewFs(name, root string) (ff fs.Fs, err error) {
    // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
    // FIXME Convert the old scheme used for the first beta - remove after release
    if ftpURL := fs.ConfigFileGet(name, "url"); ftpURL != "" {
    if ftpURL := config.FileGet(name, "url"); ftpURL != "" {
        fs.Infof(name, "Converting old configuration")
        u, err := url.Parse(ftpURL)
        if err != nil {
            return nil, errors.Wrapf(err, "Failed to parse old url %q", ftpURL)
        }
        parts := strings.Split(u.Host, ":")
        fs.ConfigFileSet(name, "host", parts[0])
        config.FileSet(name, "host", parts[0])
        if len(parts) > 1 {
            fs.ConfigFileSet(name, "port", parts[1])
            config.FileSet(name, "port", parts[1])
        }
        fs.ConfigFileSet(name, "host", u.Host)
        fs.ConfigFileSet(name, "user", fs.ConfigFileGet(name, "username"))
        fs.ConfigFileSet(name, "pass", fs.ConfigFileGet(name, "password"))
        fs.ConfigFileDeleteKey(name, "username")
        fs.ConfigFileDeleteKey(name, "password")
        fs.ConfigFileDeleteKey(name, "url")
        fs.SaveConfig()
        config.FileSet(name, "host", u.Host)
        config.FileSet(name, "user", config.FileGet(name, "username"))
        config.FileSet(name, "pass", config.FileGet(name, "password"))
        config.FileDeleteKey(name, "username")
        config.FileDeleteKey(name, "password")
        config.FileDeleteKey(name, "url")
        config.SaveConfig()
        if u.Path != "" && u.Path != "/" {
            fs.Errorf(name, "Path %q in FTP URL no longer supported - put it on the end of the remote %s:%s", u.Path, name, u.Path)
        }
    }
    host := fs.ConfigFileGet(name, "host")
    user := fs.ConfigFileGet(name, "user")
    pass := fs.ConfigFileGet(name, "pass")
    port := fs.ConfigFileGet(name, "port")
    pass, err = fs.Reveal(pass)
    host := config.FileGet(name, "host")
    user := config.FileGet(name, "user")
    pass := config.FileGet(name, "pass")
    port := config.FileGet(name, "port")
    pass, err = obscure.Reveal(pass)
    if err != nil {
        return nil, errors.Wrap(err, "NewFS decrypt password")
    }
@@ -243,7 +247,7 @@ func translateErrorFile(err error) error {
    switch errX := err.(type) {
    case *textproto.Error:
        switch errX.Code {
        case ftp.StatusFileUnavailable:
        case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
            err = fs.ErrorObjectNotFound
        }
    }
@@ -255,16 +259,15 @@ func translateErrorDir(err error) error {
    switch errX := err.(type) {
    case *textproto.Error:
        switch errX.Code {
        case ftp.StatusFileUnavailable:
        case ftp.StatusFileUnavailable, ftp.StatusFileActionIgnored:
            err = fs.ErrorDirNotFound
        }
    }
    return err
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    fullPath := path.Join(f.root, remote)
    dir := path.Dir(fullPath)
@@ -272,32 +275,58 @@ func (f *Fs) NewObject(remote string) (o fs.Object, err error) {

    c, err := f.getFtpConnection()
    if err != nil {
        return nil, errors.Wrap(err, "NewObject")
        return nil, errors.Wrap(err, "findItem")
    }
    files, err := c.List(dir)
    f.putFtpConnection(&c, err)
    if err != nil {
        return nil, translateErrorFile(err)
    }
    for i, file := range files {
        if file.Type != ftp.EntryTypeFolder && file.Name == base {
            o := &Object{
                fs:     f,
                remote: remote,
            }
            info := &FileInfo{
                Name:    remote,
                Size:    files[i].Size,
                ModTime: files[i].Time,
            }
            o.info = info

            return o, nil
    for _, file := range files {
        if file.Name == base {
            return file, nil
        }
    }
    return nil, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    entry, err := f.findItem(remote)
    if err != nil {
        return nil, err
    }
    if entry != nil && entry.Type != ftp.EntryTypeFolder {
        o := &Object{
            fs:     f,
            remote: remote,
        }
        info := &FileInfo{
            Name:    remote,
            Size:    entry.Size,
            ModTime: entry.Time,
        }
        o.info = info

        return o, nil
    }
    return nil, fs.ErrorObjectNotFound
}

// dirExists checks the directory pointed to by remote exists or not
func (f *Fs) dirExists(remote string) (exists bool, err error) {
    entry, err := f.findItem(remote)
    if err != nil {
        return false, errors.Wrap(err, "dirExists")
    }
    if entry != nil && entry.Type == ftp.EntryTypeFolder {
        return true, nil
    }
    return false, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -318,6 +347,18 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    if err != nil {
        return nil, translateErrorDir(err)
    }
    // Annoyingly FTP returns success for a directory which
    // doesn't exist, so check it really doesn't exist if no
    // entries found.
    if len(files) == 0 {
        exists, err := f.dirExists(dir)
        if err != nil {
            return nil, errors.Wrap(err, "list")
        }
        if !exists {
            return nil, fs.ErrorDirNotFound
        }
    }
    for i := range files {
        object := files[i]
        newremote := path.Join(dir, object.Name)
@@ -346,7 +387,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
}

// Hashes are not supported
func (f *Fs) Hashes() fs.HashSet {
func (f *Fs) Hashes() hash.Set {
    return 0
}

@@ -434,6 +475,15 @@ func (f *Fs) mkdir(abspath string) error {
    }
    err = c.MakeDir(abspath)
    f.putFtpConnection(&c, err)
    switch errX := err.(type) {
    case *textproto.Error:
        switch errX.Code {
        case ftp.StatusFileUnavailable: // dir already exists: see issue #2181
            err = nil
        case 521: // dir already exists: error number according to RFC 959: issue #2363
            err = nil
        }
    }
    return err
}

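The mkdir tolerance above keys off *textproto.Error reply codes. A compact sketch of the same pattern, mapping "directory already exists" replies to success (550 is ftp.StatusFileUnavailable; 521 is the RFC 959 reply the linked issue reports):

package main

import (
    "fmt"
    "net/textproto"
)

// ignoreDirExists maps "directory already exists" FTP replies to nil.
func ignoreDirExists(err error) error {
    if tpErr, ok := err.(*textproto.Error); ok {
        switch tpErr.Code {
        case 550, 521:
            return nil
        }
    }
    return err
}

func main() {
    fmt.Println(ignoreDirExists(&textproto.Error{Code: 550, Msg: "already exists"})) // <nil>
    fmt.Println(ignoreDirExists(&textproto.Error{Code: 530, Msg: "not logged in"}))  // 530 not logged in
}
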
@@ -565,8 +615,8 @@ func (o *Object) Remote() string {
}

// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
    return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

// Size returns the size of an object in bytes
@@ -630,11 +680,13 @@ func (f *ftpReadCloser) Close() error {
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
    // defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
    path := path.Join(o.fs.root, o.remote)
    var offset int64
    var offset, limit int64 = 0, -1
    for _, option := range options {
        switch x := option.(type) {
        case *fs.SeekOption:
            offset = x.Offset
        case *fs.RangeOption:
            offset, limit = x.Decode(o.Size())
        default:
            if option.Mandatory() {
                fs.Logf(o, "Unsupported mandatory option: %v", option)
@@ -650,7 +702,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
        o.fs.putFtpConnection(&c, err)
        return nil, errors.Wrap(err, "open")
    }
    rc = &ftpReadCloser{rc: fd, c: c, f: o.fs}
    rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
    return rc, nil
}

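readers.NewLimitedReadCloser evidently bounds the ranged FTP read while still closing the underlying stream. A minimal equivalent, assuming those semantics (the real helper lives in lib/readers and may differ):

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"
)

// limitedReadCloser reads through a limited view but closes the original stream.
type limitedReadCloser struct {
    io.Reader // limited view of the data
    io.Closer // closes the underlying stream
}

// newLimitedReadCloser wraps rc; limit < 0 means "no limit".
func newLimitedReadCloser(rc io.ReadCloser, limit int64) io.ReadCloser {
    if limit < 0 {
        return rc
    }
    return &limitedReadCloser{Reader: io.LimitReader(rc, limit), Closer: rc}
}

func main() {
    rc := ioutil.NopCloser(strings.NewReader("0123456789"))
    lrc := newLimitedReadCloser(rc, 4)
    b, _ := ioutil.ReadAll(lrc)
    _ = lrc.Close()
    fmt.Printf("%q\n", b) // "0123"
}
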
17 backend/ftp/ftp_test.go Normal file
@@ -0,0 +1,17 @@
// Test FTP filesystem interface
package ftp_test

import (
    "testing"

    "github.com/ncw/rclone/backend/ftp"
    "github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFTP:",
        NilObject:  (*ftp.Object)(nil),
    })
}

@@ -28,7 +28,15 @@ import (
    "time"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/oauthutil"
    "github.com/ncw/rclone/fs/config"
    "github.com/ncw/rclone/fs/config/flags"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/fserrors"
    "github.com/ncw/rclone/fs/fshttp"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/fs/walk"
    "github.com/ncw/rclone/lib/oauthutil"
    "github.com/ncw/rclone/lib/pacer"
    "github.com/pkg/errors"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
@@ -43,17 +51,18 @@ const (
    timeFormatOut = "2006-01-02T15:04:05.000000000Z07:00"
    metaMtime     = "mtime" // key to store mtime under in metadata
    listChunks    = 1000    // chunk size to read directory listings
    minSleep      = 10 * time.Millisecond
)

var (
    gcsLocation = fs.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
    gcsStorageClass = fs.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
    gcsLocation = flags.StringP("gcs-location", "", "", "Default location for buckets (us|eu|asia|us-central1|us-east1|us-east4|us-west1|asia-east1|asia-northeast1|asia-southeast1|australia-southeast1|europe-west1|europe-west2).")
    gcsStorageClass = flags.StringP("gcs-storage-class", "", "", "Default storage class for buckets (MULTI_REGIONAL|REGIONAL|STANDARD|NEARLINE|COLDLINE|DURABLE_REDUCED_AVAILABILITY).")
    // Description of how to auth for this app
    storageConfig = &oauth2.Config{
        Scopes:       []string{storage.DevstorageFullControlScope},
        Endpoint:     google.Endpoint,
        ClientID:     rcloneClientID,
        ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
        ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
        RedirectURL:  oauthutil.TitleBarRedirectURL,
    }
)
@@ -65,7 +74,7 @@ func init() {
    Description: "Google Cloud Storage (this is not Google Drive)",
    NewFs:       NewFs,
    Config: func(name string) {
        if fs.ConfigFileGet(name, "service_account_file") != "" {
        if config.FileGet(name, "service_account_file") != "" {
            return
        }
        err := oauthutil.Config("google cloud storage", name, storageConfig)
@@ -74,10 +83,10 @@ func init() {
        }
    },
    Options: []fs.Option{{
        Name: fs.ConfigClientID,
        Name: config.ConfigClientID,
        Help: "Google Application Client Id - leave blank normally.",
    }, {
        Name: fs.ConfigClientSecret,
        Name: config.ConfigClientSecret,
        Help: "Google Application Client Secret - leave blank normally.",
    }, {
        Name: "project_number",
@@ -213,6 +222,7 @@ type Fs struct {
    bucketACL    string       // used when creating new buckets
    location     string       // location of new buckets
    storageClass string       // storage class of new buckets
    pacer        *pacer.Pacer // To pace the API calls
}

// Object describes a storage object
@@ -256,6 +266,30 @@ func (f *Fs) Features() *fs.Features {
    return f.features
}

// shouldRetry determines whether a given err rates being retried
func shouldRetry(err error) (again bool, errOut error) {
    again = false
    if err != nil {
        if fserrors.ShouldRetry(err) {
            again = true
        } else {
            switch gerr := err.(type) {
            case *googleapi.Error:
                if gerr.Code >= 500 && gerr.Code < 600 {
                    // All 5xx errors should be retried
                    again = true
                } else if len(gerr.Errors) > 0 {
                    reason := gerr.Errors[0].Reason
                    if reason == "rateLimitExceeded" || reason == "userRateLimitExceeded" {
                        again = true
                    }
                }
            }
        }
    }
    return again, err
}

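shouldRetry's (again bool, err error) pair is the contract the pacer.Call sites in this file rely on: the closure reports whether the operation should be attempted again. A toy retry loop with the same shape (illustrative; rclone's pacer also rate-limits and backs off):

package main

import (
    "errors"
    "fmt"
)

// call retries fn while it returns again == true, up to maxTries.
func call(maxTries int, fn func() (again bool, err error)) error {
    var err error
    var again bool
    for i := 0; i < maxTries; i++ {
        if again, err = fn(); !again {
            return err
        }
    }
    return err
}

func main() {
    attempts := 0
    err := call(10, func() (bool, error) {
        attempts++
        if attempts < 3 {
            return true, errors.New("rateLimitExceeded") // retryable
        }
        return false, nil
    })
    fmt.Println("attempts:", attempts, "err:", err)
}
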
// Pattern to match a storage path
|
||||
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)
|
||||
|
||||
@@ -271,16 +305,12 @@ func parsePath(path string) (bucket, directory string, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func getServiceAccountClient(keyJsonfilePath string) (*http.Client, error) {
|
||||
data, err := ioutil.ReadFile(os.ExpandEnv(keyJsonfilePath))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error opening credentials file")
|
||||
}
|
||||
conf, err := google.JWTConfigFromJSON(data, storageConfig.Scopes...)
|
||||
func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
|
||||
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error processing credentials")
|
||||
}
|
||||
ctxWithSpecialClient := oauthutil.Context(fs.Config.Client())
|
||||
ctxWithSpecialClient := oauthutil.Context(fshttp.NewClient(fs.Config))
|
||||
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
|
||||
}
|
||||
|
||||
@@ -289,16 +319,25 @@ func NewFs(name, root string) (fs.Fs, error) {
|
||||
var oAuthClient *http.Client
|
||||
var err error
|
||||
|
||||
serviceAccountPath := fs.ConfigFileGet(name, "service_account_file")
|
||||
if serviceAccountPath != "" {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountPath)
|
||||
// try loading service account credentials from env variable, then from a file
|
||||
serviceAccountCreds := []byte(config.FileGet(name, "service_account_credentials"))
|
||||
serviceAccountPath := config.FileGet(name, "service_account_file")
|
||||
if len(serviceAccountCreds) == 0 && serviceAccountPath != "" {
|
||||
loadedCreds, err := ioutil.ReadFile(os.ExpandEnv(serviceAccountPath))
|
||||
if err != nil {
|
||||
log.Fatalf("Failed configuring Google Cloud Storage Service Account: %v", err)
|
||||
return nil, errors.Wrap(err, "error opening service account credentials file")
|
||||
}
|
||||
serviceAccountCreds = loadedCreds
|
||||
}
|
||||
if len(serviceAccountCreds) > 0 {
|
||||
oAuthClient, err = getServiceAccountClient(serviceAccountCreds)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed configuring Google Cloud Storage Service Account")
|
||||
}
|
||||
} else {
|
||||
oAuthClient, _, err = oauthutil.NewClient(name, storageConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to configure Google Cloud Storage: %v", err)
|
||||
return nil, errors.Wrap(err, "failed to configure Google Cloud Storage")
|
||||
}
|
||||
}
@@ -311,11 +350,12 @@ func NewFs(name, root string) (fs.Fs, error) {
		name:          name,
		bucket:        bucket,
		root:          directory,
		projectNumber: fs.ConfigFileGet(name, "project_number"),
		objectACL:     fs.ConfigFileGet(name, "object_acl"),
		bucketACL:     fs.ConfigFileGet(name, "bucket_acl"),
		location:      fs.ConfigFileGet(name, "location"),
		storageClass:  fs.ConfigFileGet(name, "storage_class"),
		projectNumber: config.FileGet(name, "project_number"),
		objectACL:     config.FileGet(name, "object_acl"),
		bucketACL:     config.FileGet(name, "bucket_acl"),
		location:      config.FileGet(name, "location"),
		storageClass:  config.FileGet(name, "storage_class"),
		pacer:         pacer.New().SetMinSleep(minSleep).SetPacer(pacer.GoogleDrivePacer),
	}
	f.features = (&fs.Features{
		ReadMimeType: true,

@@ -345,7 +385,10 @@ func NewFs(name, root string) (fs.Fs, error) {
		if f.root != "" {
			f.root += "/"
			// Check to see if the object exists
			_, err = f.svc.Objects.Get(bucket, directory).Do()
			err = f.pacer.Call(func() (bool, error) {
				_, err = f.svc.Objects.Get(bucket, directory).Do()
				return shouldRetry(err)
			})
			if err == nil {
				f.root = path.Dir(directory)
				if f.root == "." {

@@ -393,7 +436,7 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) error {
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
	root := f.root
	rootLength := len(root)
	if dir != "" {

@@ -404,7 +447,11 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
		list = list.Delimiter("/")
	}
	for {
		objects, err := list.Do()
		var objects *storage.Objects
		err = f.pacer.Call(func() (bool, error) {
			objects, err = list.Do()
			return shouldRetry(err)
		})
		if err != nil {
			if gErr, ok := err.(*googleapi.Error); ok {
				if gErr.Code == http.StatusNotFound {

@@ -431,6 +478,17 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) error {
				continue
			}
			remote := object.Name[rootLength:]
			// is this a directory marker?
			if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
				if recurse {
					// add a directory in if --fast-list since it will have no prefixes
					err = fn(remote[:len(remote)-1], object, true)
					if err != nil {
						return err
					}
				}
				continue // skip directory marker
			}
			err = fn(remote, object, false)
			if err != nil {
				return err
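
Each hunk above follows the same retry shape: the raw SDK call is replaced by a closure handed to the pacer, which sleeps between attempts and keeps going while shouldRetry says so (shouldRetry's body is outside this diff; presumably it classifies rate-limit and transient googleapi errors as retryable). A minimal sketch of the shape, with doAPICall as a hypothetical stand-in for Objects.Get, Buckets.Insert, and the rest:

	var result *storage.Object
	err := f.pacer.Call(func() (bool, error) {
		var callErr error
		result, callErr = doAPICall() // hypothetical stand-in for the wrapped SDK call
		// shouldRetry returns (true, err) when the failure looks transient,
		// telling the pacer to sleep and try again; (false, err) gives up.
		return shouldRetry(callErr)
	})

Note that Update below uses CallNoRetry instead, since its body consumes an io.Reader that cannot be rewound for a second attempt.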
@@ -457,6 +515,15 @@ func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory b
	return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
	if f.bucket != "" {
		f.bucketOKMu.Lock()
		f.bucketOK = true
		f.bucketOKMu.Unlock()
	}
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	// List the objects

@@ -473,6 +540,8 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
	if err != nil {
		return nil, err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return entries, err
}

@@ -486,7 +555,11 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
	}
	listBuckets := f.svc.Buckets.List(f.projectNumber).MaxResults(listChunks)
	for {
		buckets, err := listBuckets.Do()
		var buckets *storage.Buckets
		err = f.pacer.Call(func() (bool, error) {
			buckets, err = listBuckets.Do()
			return shouldRetry(err)
		})
		if err != nil {
			return nil, err
		}

@@ -538,7 +611,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if f.bucket == "" {
		return fs.ErrorListBucketRequired
	}
	list := fs.NewListRHelper(callback)
	list := walk.NewListRHelper(callback)
	err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
		entry, err := f.itemToDirEntry(remote, object, isDirectory)
		if err != nil {

@@ -549,6 +622,8 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
	if err != nil {
		return err
	}
	// bucket must be present if listing succeeded
	f.markBucketOK()
	return list.Flush()
}

@@ -572,13 +647,19 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
func (f *Fs) Mkdir(dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.bucketOK {
		return nil
	}
	_, err := f.svc.Buckets.Get(f.bucket).Do()
	// List something from the bucket to see if it exists. Doing it like this enables the use of a
	// service account that only has the "Storage Object Admin" role. See #2193 for details.

	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
		return shouldRetry(err)
	})
	if err == nil {
		// Bucket already exists
		f.bucketOK = true

@@ -600,7 +681,10 @@ func (f *Fs) Mkdir(dir string) error {
		Location:     f.location,
		StorageClass: f.storageClass,
	}
	_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
	err = f.pacer.Call(func() (bool, error) {
		_, err = f.svc.Buckets.Insert(f.projectNumber, &bucket).PredefinedAcl(f.bucketACL).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = true
	}
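
The switch from Buckets.Get to a one-object listing is a permissions fix, not just a retry change. In GCS IAM terms (the permission names below come from Google's IAM documentation, not from this diff):

	// Old probe - requires the storage.buckets.get permission, which the
	// "Storage Object Admin" role does not grant:
	_, err := f.svc.Buckets.Get(f.bucket).Do()

	// New probe - listing a single object only needs storage.objects.list,
	// so object-admin-only service accounts pass the existence check (#2193):
	_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()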
@@ -611,13 +695,16 @@
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(dir string) (err error) {
	f.bucketOKMu.Lock()
	defer f.bucketOKMu.Unlock()
	if f.root != "" || dir != "" {
		return nil
	}
	err := f.svc.Buckets.Delete(f.bucket).Do()
	err = f.pacer.Call(func() (bool, error) {
		err = f.svc.Buckets.Delete(f.bucket).Do()
		return shouldRetry(err)
	})
	if err == nil {
		f.bucketOK = false
	}

@@ -659,7 +746,11 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
	srcObject := srcObj.fs.root + srcObj.remote
	dstBucket := f.bucket
	dstObject := f.root + remote
	newObject, err := f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
	var newObject *storage.Object
	err = f.pacer.Call(func() (bool, error) {
		newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

@@ -669,8 +760,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
	return fs.HashSet(fs.HashMD5)
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.MD5)
}

// ------------------------------------------------------------

@@ -694,9 +785,9 @@ func (o *Object) Remote() string {
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t fs.HashType) (string, error) {
	if t != fs.HashMD5 {
		return "", fs.ErrHashUnsupported
func (o *Object) Hash(t hash.Type) (string, error) {
	if t != hash.MD5 {
		return "", hash.ErrUnsupported
	}
	return o.md5sum, nil
}

@@ -747,7 +838,11 @@ func (o *Object) readMetaData() (err error) {
	if !o.modTime.IsZero() {
		return nil
	}
	object, err := o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
	var object *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	if err != nil {
		if gErr, ok := err.(*googleapi.Error); ok {
			if gErr.Code == http.StatusNotFound {

@@ -781,14 +876,18 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(modTime time.Time) (err error) {
	// This only adds metadata so will preserve other metadata
	object := storage.Object{
		Bucket:   o.fs.bucket,
		Name:     o.fs.root + o.remote,
		Metadata: metadataFromModTime(modTime),
	}
	newObject, err := o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
	var newObject *storage.Object
	err = o.fs.pacer.Call(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}

@@ -808,7 +907,17 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
		return nil, err
	}
	fs.OpenOptionAddHTTPHeaders(req.Header, options)
	res, err := o.fs.client.Do(req)
	var res *http.Response
	err = o.fs.pacer.Call(func() (bool, error) {
		res, err = o.fs.client.Do(req)
		if err == nil {
			err = googleapi.CheckResponse(res)
			if err != nil {
				_ = res.Body.Close() // ignore error
			}
		}
		return shouldRetry(err)
	})
	if err != nil {
		return nil, err
	}

@@ -837,7 +946,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
		Updated:  modTime.Format(timeFormatOut), // Doesn't get set
		Metadata: metadataFromModTime(modTime),
	}
	newObject, err := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
	var newObject *storage.Object
	err = o.fs.pacer.CallNoRetry(func() (bool, error) {
		newObject, err = o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name).PredefinedAcl(o.fs.objectACL).Do()
		return shouldRetry(err)
	})
	if err != nil {
		return err
	}

@@ -847,8 +960,12 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() error {
	return o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
func (o *Object) Remove() (err error) {
	err = o.fs.pacer.Call(func() (bool, error) {
		err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
		return shouldRetry(err)
	})
	return err
}

// MimeType of an Object if known, "" otherwise
backend/googlecloudstorage/googlecloudstorage_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test GoogleCloudStorage filesystem interface
package googlecloudstorage_test

import (
	"testing"

	"github.com/ncw/rclone/backend/googlecloudstorage"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestGoogleCloudStorage:",
		NilObject:  (*googlecloudstorage.Object)(nil),
	})
}
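
This wrapper pattern repeats for every backend below: fstests.Run drives the generic integration suite against the named remote. Presumably it is invoked against a configured remote with something like `go test -v -remote TestGoogleCloudStorage:` from the backend directory (the -remote flag comes from the fstest package; the exact invocation is not shown in this diff).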
backend/http/http.go (new file, 489 lines)
@@ -0,0 +1,489 @@
// Package http provides a filesystem interface using golang.org/net/http
//
// It treats HTML pages served from the endpoint as directory
// listings, and includes any links found as files.
package http

import (
	"io"
	"net/http"
	"net/url"
	"path"
	"strconv"
	"strings"
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/rest"
	"github.com/pkg/errors"
	"golang.org/x/net/html"
)

var (
	errorReadOnly = errors.New("http remotes are read only")
	timeUnset     = time.Unix(0, 0)
)

func init() {
	fsi := &fs.RegInfo{
		Name:        "http",
		Description: "http Connection",
		NewFs:       NewFs,
		Options: []fs.Option{{
			Name:     "url",
			Help:     "URL of http host to connect to",
			Optional: false,
			Examples: []fs.OptionExample{{
				Value: "https://example.com",
				Help:  "Connect to example.com",
			}},
		}},
	}
	fs.Register(fsi)
}

// Fs stores the interface to the remote HTTP files
type Fs struct {
	name        string
	root        string
	features    *fs.Features // optional features
	endpoint    *url.URL
	endpointURL string // endpoint as a string
	httpClient  *http.Client
}

// Object is a remote object that has been stat'd (so it exists, but is not necessarily open for reading)
type Object struct {
	fs          *Fs
	remote      string
	size        int64
	modTime     time.Time
	contentType string
}

// statusError returns an error if the res contained an error
func statusError(res *http.Response, err error) error {
	if err != nil {
		return err
	}
	if res.StatusCode < 200 || res.StatusCode > 299 {
		_ = res.Body.Close()
		return errors.Errorf("HTTP Error %d: %s", res.StatusCode, res.Status)
	}
	return nil
}

// NewFs creates a new Fs object from the name and root. It connects to
// the host specified in the config file.
func NewFs(name, root string) (fs.Fs, error) {
	endpoint := config.FileGet(name, "url")
	if !strings.HasSuffix(endpoint, "/") {
		endpoint += "/"
	}

	// Parse the endpoint and stick the root onto it
	base, err := url.Parse(endpoint)
	if err != nil {
		return nil, err
	}
	u, err := rest.URLJoin(base, rest.URLPathEscape(root))
	if err != nil {
		return nil, err
	}

	client := fshttp.NewClient(fs.Config)

	var isFile = false
	if !strings.HasSuffix(u.String(), "/") {
		// Make a client which doesn't follow redirects so the server
		// doesn't redirect http://host/dir to http://host/dir/
		noRedir := *client
		noRedir.CheckRedirect = func(req *http.Request, via []*http.Request) error {
			return http.ErrUseLastResponse
		}
		// check to see if points to a file
		res, err := noRedir.Head(u.String())
		err = statusError(res, err)
		if err == nil {
			isFile = true
		}
	}

	newRoot := u.String()
	if isFile {
		// Point to the parent if this is a file
		newRoot, _ = path.Split(u.String())
	} else {
		if !strings.HasSuffix(newRoot, "/") {
			newRoot += "/"
		}
	}

	u, err = url.Parse(newRoot)
	if err != nil {
		return nil, err
	}

	f := &Fs{
		name:        name,
		root:        root,
		httpClient:  client,
		endpoint:    u,
		endpointURL: u.String(),
	}
	f.features = (&fs.Features{
		CanHaveEmptyDirectories: true,
	}).Fill(f)
	if isFile {
		return f, fs.ErrorIsFile
	}
	if !strings.HasSuffix(f.endpointURL, "/") {
		return nil, errors.New("internal error: url doesn't end with /")
	}
	return f, nil
}

// Name returns the configured name of the file system
func (f *Fs) Name() string {
	return f.name
}

// Root returns the root for the filesystem
func (f *Fs) Root() string {
	return f.root
}

// String returns the URL for the filesystem
func (f *Fs) String() string {
	return f.endpointURL
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
	return f.features
}

// Precision is the remote http file system's modtime precision, which we have no way of knowing. We estimate at 1s
func (f *Fs) Precision() time.Duration {
	return time.Second
}

// NewObject creates a new remote http file object
func (f *Fs) NewObject(remote string) (fs.Object, error) {
	o := &Object{
		fs:     f,
		remote: remote,
	}
	err := o.stat()
	if err != nil {
		return nil, errors.Wrap(err, "Stat failed")
	}
	return o, nil
}

// url joins the remote onto the base URL
func (f *Fs) url(remote string) string {
	return f.endpointURL + rest.URLPathEscape(remote)
}

// parse s into an int64, on failure return def
func parseInt64(s string, def int64) int64 {
	n, e := strconv.ParseInt(s, 10, 64)
	if e != nil {
		return def
	}
	return n
}

// Errors returned by parseName
var (
	errURLJoinFailed     = errors.New("URLJoin failed")
	errFoundQuestionMark = errors.New("found ? in URL")
	errHostMismatch      = errors.New("host mismatch")
	errSchemeMismatch    = errors.New("scheme mismatch")
	errNotUnderRoot      = errors.New("not under root")
	errNameIsEmpty       = errors.New("name is empty")
	errNameContainsSlash = errors.New("name contains /")
)

// parseName turns a name as found in the page into a remote path or returns an error
func parseName(base *url.URL, name string) (string, error) {
	// make URL absolute
	u, err := rest.URLJoin(base, name)
	if err != nil {
		return "", errURLJoinFailed
	}
	// check it doesn't have URL parameters
	uStr := u.String()
	if strings.Index(uStr, "?") >= 0 {
		return "", errFoundQuestionMark
	}
	// check that this is going back to the same host and scheme
	if base.Host != u.Host {
		return "", errHostMismatch
	}
	if base.Scheme != u.Scheme {
		return "", errSchemeMismatch
	}
	// check has path prefix
	if !strings.HasPrefix(u.Path, base.Path) {
		return "", errNotUnderRoot
	}
	// calculate the name relative to the base
	name = u.Path[len(base.Path):]
	// mustn't be empty
	if name == "" {
		return "", errNameIsEmpty
	}
	// mustn't contain a / - we are looking for a single level directory
	slash := strings.Index(name, "/")
	if slash >= 0 && slash != len(name)-1 {
		return "", errNameContainsSlash
	}
	return name, nil
}
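
The behaviour is easiest to see on concrete inputs; these expectations are lifted straight from the TestParseName table further down (assuming fmt is imported for the sketch):

	base, err := url.Parse("http://example.com/dir/")
	if err != nil {
		panic(err)
	}
	for _, val := range []string{"potato", "subdir/potato", "http://notexample.com/dir/potato"} {
		name, err := parseName(base, val)
		fmt.Printf("%q -> %q, %v\n", val, name, err)
	}
	// "potato" -> "potato", <nil>
	// "subdir/potato" -> "", name contains /
	// "http://notexample.com/dir/potato" -> "", host mismatch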

// Parse turns HTML for a directory into names
// base should be the base URL to resolve any relative names from
func parse(base *url.URL, in io.Reader) (names []string, err error) {
	doc, err := html.Parse(in)
	if err != nil {
		return nil, err
	}
	var walk func(*html.Node)
	walk = func(n *html.Node) {
		if n.Type == html.ElementNode && n.Data == "a" {
			for _, a := range n.Attr {
				if a.Key == "href" {
					name, err := parseName(base, a.Val)
					if err == nil {
						names = append(names, name)
					}
					break
				}
			}
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c)
		}
	}
	walk(doc)
	return names, nil
}
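
A quick sketch of how parse and parseName cooperate on a fabricated listing page (the HTML below is invented for illustration):

	page := `<html><body><a href="potato">file</a> <a href="three/">dir</a> <a href="?C=M;O=A">sort</a></body></html>`
	base, _ := url.Parse("http://example.com/dir/")
	names, err := parse(base, strings.NewReader(page))
	// names == []string{"potato", "three/"}, err == nil; the "?C=M;O=A"
	// sort link is rejected by parseName's found-?-in-URL check, and the
	// trailing slash on "three/" marks it as a directory for List below.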

// Read the directory passed in
func (f *Fs) readDir(dir string) (names []string, err error) {
	URL := f.url(dir)
	u, err := url.Parse(URL)
	if err != nil {
		return nil, errors.Wrap(err, "failed to readDir")
	}
	if !strings.HasSuffix(URL, "/") {
		return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
	}
	res, err := f.httpClient.Get(URL)
	if err == nil && res.StatusCode == http.StatusNotFound {
		return nil, fs.ErrorDirNotFound
	}
	err = statusError(res, err)
	if err != nil {
		return nil, errors.Wrap(err, "failed to readDir")
	}
	defer fs.CheckClose(res.Body, &err)

	contentType := strings.SplitN(res.Header.Get("Content-Type"), ";", 2)[0]
	switch contentType {
	case "text/html":
		names, err = parse(u, res.Body)
		if err != nil {
			return nil, errors.Wrap(err, "readDir")
		}
	default:
		return nil, errors.Errorf("Can't parse content type %q", contentType)
	}
	return names, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
	if !strings.HasSuffix(dir, "/") && dir != "" {
		dir += "/"
	}
	names, err := f.readDir(dir)
	if err != nil {
		return nil, errors.Wrapf(err, "error listing %q", dir)
	}
	for _, name := range names {
		isDir := name[len(name)-1] == '/'
		name = strings.TrimRight(name, "/")
		remote := path.Join(dir, name)
		if isDir {
			dir := fs.NewDir(remote, timeUnset)
			entries = append(entries, dir)
		} else {
			file := &Object{
				fs:     f,
				remote: remote,
			}
			if err = file.stat(); err != nil {
				fs.Debugf(remote, "skipping because of error: %v", err)
				continue
			}
			entries = append(entries, file)
		}
	}
	return entries, nil
}

// Put in to the remote path with the modTime given of the given size
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
	return nil, errorReadOnly
}

// Fs is the filesystem this remote http file object is located within
func (o *Object) Fs() fs.Info {
	return o.fs
}

// String returns the URL to the remote HTTP file
func (o *Object) String() string {
	if o == nil {
		return "<nil>"
	}
	return o.remote
}

// Remote the name of the remote HTTP file, relative to the fs root
func (o *Object) Remote() string {
	return o.remote
}

// Hash returns "" since HTTP doesn't support remote calculation of hashes
func (o *Object) Hash(r hash.Type) (string, error) {
	return "", hash.ErrUnsupported
}

// Size returns the size in bytes of the remote http file
func (o *Object) Size() int64 {
	return o.size
}

// ModTime returns the modification time of the remote http file
func (o *Object) ModTime() time.Time {
	return o.modTime
}

// url returns the native url of the object
func (o *Object) url() string {
	return o.fs.url(o.remote)
}

// stat updates the info field in the Object
func (o *Object) stat() error {
	url := o.url()
	res, err := o.fs.httpClient.Head(url)
	err = statusError(res, err)
	if err != nil {
		return errors.Wrap(err, "failed to stat")
	}
	t, err := http.ParseTime(res.Header.Get("Last-Modified"))
	if err != nil {
		t = timeUnset
	}
	o.size = parseInt64(res.Header.Get("Content-Length"), -1)
	o.modTime = t
	o.contentType = res.Header.Get("Content-Type")
	return nil
}

// SetModTime sets the modification and access time to the specified time
//
// it also updates the info field
func (o *Object) SetModTime(modTime time.Time) error {
	return errorReadOnly
}

// Storable returns whether the remote http file is a regular file (not a directory, symbolic link, block device, character device, named pipe, etc)
func (o *Object) Storable() bool {
	return true
}

// Open a remote http file object for reading. Seek is supported
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	url := o.url()
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, errors.Wrap(err, "Open failed")
	}

	// Add optional headers
	for k, v := range fs.OpenOptionHeaders(options) {
		req.Header.Add(k, v)
	}

	// Do the request
	res, err := o.fs.httpClient.Do(req)
	err = statusError(res, err)
	if err != nil {
		return nil, errors.Wrap(err, "Open failed")
	}
	return res.Body, nil
}

// Hashes returns hash.HashNone to indicate remote hashing is unavailable
func (f *Fs) Hashes() hash.Set {
	return hash.Set(hash.None)
}

// Mkdir makes the root directory of the Fs object
func (f *Fs) Mkdir(dir string) error {
	return errorReadOnly
}

// Remove a remote http file object
func (o *Object) Remove() error {
	return errorReadOnly
}

// Rmdir removes the root directory of the Fs object
func (f *Fs) Rmdir(dir string) error {
	return errorReadOnly
}

// Update in to the object with the modTime given of the given size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	return errorReadOnly
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
	return o.contentType
}

// Check the interfaces are satisfied
var (
	_ fs.Fs          = &Fs{}
	_ fs.PutStreamer = &Fs{}
	_ fs.Object      = &Object{}
	_ fs.MimeTyper   = &Object{}
)
backend/http/http_test.go
@@ -15,8 +15,9 @@ import (
	"time"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/rest"
	"github.com/ncw/rclone/lib/rest"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

@@ -36,12 +37,12 @@ func prepareServer(t *testing.T) func() {
	ts := httptest.NewServer(fileServer)

	// Configure the remote
	fs.LoadConfig()
	config.LoadConfig()
	// fs.Config.LogLevel = fs.LogLevelDebug
	// fs.Config.DumpHeaders = true
	// fs.Config.DumpBodies = true
	fs.ConfigFileSet(remoteName, "type", "http")
	fs.ConfigFileSet(remoteName, "url", ts.URL)
	config.FileSet(remoteName, "type", "http")
	config.FileSet(remoteName, "url", ts.URL)

	// return a function to tidy up
	return ts.Close

@@ -150,6 +151,7 @@ func TestOpen(t *testing.T) {
	fd, err := o.Open()
	require.NoError(t, err)
	data, err := ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "beetroot\n", string(data))

@@ -157,6 +159,7 @@ func TestOpen(t *testing.T) {
	fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
	require.NoError(t, err)
	data, err = ioutil.ReadAll(fd)
	require.NoError(t, err)
	require.NoError(t, fd.Close())
	assert.Equal(t, "eetro", string(data))
}

@@ -206,30 +209,34 @@ func TestIsAFileSubDir(t *testing.T) {

func TestParseName(t *testing.T) {
	for i, test := range []struct {
		base   string
		val    string
		wantOK bool
		want   string
		base    string
		val     string
		wantErr error
		want    string
	}{
		{"http://example.com/", "potato", true, "potato"},
		{"http://example.com/dir/", "potato", true, "potato"},
		{"http://example.com/dir/", "../dir/potato", true, "potato"},
		{"http://example.com/dir/", "..", false, ""},
		{"http://example.com/dir/", "http://example.com/", false, ""},
		{"http://example.com/dir/", "http://example.com/dir/", false, ""},
		{"http://example.com/dir/", "http://example.com/dir/potato", true, "potato"},
		{"http://example.com/dir/", "/dir/", false, ""},
		{"http://example.com/dir/", "/dir/potato", true, "potato"},
		{"http://example.com/dir/", "subdir/potato", false, ""},
		{"http://example.com/dir/", "With percent %25.txt", true, "With percent %.txt"},
		{"http://example.com/dir/", "With colon :", false, ""},
		{"http://example.com/dir/", rest.URLEscape("With colon :"), true, "With colon :"},
		{"http://example.com/", "potato", nil, "potato"},
		{"http://example.com/dir/", "potato", nil, "potato"},
		{"http://example.com/dir/", "potato?download=true", errFoundQuestionMark, ""},
		{"http://example.com/dir/", "../dir/potato", nil, "potato"},
		{"http://example.com/dir/", "..", errNotUnderRoot, ""},
		{"http://example.com/dir/", "http://example.com/", errNotUnderRoot, ""},
		{"http://example.com/dir/", "http://example.com/dir/", errNameIsEmpty, ""},
		{"http://example.com/dir/", "http://example.com/dir/potato", nil, "potato"},
		{"http://example.com/dir/", "https://example.com/dir/potato", errSchemeMismatch, ""},
		{"http://example.com/dir/", "http://notexample.com/dir/potato", errHostMismatch, ""},
		{"http://example.com/dir/", "/dir/", errNameIsEmpty, ""},
		{"http://example.com/dir/", "/dir/potato", nil, "potato"},
		{"http://example.com/dir/", "subdir/potato", errNameContainsSlash, ""},
		{"http://example.com/dir/", "With percent %25.txt", nil, "With percent %.txt"},
		{"http://example.com/dir/", "With colon :", errURLJoinFailed, ""},
		{"http://example.com/dir/", rest.URLPathEscape("With colon :"), nil, "With colon :"},
		{"http://example.com/Dungeons%20%26%20Dragons/", "/Dungeons%20&%20Dragons/D%26D%20Basic%20%28Holmes%2C%20B%2C%20X%2C%20BECMI%29/", nil, "D&D Basic (Holmes, B, X, BECMI)/"},
	} {
		u, err := url.Parse(test.base)
		require.NoError(t, err)
		got, gotOK := parseName(u, test.val)
		got, gotErr := parseName(u, test.val)
		what := fmt.Sprintf("test %d base=%q, val=%q", i, test.base, test.val)
		assert.Equal(t, test.wantOK, gotOK, what)
		assert.Equal(t, test.wantErr, gotErr, what)
		assert.Equal(t, test.want, got, what)
	}
}

backend/http/test/files/four/under four.txt (new file, 1 line)
@@ -0,0 +1 @@
beetroot

backend/http/test/files/one%.txt (new file, 1 line)
@@ -0,0 +1 @@
hello

backend/http/test/files/three/underthree.txt (new file, 1 line)
@@ -0,0 +1 @@
rutabaga

backend/http/test/files/two.html (new file, 1 line)
@@ -0,0 +1 @@
potato
backend/hubic/auth.go (new file, 54 lines)
@@ -0,0 +1,54 @@
package hubic

import (
	"net/http"

	"github.com/ncw/swift"
)

// auth is an authenticator for swift
type auth struct {
	f *Fs
}

// newAuth creates a swift authenticator
func newAuth(f *Fs) *auth {
	return &auth{
		f: f,
	}
}

// Request constructs a http.Request for authentication
//
// returns nil for not needed
func (a *auth) Request(*swift.Connection) (*http.Request, error) {
	err := a.f.getCredentials()
	if err != nil {
		return nil, err
	}
	return nil, nil
}

// Response parses the result of an http request
func (a *auth) Response(resp *http.Response) error {
	return nil
}

// The public storage URL - set Internal to true to read
// internal/service net URL
func (a *auth) StorageUrl(Internal bool) string { // nolint
	return a.f.credentials.Endpoint
}

// The access token
func (a *auth) Token() string {
	return a.f.credentials.Token
}

// The CDN url if available
func (a *auth) CdnUrl() string { // nolint
	return ""
}

// Check the interfaces are satisfied
var _ swift.Authenticator = (*auth)(nil)
backend/hubic/hubic.go
@@ -13,9 +13,12 @@ import (
	"net/http"
	"time"

	"github.com/ncw/rclone/backend/swift"
	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/oauthutil"
	"github.com/ncw/rclone/swift"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/obscure"
	"github.com/ncw/rclone/fs/fshttp"
	"github.com/ncw/rclone/lib/oauthutil"
	swiftLib "github.com/ncw/swift"
	"github.com/pkg/errors"
	"golang.org/x/oauth2"

@@ -38,7 +41,7 @@ var (
			TokenURL: "https://api.hubic.com/oauth/token/",
		},
		ClientID:     rcloneClientID,
		ClientSecret: fs.MustReveal(rcloneEncryptedClientSecret),
		ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
		RedirectURL:  oauthutil.RedirectLocalhostURL,
	}
)

@@ -56,10 +59,10 @@ func init() {
			}
		},
		Options: []fs.Option{{
			Name: fs.ConfigClientID,
			Name: config.ConfigClientID,
			Help: "Hubic Client Id - leave blank normally.",
		}, {
			Name: fs.ConfigClientSecret,
			Name: config.ConfigClientSecret,
			Help: "Hubic Client Secret - leave blank normally.",
		}},
	})

@@ -157,7 +160,7 @@ func NewFs(name, root string) (fs.Fs, error) {
		Auth:           newAuth(f),
		ConnectTimeout: 10 * fs.Config.ConnectTimeout, // Use the timeouts in the transport
		Timeout:        10 * fs.Config.Timeout,        // Use the timeouts in the transport
		Transport:      fs.Config.Transport(),
		Transport:      fshttp.NewTransport(fs.Config),
	}
	err = c.Authenticate()
	if err != nil {
backend/hubic/hubic_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Hubic filesystem interface
package hubic_test

import (
	"testing"

	"github.com/ncw/rclone/backend/hubic"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "TestHubic:",
		NilObject:  (*hubic.Object)(nil),
	})
}
backend/local/about_unix.go (new file, 29 lines)
@@ -0,0 +1,29 @@
// +build darwin dragonfly freebsd linux

package local

import (
	"syscall"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
	var s syscall.Statfs_t
	err := syscall.Statfs(f.root, &s)
	if err != nil {
		return nil, errors.Wrap(err, "failed to read disk usage")
	}
	bs := int64(s.Bsize)
	usage := &fs.Usage{
		Total: fs.NewUsageValue(bs * int64(s.Blocks)),         // quota of bytes that can be used
		Used:  fs.NewUsageValue(bs * int64(s.Blocks-s.Bfree)), // bytes in use
		Free:  fs.NewUsageValue(bs * int64(s.Bavail)),         // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// check interface
var _ fs.Abouter = &Fs{}
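
All three figures are block counts scaled by the block size; note that Free uses Bavail (blocks available to unprivileged users) rather than Bfree, so root-reserved blocks count as used-but-not-free. A worked example with made-up Statfs figures:

	// Hypothetical Statfs result: 4 KiB blocks on a 10 GiB filesystem.
	bs := int64(4096)
	var blocks, bfree, bavail uint64 = 2621440, 655360, 524288
	total := bs * int64(blocks)      // 10,737,418,240 bytes = 10 GiB
	used := bs * int64(blocks-bfree) //  8,053,063,680 bytes = 7.5 GiB
	free := bs * int64(bavail)       //  2,147,483,648 bytes = 2 GiB (less than total-used)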

backend/local/about_windows.go (new file, 36 lines)
@@ -0,0 +1,36 @@
// +build windows

package local

import (
	"syscall"
	"unsafe"

	"github.com/ncw/rclone/fs"
	"github.com/pkg/errors"
)

var getFreeDiskSpace = syscall.NewLazyDLL("kernel32.dll").NewProc("GetDiskFreeSpaceExW")

// About gets quota information
func (f *Fs) About() (*fs.Usage, error) {
	var available, total, free int64
	_, _, e1 := getFreeDiskSpace.Call(
		uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(f.root))),
		uintptr(unsafe.Pointer(&available)), // lpFreeBytesAvailable - for this user
		uintptr(unsafe.Pointer(&total)),     // lpTotalNumberOfBytes
		uintptr(unsafe.Pointer(&free)),      // lpTotalNumberOfFreeBytes
	)
	if e1 != syscall.Errno(0) {
		return nil, errors.Wrap(e1, "failed to read disk usage")
	}
	usage := &fs.Usage{
		Total: fs.NewUsageValue(total),        // quota of bytes that can be used
		Used:  fs.NewUsageValue(total - free), // bytes in use
		Free:  fs.NewUsageValue(available),    // bytes which can be uploaded before reaching the quota
	}
	return usage, nil
}

// check interface
var _ fs.Abouter = &Fs{}
backend/local/local.go
@@ -16,14 +16,19 @@ import (
	"unicode/utf8"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config"
	"github.com/ncw/rclone/fs/config/flags"
	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/lib/readers"
	"github.com/pkg/errors"
	"google.golang.org/appengine/log"
)

var (
	followSymlinks = fs.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
	skipSymlinks   = fs.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
	noUTFNorm      = fs.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
	followSymlinks = flags.BoolP("copy-links", "L", false, "Follow symlinks and copy the pointed to item.")
	skipSymlinks   = flags.BoolP("skip-links", "", false, "Don't warn about skipped symlinks.")
	noUTFNorm      = flags.BoolP("local-no-unicode-normalization", "", false, "Don't apply unicode normalization to paths and filenames")
	noCheckUpdated = flags.BoolP("local-no-check-updated", "", false, "Don't check to see if the files change during upload")
)

// Constants

@@ -60,8 +65,9 @@ type Fs struct {
	warned map[string]struct{} // whether we have warned about this string
	nounc  bool                // Skip UNC conversion on Windows
	// do os.Lstat or os.Stat
	lstat    func(name string) (os.FileInfo, error)
	dirNames *mapper // directory name mapping
	lstat          func(name string) (os.FileInfo, error)
	dirNames       *mapper    // directory name mapping
	objectHashesMu sync.Mutex // global lock for Object.hashes
}

// Object represents a local filesystem object

@@ -72,7 +78,7 @@ type Object struct {
	size    int64 // file metadata - always present
	mode    os.FileMode
	modTime time.Time
	hashes map[fs.HashType]string // Hashes
	hashes map[hash.Type]string // Hashes
}

// ------------------------------------------------------------

@@ -85,7 +91,7 @@ func NewFs(name, root string) (fs.Fs, error) {
		log.Errorf(nil, "The --local-no-unicode-normalization flag is deprecated and will be removed")
	}

	nounc := fs.ConfigFileGet(name, "nounc")
	nounc := config.FileGet(name, "nounc")
	f := &Fs{
		name:   name,
		warned: make(map[string]struct{}),

@@ -175,6 +181,9 @@ func (f *Fs) newObjectWithInfo(remote, dstPath string, info os.FileInfo) (fs.Obj
		if os.IsNotExist(err) {
			return nil, fs.ErrorObjectNotFound
		}
		if os.IsPermission(err) {
			return nil, fs.ErrorPermissionDenied
		}
		return nil, err
	}
}

@@ -413,7 +422,7 @@ func (f *Fs) readPrecision() (precision time.Duration) {

	// If it matches - have found the precision
	// fmt.Println("compare", fi.ModTime(), t)
	if fi.ModTime() == t {
	if fi.ModTime().Equal(t) {
		// fmt.Println("Precision detected as", duration)
		return duration
	}

@@ -484,7 +493,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
	} else if err != nil {
		// not quite clear, but probably trying to move a file across file system
		// boundaries. Copying might still work.
		fs.Errorf(src, "Can't move: %v: trying copy", err)
		fs.Debugf(src, "Can't move: %v: trying copy", err)
		return nil, fs.ErrorCantMove
	}

@@ -528,12 +537,25 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
	}

	// Do the move
	return os.Rename(srcPath, dstPath)
	err = os.Rename(srcPath, dstPath)
	if os.IsNotExist(err) {
		// race condition, source was deleted in the meantime
		return err
	} else if os.IsPermission(err) {
		// not enough rights to write to dst
		return err
	} else if err != nil {
		// not quite clear, but probably trying to move directory across file system
		// boundaries. Copying might still work.
		fs.Debugf(src, "Can't move dir: %v: trying copy", err)
		return fs.ErrorCantDirMove
	}
	return nil
}

// Hashes returns the supported hash sets.
func (f *Fs) Hashes() fs.HashSet {
	return fs.SupportedHashes
func (f *Fs) Hashes() hash.Set {
	return hash.Supported
}

// ------------------------------------------------------------

@@ -557,7 +579,7 @@ func (o *Object) Remote() string {
}

// Hash returns the requested hash of a file as a lowercase hex string
func (o *Object) Hash(r fs.HashType) (string, error) {
func (o *Object) Hash(r hash.Type) (string, error) {
	// Check that the underlying file hasn't changed
	oldtime := o.modTime
	oldsize := o.size

@@ -566,17 +588,16 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
		return "", errors.Wrap(err, "hash: failed to stat")
	}

	if !o.modTime.Equal(oldtime) || oldsize != o.size {
		o.hashes = nil
	}
	o.fs.objectHashesMu.Lock()
	hashes := o.hashes
	o.fs.objectHashesMu.Unlock()

	if o.hashes == nil {
		o.hashes = make(map[fs.HashType]string)
	if !o.modTime.Equal(oldtime) || oldsize != o.size || hashes == nil {
		in, err := os.Open(o.path)
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to open")
		}
		o.hashes, err = fs.HashStream(in)
		hashes, err = hash.Stream(in)
		closeErr := in.Close()
		if err != nil {
			return "", errors.Wrap(err, "hash: failed to read")

@@ -584,8 +605,11 @@ func (o *Object) Hash(r fs.HashType) (string, error) {
		if closeErr != nil {
			return "", errors.Wrap(closeErr, "hash: failed to close")
		}
		o.fs.objectHashesMu.Lock()
		o.hashes = hashes
		o.fs.objectHashesMu.Unlock()
	}
	return o.hashes[r], nil
	return hashes[r], nil
}

// Size returns the size of an object in bytes

@@ -618,11 +642,6 @@ func (o *Object) Storable() bool {
	}
}
	mode := o.mode
	// On windows a file with os.ModeSymlink represents a file with reparse points
	if runtime.GOOS == "windows" && (mode&os.ModeSymlink) != 0 {
		fs.Debugf(o, "Clearing symlink bit to allow a file with reparse points to be copied")
		mode &^= os.ModeSymlink
	}
	if mode&os.ModeSymlink != 0 {
		if !*skipSymlinks {
			fs.Logf(o, "Can't follow symlink without -L/--copy-links")

@@ -641,13 +660,28 @@ func (o *Object) Storable() bool {
// localOpenFile wraps an io.ReadCloser and updates the md5sum of the
// object that is read
type localOpenFile struct {
	o    *Object         // object that is open
	in   io.ReadCloser   // handle we are wrapping
	hash *fs.MultiHasher // currently accumulating hashes
	o    *Object           // object that is open
	in   io.ReadCloser     // handle we are wrapping
	hash *hash.MultiHasher // currently accumulating hashes
	fd   *os.File          // file object reference
}

// Read bytes from the object - see io.Reader
func (file *localOpenFile) Read(p []byte) (n int, err error) {
	if !*noCheckUpdated {
		// Check if file has the same size and modTime
		fi, err := file.fd.Stat()
		if err != nil {
			return 0, errors.Wrap(err, "can't read status of source file while transferring")
		}
		if file.o.size != fi.Size() {
			return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
		}
		if !file.o.modTime.Equal(fi.ModTime()) {
			return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
		}
	}

	n, err = file.in.Read(p)
	if n > 0 {
		// Hash routines never return an error

@@ -661,7 +695,9 @@ func (file *localOpenFile) Close() (err error) {
	err = file.in.Close()
	if err == nil {
		if file.hash.Size() == file.o.Size() {
			file.o.fs.objectHashesMu.Lock()
			file.o.hashes = file.hash.Sums()
			file.o.fs.objectHashesMu.Unlock()
		}
	}
	return err

@@ -669,12 +705,14 @@ func (file *localOpenFile) Close() (err error) {

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	var offset int64
	hashes := fs.SupportedHashes
	var offset, limit int64 = 0, -1
	hashes := hash.Supported
	for _, option := range options {
		switch x := option.(type) {
		case *fs.SeekOption:
			offset = x.Offset
		case *fs.RangeOption:
			offset, limit = x.Decode(o.size)
		case *fs.HashesOption:
			hashes = x.Hashes
		default:

@@ -688,21 +726,23 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
	if err != nil {
		return
	}
	wrappedFd := readers.NewLimitedReadCloser(fd, limit)
	if offset != 0 {
		// seek the object
		_, err = fd.Seek(offset, 0)
		_, err = fd.Seek(offset, io.SeekStart)
		// don't attempt to make checksums
		return fd, err
		return wrappedFd, err
	}
	hash, err := fs.NewMultiHasherTypes(hashes)
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return nil, err
	}
	// Update the md5sum as we go along
	in = &localOpenFile{
		o:    o,
		in:   fd,
		in:   wrappedFd,
		hash: hash,
		fd:   fd,
	}
	return in, nil
}
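
Open now distinguishes plain seeks from ranged reads: fs.RangeOption carries Start/End byte positions and Decode turns them into an (offset, limit) pair. Judging by TestOpen earlier in this diff, the range is inclusive:

	in, err := o.Open(&fs.RangeOption{Start: 1, End: 5})
	// For the 9-byte test file "beetroot\n" this reads the five bytes
	// "eetro": Decode(9) presumably yields offset 1 and limit 5, and
	// readers.NewLimitedReadCloser stops the read once the limit is hit.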

@@ -715,7 +755,7 @@ func (o *Object) mkdirAll() error {

// Update the object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
	hashes := fs.SupportedHashes
	hashes := hash.Supported
	for _, option := range options {
		switch x := option.(type) {
		case *fs.HashesOption:

@@ -734,7 +774,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	}

	// Calculate the hash of the object we are reading as we go along
	hash, err := fs.NewMultiHasherTypes(hashes)
	hash, err := hash.NewMultiHasherTypes(hashes)
	if err != nil {
		return err
	}

@@ -754,7 +794,9 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
	}

	// All successful so update the hashes
	o.fs.objectHashesMu.Lock()
	o.hashes = hash.Sums()
	o.fs.objectHashesMu.Unlock()

	// Set the mtime
	err = o.SetModTime(src.ModTime())

@@ -792,7 +834,7 @@ func (o *Object) lstat() error {

// Remove an object
func (o *Object) Remove() error {
	return os.Remove(o.path)
	return remove(o.path)
}

// Return the directory and file from an OS path. Assumes
backend/local/local_internal_test.go (new file, 78 lines)
@@ -0,0 +1,78 @@
package local

import (
	"os"
	"path"
	"testing"
	"time"

	"github.com/ncw/rclone/fs/hash"
	"github.com/ncw/rclone/fstest"
	"github.com/ncw/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// TestMain drives the tests
func TestMain(m *testing.M) {
	fstest.TestMain(m)
}

func TestMapper(t *testing.T) {
	m := newMapper()
	assert.Equal(t, m.m, map[string]string{})
	assert.Equal(t, "potato", m.Save("potato", "potato"))
	assert.Equal(t, m.m, map[string]string{})
	assert.Equal(t, "-r'áö", m.Save("-r?'a´o¨", "-r'áö"))
	assert.Equal(t, m.m, map[string]string{
		"-r'áö": "-r?'a´o¨",
	})
	assert.Equal(t, "potato", m.Load("potato"))
	assert.Equal(t, "-r?'a´o¨", m.Load("-r'áö"))
}

// Test copy with source file that's updating
func TestUpdatingCheck(t *testing.T) {
	r := fstest.NewRun(t)
	defer r.Finalise()
	filePath := "sub dir/local test"
	r.WriteFile(filePath, "content", time.Now())

	fd, err := os.Open(path.Join(r.LocalName, filePath))
	if err != nil {
		t.Fatalf("failed opening file %q: %v", filePath, err)
	}

	fi, err := fd.Stat()
	require.NoError(t, err)
	o := &Object{size: fi.Size(), modTime: fi.ModTime()}
	wrappedFd := readers.NewLimitedReadCloser(fd, -1)
	hash, err := hash.NewMultiHasherTypes(hash.Supported)
	require.NoError(t, err)
	in := localOpenFile{
		o:    o,
		in:   wrappedFd,
		hash: hash,
		fd:   fd,
	}

	buf := make([]byte, 1)
	_, err = in.Read(buf)
	require.NoError(t, err)

	r.WriteFile(filePath, "content updated", time.Now())
	_, err = in.Read(buf)
	require.Errorf(t, err, "can't copy - source file is being updated")

	// turn the checking off and try again
	*noCheckUpdated = true
	defer func() {
		*noCheckUpdated = false
	}()

	r.WriteFile(filePath, "content updated", time.Now())
	_, err = in.Read(buf)
	require.NoError(t, err)
}
backend/local/local_test.go (new file, 17 lines)
@@ -0,0 +1,17 @@
// Test Local filesystem interface
package local_test

import (
	"testing"

	"github.com/ncw/rclone/backend/local"
	"github.com/ncw/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
	fstests.Run(t, &fstests.Opt{
		RemoteName: "",
		NilObject:  (*local.Object)(nil),
	})
}
backend/local/read_device_unix.go
@@ -9,10 +9,11 @@ import (
	"syscall"

	"github.com/ncw/rclone/fs"
	"github.com/ncw/rclone/fs/config/flags"
)

var (
	oneFileSystem = fs.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
	oneFileSystem = flags.BoolP("one-file-system", "x", false, "Don't cross filesystem boundaries.")
)

// readDevice turns a valid os.FileInfo into a device number,
backend/local/remove_other.go (new file, 10 lines)
@@ -0,0 +1,10 @@
//+build !windows

package local

import "os"

// Removes name - no retries needed outside Windows
func remove(name string) error {
	return os.Remove(name)
}
backend/local/remove_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package local

import (
	"io/ioutil"
	"os"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// Check we can remove an open file
func TestRemove(t *testing.T) {
	fd, err := ioutil.TempFile("", "rclone-remove-test")
	require.NoError(t, err)
	name := fd.Name()
	defer func() {
		_ = os.Remove(name)
	}()

	exists := func() bool {
		_, err := os.Stat(name)
		if err == nil {
			return true
		} else if os.IsNotExist(err) {
			return false
		}
		require.NoError(t, err)
		return false
	}

	assert.True(t, exists())
	// close the file in the background
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		time.Sleep(250 * time.Millisecond)
		require.NoError(t, fd.Close())
	}()
	// delete the open file
	err = remove(name)
	require.NoError(t, err)
	// check it no longer exists
	assert.False(t, exists())
	// wait for background close
	wg.Wait()
}
backend/local/remove_windows.go (new file, 38 lines)
@@ -0,0 +1,38 @@
//+build windows

package local

import (
	"os"
	"syscall"
	"time"

	"github.com/ncw/rclone/fs"
)

const (
	ERROR_SHARING_VIOLATION syscall.Errno = 32
)

// Removes name, retrying on a sharing violation
func remove(name string) (err error) {
	const maxTries = 10
	var sleepTime = 1 * time.Millisecond
	for i := 0; i < maxTries; i++ {
		err = os.Remove(name)
		if err == nil {
			break
		}
		pathErr, ok := err.(*os.PathError)
		if !ok {
			break
		}
		if pathErr.Err != ERROR_SHARING_VIOLATION {
			break
		}
		fs.Logf(name, "Remove detected sharing violation - retry %d/%d sleeping %v", i+1, maxTries, sleepTime)
		time.Sleep(sleepTime)
		sleepTime <<= 1
	}
	return err
}
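
The backoff doubles from 1 ms after each failed attempt, so with maxTries = 10 a file that never becomes removable costs at most 1 + 2 + … + 512 ms of sleeping, i.e. just over a second, before the sharing-violation error is returned.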
Some files were not shown because too many files have changed in this diff.