Mirror of https://github.com/rclone/rclone.git (synced 2025-12-06 00:03:32 +00:00)

Compare commits: v1.47.0...vfs-refact (904 commits)
[Commit list: 904 commits, from e2a2eb349f (oldest listed) to 40b9e312c6 (newest listed); the table's Author and Date columns were empty.]
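The same comparison can be reproduced from a local clone; a minimal sketch, assuming the vfs-refact branch is still available on the remote:

    # clone the repository and fetch the branch being compared
    git clone https://github.com/rclone/rclone.git && cd rclone
    git fetch origin vfs-refact:vfs-refact
    # list and count the commits vfs-refact adds on top of v1.47.0
    git log --oneline v1.47.0..vfs-refact | wc -l   # 904 per the header above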
appveyor.yml (deleted, 49 lines; name inferred from the AppVeyor-specific content, indentation reconstructed)

@@ -1,49 +0,0 @@
version: "{build}"

os: Windows Server 2012 R2

clone_folder: c:\gopath\src\github.com\ncw\rclone

cache:
  - '%LocalAppData%\go-build'

environment:
  GOPATH: C:\gopath
  CPATH: C:\Program Files (x86)\WinFsp\inc\fuse
  ORIGPATH: '%PATH%'
  NOCCPATH: C:\MinGW\bin;%GOPATH%\bin;%PATH%
  PATHCC64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin;%NOCCPATH%
  PATHCC32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin;%NOCCPATH%
  PATH: '%PATHCC64%'
  RCLONE_CONFIG_PASS:
    secure: HbzxSy9zQ8NYWN9NNPf6ALQO9Q0mwRNqwehsLcOEHy0=

install:
  - choco install winfsp -y
  - choco install zip -y
  - copy c:\MinGW\bin\mingw32-make.exe c:\MinGW\bin\make.exe

build_script:
  - echo %PATH%
  - echo %GOPATH%
  - go version
  - go env
  - go install
  - go build
  - make log_since_last_release > %TEMP%\git-log.txt
  - make version > %TEMP%\version
  - set /p RCLONE_VERSION=<%TEMP%\version
  - set PATH=%PATHCC32%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/386" -cgo -tags cmount %RCLONE_VERSION%
  - set PATH=%PATHCC64%
  - go run bin/cross-compile.go -release beta-latest -git-log %TEMP%\git-log.txt -include "^windows/amd64" -cgo -no-clean -tags cmount %RCLONE_VERSION%

test_script:
  - make GOTAGS=cmount quicktest

artifacts:
  - path: rclone.exe
  - path: build/*-v*.zip

deploy_script:
  - IF "%APPVEYOR_REPO_NAME%" == "ncw/rclone" IF "%APPVEYOR_PULL_REQUEST_NUMBER%" == "" make appveyor_upload
.circleci/config.yml (deleted, 50 lines; name inferred from the CircleCI-specific content, indentation reconstructed)

@@ -1,50 +0,0 @@
---
version: 2

jobs:

  build:
    machine: true

    working_directory: ~/.go_workspace/src/github.com/ncw/rclone

    steps:
      - checkout

      - run:
          name: Cross-compile rclone
          command: |
            docker pull rclone/xgo-cgofuse
            go get -v github.com/karalabe/xgo
            xgo \
                --image=rclone/xgo-cgofuse \
                --targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
                -tags cmount \
                .
            xgo \
                --targets=android/*,ios/* \
                .

      - run:
          name: Prepare artifacts
          command: |
            mkdir -p /tmp/rclone.dist
            cp -R rclone-* /tmp/rclone.dist
            mkdir build
            cp -R rclone-* build/

      - run:
          name: Build rclone
          command: |
            go version
            go build

      - run:
          name: Upload artifacts
          command: |
            if [[ $CIRCLE_PULL_REQUEST != "" ]]; then
              make circleci_upload
            fi

      - store_artifacts:
          path: /tmp/rclone.dist
.gitattributes (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
# Ignore generated files in GitHub language statistics and diffs
/MANUAL.* linguist-generated=true
/rclone.1 linguist-generated=true

# Don't fiddle with the line endings of test data
**/testdata/** -text
**/test/** -text
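Whether the new attributes take effect can be verified with git check-attr; a small sketch (the testdata path is a hypothetical example):

    # generated files should be flagged for linguist
    git check-attr linguist-generated -- MANUAL.html rclone.1
    # MANUAL.html: linguist-generated: true
    # rclone.1: linguist-generated: true
    # test data should have text handling (line-ending conversion) unset
    git check-attr text -- backend/local/testdata/sample.txt
    # backend/local/testdata/sample.txt: text: unset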
.github/ISSUE_TEMPLATE.md (vendored, 2 lines changed)

@@ -10,7 +10,7 @@ instead of filing an issue for a quick response.
 
 If you are reporting a bug or asking for a new feature then please use one of the templates here:
 
-https://github.com/ncw/rclone/issues/new
+https://github.com/rclone/rclone/issues/new
 
 otherwise fill in the form below.
 
.github/PULL_REQUEST_TEMPLATE.md (vendored, 4 lines changed)

@@ -22,8 +22,8 @@ Link issues and relevant forum posts here.
 
 #### Checklist
 
-- [ ] I have read the [contribution guidelines](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
+- [ ] I have read the [contribution guidelines](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#submitting-a-pull-request).
 - [ ] I have added tests for all changes in this PR if appropriate.
 - [ ] I have added documentation for the changes if appropriate.
-- [ ] All commit messages are in [house style](https://github.com/ncw/rclone/blob/master/CONTRIBUTING.md#commit-messages).
+- [ ] All commit messages are in [house style](https://github.com/rclone/rclone/blob/master/CONTRIBUTING.md#commit-messages).
 - [ ] I'm done, this Pull Request is ready for review :-)
 
.github/workflows/build.yml (vendored, new file, 254 lines; indentation reconstructed)

@@ -0,0 +1,254 @@
---
# Github Actions build for rclone
# -*- compile-command: "yamllint -f parsable build.yml" -*-

name: build

# Trigger the workflow on push or pull request
on:
  push:
    branches:
      - '*'
    tags:
      - '*'
  pull_request:

jobs:
  build:
    timeout-minutes: 60
    strategy:
      fail-fast: false
      matrix:
        job_name: ['linux', 'mac', 'windows_amd64', 'windows_386', 'other_os', 'modules_race', 'go1.11', 'go1.12', 'go1.13']

        include:
          - job_name: linux
            os: ubuntu-latest
            go: '1.14.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^linux/"'
            check: true
            quicktest: true
            deploy: true

          - job_name: mac
            os: macOS-latest
            go: '1.14.x'
            modules: 'off'
            gotags: ''  # cmount doesn't work on osx travis for some reason
            build_flags: '-include "^darwin/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_amd64
            os: windows-latest
            go: '1.14.x'
            modules: 'off'
            gotags: cmount
            build_flags: '-include "^windows/amd64" -cgo'
            quicktest: true
            racequicktest: true
            deploy: true

          - job_name: windows_386
            os: windows-latest
            go: '1.14.x'
            modules: 'off'
            gotags: cmount
            goarch: '386'
            cgo: '1'
            build_flags: '-include "^windows/386" -cgo'
            quicktest: true
            deploy: true

          - job_name: other_os
            os: ubuntu-latest
            go: '1.14.x'
            modules: 'off'
            build_flags: '-exclude "^(windows/|darwin/amd64|linux/)"'
            compile_all: true
            deploy: true

          - job_name: modules_race
            os: ubuntu-latest
            go: '1.14.x'
            modules: 'on'
            quicktest: true
            racequicktest: true

          - job_name: go1.11
            os: ubuntu-latest
            go: '1.11.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.12
            os: ubuntu-latest
            go: '1.12.x'
            modules: 'off'
            quicktest: true

          - job_name: go1.13
            os: ubuntu-latest
            go: '1.13.x'
            modules: 'off'
            quicktest: true

    name: ${{ matrix.job_name }}

    runs-on: ${{ matrix.os }}

    steps:
      - name: Checkout
        uses: actions/checkout@v1
        with:
          # Checkout into a fixed path to avoid import path problems on go < 1.11
          path: ./src/github.com/rclone/rclone

      - name: Install Go
        uses: actions/setup-go@v1
        with:
          go-version: ${{ matrix.go }}

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'
          echo '::set-env name=GO111MODULE::${{ matrix.modules }}'
          echo '::set-env name=GOTAGS::${{ matrix.gotags }}'
          echo '::set-env name=BUILD_FLAGS::${{ matrix.build_flags }}'
          if [[ "${{ matrix.goarch }}" != "" ]]; then echo '::set-env name=GOARCH::${{ matrix.goarch }}' ; fi
          if [[ "${{ matrix.cgo }}" != "" ]]; then echo '::set-env name=CGO_ENABLED::${{ matrix.cgo }}' ; fi

      - name: Install Libraries on Linux
        shell: bash
        run: |
          sudo modprobe fuse
          sudo chmod 666 /dev/fuse
          sudo chown root:$USER /etc/fuse.conf
          sudo apt-get install fuse libfuse-dev rpm pkg-config
        if: matrix.os == 'ubuntu-latest'

      - name: Install Libraries on macOS
        shell: bash
        run: |
          brew update
          brew cask install osxfuse
        if: matrix.os == 'macOS-latest'

      - name: Install Libraries on Windows
        shell: powershell
        run: |
          $ProgressPreference = 'SilentlyContinue'
          choco install -y winfsp zip
          Write-Host "::set-env name=CPATH::C:\Program Files\WinFsp\inc\fuse;C:\Program Files (x86)\WinFsp\inc\fuse"
          if ($env:GOARCH -eq "386") {
            choco install -y mingw --forcex86 --force
            Write-Host "::add-path::C:\\ProgramData\\chocolatey\\lib\\mingw\\tools\\install\\mingw32\\bin"
          }
          # Copy mingw32-make.exe to make.exe so the same command line
          # can be used on Windows as on macOS and Linux
          $path = (get-command mingw32-make.exe).Path
          Copy-Item -Path $path -Destination (Join-Path (Split-Path -Path $path) 'make.exe')
        if: matrix.os == 'windows-latest'

      - name: Print Go version and environment
        shell: bash
        run: |
          printf "Using go at: $(which go)\n"
          printf "Go version: $(go version)\n"
          printf "\n\nGo environment:\n\n"
          go env
          printf "\n\nRclone environment:\n\n"
          make vars
          printf "\n\nSystem environment:\n\n"
          env

      - name: Run tests
        shell: bash
        run: |
          make
          make quicktest
        if: matrix.quicktest

      - name: Race test
        shell: bash
        run: |
          make racequicktest
        if: matrix.racequicktest

      - name: Code quality test
        shell: bash
        run: |
          make build_dep
          make check
        if: matrix.check

      - name: Compile all architectures test
        shell: bash
        run: |
          make
          make compile_all
        if: matrix.compile_all

      - name: Deploy built binaries
        shell: bash
        run: |
          if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then make release_dep ; fi
          make travis_beta
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # working-directory: '$(modulePath)'
        # Deploy binaries if enabled in config && not a PR && not a fork
        if: matrix.deploy && github.head_ref == '' && github.repository == 'rclone/rclone'

  xgo:
    timeout-minutes: 60
    name: "xgo cross compile"
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v1
        with:
          # Checkout into a fixed path to avoid import path problems on go < 1.11
          path: ./src/github.com/rclone/rclone

      - name: Set environment variables
        shell: bash
        run: |
          echo '::set-env name=GOPATH::${{ runner.workspace }}'
          echo '::add-path::${{ runner.workspace }}/bin'

      - name: Cross-compile rclone
        run: |
          docker pull billziss/xgo-cgofuse
          go get -v github.com/karalabe/xgo
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=darwin/386,darwin/amd64,linux/386,linux/amd64,windows/386,windows/amd64 \
              -tags cmount \
              -dest build \
              .
          xgo \
              -image=billziss/xgo-cgofuse \
              -targets=android/*,ios/* \
              -dest build \
              .

      - name: Build rclone
        run: |
          docker pull golang
          docker run --rm -v "$PWD":/usr/src/rclone -w /usr/src/rclone golang go build -mod=vendor -v

      - name: Upload artifacts
        run: |
          make circleci_upload
        env:
          RCLONE_CONFIG_PASS: ${{ secrets.RCLONE_CONFIG_PASS }}
        # Upload artifacts if not a PR && not a fork
        if: github.head_ref == '' && github.repository == 'rclone/rclone'
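Since each workflow step above shells out to Makefile targets, a failing matrix job can usually be reproduced locally with the same settings; a sketch of the 'linux' job, assuming the checkout lives under $GOPATH/src/github.com/rclone/rclone as the workflow arranges:

    # mirror the 'linux' matrix entry from build.yml
    export GO111MODULE=off
    export GOTAGS=cmount
    export BUILD_FLAGS='-include "^linux/"'
    make                          # build rclone
    make quicktest                # the "Run tests" step
    make build_dep && make check  # the "Code quality test" step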
.gitignore (vendored, 4 lines added)

@@ -5,3 +5,7 @@ build
 docs/public
 rclone.iml
 .idea
+.history
+*.test
+*.log
+*.iml
.travis.yml (deleted, 109 lines; indentation reconstructed)

@@ -1,109 +0,0 @@
---
language: go
sudo: required
dist: xenial
os:
  - linux
go_import_path: github.com/ncw/rclone
before_install:
  - git fetch --unshallow --tags
  - |
    if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then
      sudo modprobe fuse
      sudo chmod 666 /dev/fuse
      sudo chown root:$USER /etc/fuse.conf
    fi
    if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
      brew update
      brew tap caskroom/cask
      brew cask install osxfuse
    fi
    if [[ "$TRAVIS_OS_NAME" == "windows" ]]; then
      choco install -y winfsp zip make
      cd ../..  # fix crlf in git checkout
      mv $TRAVIS_REPO_SLUG _old
      git config --global core.autocrlf false
      git clone _old $TRAVIS_REPO_SLUG
      cd $TRAVIS_REPO_SLUG
    fi
install:
  - make vars
env:
  global:
    - GOTAGS=cmount
    - GO111MODULE=off
    - secure: gU8gCV9R8Kv/Gn0SmCP37edpfIbPoSvsub48GK7qxJdTU628H0KOMiZW/T0gtV5d67XJZ4eKnhJYlxwwxgSgfejO32Rh5GlYEKT/FuVoH0BD72dM1GDFLSrUiUYOdoHvf/BKIFA3dJFT4lk2ASy4Zh7SEoXHG6goBlqUpYx8hVA=
    - secure: AMjrMAksDy3QwqGqnvtUg8FL/GNVgNqTqhntLF9HSU0njHhX6YurGGnfKdD9vNHlajPQOewvmBjwNLcDWGn2WObdvmh9Ohep0EmOjZ63kliaRaSSQueSd8y0idfqMQAxep0SObOYbEDVmQh0RCAE9wOVKRaPgw98XvgqWGDq5Tw=
    - secure: Uaiveq+/rvQjO03GzvQZV2J6pZfedoFuhdXrLVhhHSeP4ZBca0olw7xaqkabUyP3LkVYXMDSX8EbyeuQT1jfEe5wp5sBdfaDtuYW6heFyjiHIIIbVyBfGXon6db4ETBjOaX/Xt8uktrgNge6qFlj+kpnmpFGxf0jmDLw1zgg7tk=
addons:
  apt:
    packages:
      - fuse
      - libfuse-dev
      - rpm
      - pkg-config
cache:
  directories:
    - $HOME/.cache/go-build
matrix:
  allow_failures:
    - go: tip
  include:
    - go: 1.8.x
      script:
        - make quicktest
    - go: 1.9.x
      script:
        - make quicktest
    - go: 1.10.x
      script:
        - make quicktest
    - go: 1.11.x
      script:
        - make quicktest
    - go: 1.12.x
      env:
        - GOTAGS=cmount
      script:
        - make build_dep
        - make check
        - make quicktest
        - make racequicktest
        - make compile_all
    - os: osx
      go: 1.12.x
      env:
        - GOTAGS=  # cmount doesn't work on osx travis for some reason
      cache:
        directories:
          - $HOME/Library/Caches/go-build
      script:
        - make
        - make quicktest
        - make racequicktest
    # - os: windows
    #   go: 1.12.x
    #   env:
    #     - GOTAGS=cmount
    #     - CPATH='C:\Program Files (x86)\WinFsp\inc\fuse'
    #   #filter_secrets: false # works around a problem with secrets under windows
    #   cache:
    #     directories:
    #       - ${LocalAppData}/go-build
    #   script:
    #     - make
    #     - make quicktest
    #     - make racequicktest
    - go: tip
      script:
        - make quicktest

deploy:
  provider: script
  script: make travis_beta
  skip_cleanup: true
  on:
    repo: ncw/rclone
    all_branches: true
    go: 1.12.x
    condition: $TRAVIS_PULL_REQUEST == false && $TRAVIS_OS_NAME != "windows"
CONTRIBUTING.md (name inferred from content; see the plugin build sketch after this section)

@@ -29,12 +29,12 @@ You'll need a Go environment set up with GOPATH set. See [the Go
 getting started docs](https://golang.org/doc/install) for more info.
 
 First in your web browser press the fork button on [rclone's GitHub
-page](https://github.com/ncw/rclone).
+page](https://github.com/rclone/rclone).
 
 Now in your terminal
 
-    go get -u github.com/ncw/rclone
-    cd $GOPATH/src/github.com/ncw/rclone
+    go get -u github.com/rclone/rclone
+    cd $GOPATH/src/github.com/rclone/rclone
     git remote rename origin upstream
     git remote add origin git@github.com:YOURUSER/rclone.git
 
@@ -82,13 +82,9 @@ You patch will get reviewed and you might get asked to fix some stuff.
 If so, then make the changes in the same branch, squash the commits,
 rebase it to master then push it to GitHub with `--force`.
 
-## Enabling CI for your fork ##
+## CI for your fork ##
 
-The CI config files for rclone have taken care of forks of the project, so you can enable CI for your fork repo easily.
-
-rclone currently uses [Travis CI](https://travis-ci.org/), [AppVeyor](https://ci.appveyor.com/), and
-[Circle CI](https://circleci.com/) to build the project. To enable them for your fork, simply go into their
-websites, find your fork of rclone, and enable building there.
+rclone currently uses [GitHub Actions](https://github.com/rclone/rclone/actions) to build and test the project, which should be automatically available for your fork too from the `Actions` tab in your repository.
 
 ## Testing ##
 
@@ -118,7 +114,7 @@ but they can be run against any of the remotes.
 
     cd fs/sync
     go test -v -remote TestDrive:
-    go test -v -remote TestDrive: -subdir
+    go test -v -remote TestDrive: -fast-list
 
     cd fs/operations
     go test -v -remote TestDrive:
@@ -127,7 +123,7 @@ If you want to use the integration test framework to run these tests
 all together with an HTML report and test retries then from the
 project root:
 
-    go install github.com/ncw/rclone/fstest/test_all
+    go install github.com/rclone/rclone/fstest/test_all
     test_all -backend drive
 
 If you want to run all the integration tests against all the remotes,
@@ -152,6 +148,7 @@ with modules beneath.
 * ...commands
 * docs - the documentation and website
+  * content - adjust these docs only - everything else is autogenerated
   * command - these are auto generated - edit the corresponding .go file
 * fs - main rclone definitions - minimal amount of code
   * accounting - bandwidth limiting and statistics
   * asyncreader - an io.Reader which reads ahead
@@ -207,6 +204,9 @@ don't need to run these when adding a feature.
 Documentation for rclone sub commands is with their code, eg
 `cmd/ls/ls.go`.
 
+Note that you can use [GitHub's online editor](https://help.github.com/en/github/managing-files-in-a-repository/editing-files-in-another-users-repository)
+for small changes in the docs which makes it very easy.
+
 ## Making a release ##
 
 There are separate instructions for making a release in the RELEASE.md
@@ -341,6 +341,11 @@ Getting going
 * Add your remote to the imports in `backend/all/all.go`
 * HTTP based remotes are easiest to maintain if they use rclone's rest module, but if there is a really good go SDK then use that instead.
 * Try to implement as many optional methods as possible as it makes the remote more usable.
+* Use lib/encoder to make sure we can encode any path name and `rclone info` to help determine the encodings needed
+  * `rclone purge -v TestRemote:rclone-info`
+  * `rclone info --remote-encoding None -vv --write-json remote.json TestRemote:rclone-info`
+  * `go run cmd/info/internal/build_csv/main.go -o remote.csv remote.json`
+  * open `remote.csv` in a spreadsheet and examine
 
 Unit tests
 
@@ -353,7 +358,7 @@ Integration tests
 * Add your backend to `fstest/test_all/config.yaml`
 * Once you've done that then you can use the integration test framework from the project root:
   * go install ./...
-  * test_all -backend remote
+  * test_all -backends remote
 
 Or if you want to run the integration tests manually:
 
@@ -362,19 +367,59 @@ Or if you want to run the integration tests manually:
 * `go test -v -remote TestRemote:`
 * `cd fs/sync`
 * `go test -v -remote TestRemote:`
 * If you are making a bucket based remote, then check with this also
   * `go test -v -remote TestRemote: -subdir`
-* And if your remote defines `ListR` this also
+* If your remote defines `ListR` check with this also
   * `go test -v -remote TestRemote: -fast-list`
 
 See the [testing](#testing) section for more information on integration tests.
 
-Add your fs to the docs - you'll need to pick an icon for it from [fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in alphabetical order but with the local file system last.
+Add your fs to the docs - you'll need to pick an icon for it from
+[fontawesome](http://fontawesome.io/icons/). Keep lists of remotes in
+alphabetical order of full name of remote (eg `drive` is ordered as
+`Google Drive`) but with the local file system last.
 
 * `README.md` - main GitHub page
 * `docs/content/remote.md` - main docs page (note the backend options are automatically added to this file with `make backenddocs`)
+  * make sure this has the `autogenerated options` comments in (see your reference backend docs)
+  * update them with `make backenddocs` - revert any changes in other backends
 * `docs/content/overview.md` - overview docs
 * `docs/content/docs.md` - list of remotes in config section
 * `docs/content/about.md` - front page of rclone.org
 * `docs/layouts/chrome/navbar.html` - add it to the website navigation
 * `bin/make_manual.py` - add the page to the `docs` constant
 
 Once you've written the docs, run `make serve` and check they look OK
 in the web browser and the links (internal and external) all work.
 
+## Writing a plugin ##
+
+New features (backends, commands) can also be added "out-of-tree", through Go plugins.
+Changes will be kept in a dynamically loaded file instead of being compiled into the main binary.
+This is useful if you can't merge your changes upstream or don't want to maintain a fork of rclone.
+
+Usage
+
+- Naming
+  - Plugins names must have the pattern `librcloneplugin_KIND_NAME.so`.
+  - `KIND` should be one of `backend`, `command` or `bundle`.
+  - Example: A plugin with backend support for PiFS would be called
+    `librcloneplugin_backend_pifs.so`.
+- Loading
+  - Supported on macOS & Linux as of now. ([Go issue for Windows support](https://github.com/golang/go/issues/19282))
+  - Supported on rclone v1.50 or greater.
+  - All plugins in the folder specified by variable `$RCLONE_PLUGIN_PATH` are loaded.
+  - If this variable doesn't exist, plugin support is disabled.
+  - Plugins must be compiled against the exact version of rclone to work.
+    (The rclone used during building the plugin must be the same as the source of rclone)
+
+Building
+
+To turn your existing additions into a Go plugin, move them to an external repository
+and change the top-level package name to `main`.
+
+Check `rclone --version` and make sure that the plugin's rclone dependency and host Go version match.
+
+Then, run `go build -buildmode=plugin -o PLUGIN_NAME.so .` to build the plugin.
+
+[Go reference](https://godoc.org/github.com/rclone/rclone/lib/plugin)
+
+[Minimal example](https://gist.github.com/terorie/21b517ee347828e899e1913efc1d684f)
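Putting the naming and loading rules above together, a build-and-load sketch (the pifs backend and the plugin directory are hypothetical):

    # in the out-of-tree repository whose top-level package is now 'main'
    go build -buildmode=plugin -o librcloneplugin_backend_pifs.so .
    # put the plugin where rclone will look and enable plugin support
    mkdir -p ~/.rclone-plugins
    cp librcloneplugin_backend_pifs.so ~/.rclone-plugins/
    export RCLONE_PLUGIN_PATH=~/.rclone-plugins
    # a matching rclone build (v1.50 or greater) should now see the backend
    rclone config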
Dockerfile (new file, 22 lines)

@@ -0,0 +1,22 @@
FROM golang AS builder

COPY . /go/src/github.com/rclone/rclone/
WORKDIR /go/src/github.com/rclone/rclone/

RUN make quicktest
RUN \
    CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
    make
RUN ./rclone version

# Begin final image
FROM alpine:latest

RUN apk --no-cache add ca-certificates fuse

COPY --from=builder /go/src/github.com/rclone/rclone/rclone /usr/local/bin/

ENTRYPOINT [ "rclone" ]

WORKDIR /data
ENV XDG_CONFIG_HOME=/config
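A usage sketch for the resulting image (the tag name is arbitrary): the config mount lines up with XDG_CONFIG_HOME=/config above, and the data mount with WORKDIR /data:

    # build the two-stage image from the repository root
    docker build -t rclone-local .
    # run it with an rclone.conf under /config and the working files under /data
    docker run --rm \
        -v ~/.config/rclone:/config/rclone \
        -v "$PWD":/data \
        rclone-local listremotes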
MAINTAINERS.md (name inferred from content)

@@ -12,6 +12,7 @@ Current active maintainers of rclone are:
 | Alex Chen | @Cnly | onedrive backend |
 | Sandeep Ummadi | @sandeepkru | azureblob backend |
 | Sebastian Bünger | @buengese | jottacloud & yandex backends |
+| Ivan Andreev | @ivandeex | chunker & mailru backends |
 
 **This is a work in progress Draft**
 
@@ -51,7 +52,7 @@ The milestones have these meanings:
 * Help wanted - blue sky stuff that might get moved up, or someone could help with
 * Known bugs - bugs waiting on external factors or we aren't going to fix for the moment
 
-Tickets [with no milestone](https://github.com/ncw/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
+Tickets [with no milestone](https://github.com/rclone/rclone/issues?utf8=✓&q=is%3Aissue%20is%3Aopen%20no%3Amile) are good candidates for ones that have slipped between the gaps and need following up.
 
 ## Closing Tickets ##
 
MANUAL.html (generated, 8466 lines changed): file diff suppressed because it is too large.
MANUAL.txt (generated, 7952 lines changed): file diff suppressed because it is too large.
Makefile (105 lines changed; a worked example of the version arithmetic follows this section)

@@ -1,23 +1,37 @@
 SHELL = bash
-BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(shell git rev-parse --abbrev-ref HEAD))
+# Branch we are working on
+BRANCH := $(or $(APPVEYOR_REPO_BRANCH),$(TRAVIS_BRANCH),$(BUILD_SOURCEBRANCHNAME),$(lastword $(subst /, ,$(GITHUB_REF))),$(shell git rev-parse --abbrev-ref HEAD))
+# Tag of the current commit, if any. If this is not "" then we are building a release
+RELEASE_TAG := $(shell git tag -l --points-at HEAD)
+# Version of last release (may not be on this branch)
+VERSION := $(shell cat VERSION)
+# Last tag on this branch
 LAST_TAG := $(shell git describe --tags --abbrev=0)
-ifeq ($(BRANCH),$(LAST_TAG))
+# If we are working on a release, override branch to master
+ifdef RELEASE_TAG
 BRANCH := master
 endif
 TAG_BRANCH := -$(BRANCH)
 BRANCH_PATH := branch/
+# If building HEAD or master then unset TAG_BRANCH and BRANCH_PATH
 ifeq ($(subst HEAD,,$(subst master,,$(BRANCH))),)
 TAG_BRANCH :=
 BRANCH_PATH :=
 endif
-TAG := $(shell echo $$(git describe --abbrev=8 --tags | sed 's/-\([0-9]\)-/-00\1-/; s/-\([0-9][0-9]\)-/-0\1-/'))$(TAG_BRANCH)
-NEW_TAG := $(shell echo $(LAST_TAG) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
-ifneq ($(TAG),$(LAST_TAG))
+# Make version suffix -DDD-gCCCCCCCC (D=commits since last relase, C=Commit) or blank
+VERSION_SUFFIX := $(shell git describe --abbrev=8 --tags | perl -lpe 's/^v\d+\.\d+\.\d+//; s/^-(\d+)/"-".sprintf("%03d",$$1)/e;')
+# TAG is current version + number of commits since last release + branch
+TAG := $(VERSION)$(VERSION_SUFFIX)$(TAG_BRANCH)
+NEXT_VERSION := $(shell echo $(VERSION) | perl -lpe 's/v//; $$_ += 0.01; $$_ = sprintf("v%.2f.0", $$_)')
+ifndef RELEASE_TAG
 TAG := $(TAG)-beta
 endif
 GO_VERSION := $(shell go version)
 GO_FILES := $(shell go list ./... | grep -v /vendor/ )
-BETA_PATH := $(BRANCH_PATH)$(TAG)
+ifdef BETA_SUBDIR
+BETA_SUBDIR := /$(BETA_SUBDIR)
+endif
+BETA_PATH := $(BRANCH_PATH)$(TAG)$(BETA_SUBDIR)
 BETA_URL := https://beta.rclone.org/$(BETA_PATH)/
 BETA_UPLOAD_ROOT := memstore:beta-rclone-org
 BETA_UPLOAD := $(BETA_UPLOAD_ROOT)/$(BETA_PATH)

@@ -27,19 +41,23 @@ BUILDTAGS=-tags "$(GOTAGS)"
 LINTTAGS=--build-tags "$(GOTAGS)"
 endif
 
-.PHONY: rclone vars version
+.PHONY: rclone test_all vars version
 
 rclone:
 	touch fs/version.go
-	go install -v --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
-	cp -av `go env GOPATH`/bin/rclone .
+	go build -v --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS)
+	mkdir -p `go env GOPATH`/bin/
+	cp -av rclone`go env GOEXE` `go env GOPATH`/bin/rclone`go env GOEXE`.new
+	mv -v `go env GOPATH`/bin/rclone`go env GOEXE`.new `go env GOPATH`/bin/rclone`go env GOEXE`
+
+test_all:
+	go install --ldflags "-s -X github.com/rclone/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/rclone/rclone/fstest/test_all
 
 vars:
 	@echo SHELL="'$(SHELL)'"
 	@echo BRANCH="'$(BRANCH)'"
 	@echo TAG="'$(TAG)'"
 	@echo LAST_TAG="'$(LAST_TAG)'"
-	@echo NEW_TAG="'$(NEW_TAG)'"
+	@echo VERSION="'$(VERSION)'"
+	@echo NEXT_VERSION="'$(NEXT_VERSION)'"
 	@echo GO_VERSION="'$(GO_VERSION)'"
 	@echo BETA_URL="'$(BETA_URL)'"

@@ -47,8 +65,7 @@ version:
 	@echo '$(TAG)'
 
 # Full suite of integration tests
-test: rclone
-	go install --ldflags "-s -X github.com/ncw/rclone/fs.Version=$(TAG)" $(BUILDTAGS) github.com/ncw/rclone/fstest/test_all
+test: rclone test_all
 	-test_all 2>&1 | tee test_all.log
 	@echo "Written logs in test_all.log"
 
@@ -61,10 +78,7 @@ racequicktest:
 
 # Do source code quality checks
 check: rclone
-	@# we still run go vet for -printfuncs which golangci-lint doesn't do yet
-	@# see: https://github.com/golangci/golangci-lint/issues/204
 	@echo "-- START CODE QUALITY REPORT -------------------------------"
-	@go vet $(BUILDTAGS) -printfuncs Debugf,Infof,Logf,Errorf ./...
 	@golangci-lint run $(LINTTAGS) ./...
 	@echo "-- END CODE QUALITY REPORT ---------------------------------"

@@ -74,8 +88,8 @@ build_dep:
 
 # Get the release dependencies
 release_dep:
-	go get -u github.com/goreleaser/nfpm/...
-	go get -u github.com/aktau/github-release
+	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*_Linux_x86_64.tar.gz'
+	go run bin/get-github-release.go -extract github-release aktau/github-release 'linux-amd64-github-release.tar.bz2'
 
 # Update dependencies
 update:

@@ -83,6 +97,11 @@ update:
 	GO111MODULE=on go mod tidy
 	GO111MODULE=on go mod vendor
 
+# Tidy the module dependencies
+tidy:
+	GO111MODULE=on go mod tidy
+	GO111MODULE=on go mod vendor
+
 doc: rclone.1 MANUAL.html MANUAL.txt rcdocs commanddocs
 
 rclone.1: MANUAL.md

@@ -98,10 +117,10 @@ MANUAL.txt: MANUAL.md
 	pandoc -s --from markdown --to plain MANUAL.md -o MANUAL.txt
 
 commanddocs: rclone
-	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/commands/
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs docs/content/
 
 backenddocs: rclone bin/make_backend_docs.py
-	./bin/make_backend_docs.py
+	XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" ./bin/make_backend_docs.py
 
 rcdocs: rclone
 	bin/make_rc_docs.sh

@@ -154,7 +173,7 @@ log_since_last_release:
 	git log $(LAST_TAG)..
 
 compile_all:
-	go run bin/cross-compile.go -parallel 8 -compile-only $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -compile-only $(BUILDTAGS) $(TAG)
 
 appveyor_upload:
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)

@@ -164,60 +183,54 @@ endif
 	@echo Beta release ready at $(BETA_URL)
 
 circleci_upload:
 	sudo chown -R $$USER build
 	find build -type l -delete
 	gzip -r9v build
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD)/testbuilds
 ifndef BRANCH_PATH
 	./rclone --config bin/travis.rclone.conf -v copy build/ $(BETA_UPLOAD_ROOT)/test/testbuilds-latest
 endif
 	@echo Beta release ready at $(BETA_URL)/testbuilds
 
 BUILD_FLAGS := -exclude "^(windows|darwin)/"
 ifeq ($(TRAVIS_OS_NAME),osx)
 BUILD_FLAGS := -include "^darwin/" -cgo
 endif
 ifeq ($(TRAVIS_OS_NAME),windows)
 # BUILD_FLAGS := -include "^windows/" -cgo
 # 386 doesn't build yet
 BUILD_FLAGS := -include "^windows/amd64" -cgo
 endif
 
 travis_beta:
-ifeq ($(TRAVIS_OS_NAME),linux)
+ifeq (linux,$(filter linux,$(subst Linux,linux,$(TRAVIS_OS_NAME) $(AGENT_OS))))
 	go run bin/get-github-release.go -extract nfpm goreleaser/nfpm 'nfpm_.*\.tar.gz'
 endif
 	git log $(LAST_TAG).. > /tmp/git-log.txt
-	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) -parallel 8 $(BUILDTAGS) $(TAG)
+	go run bin/cross-compile.go -release beta-latest -git-log /tmp/git-log.txt $(BUILD_FLAGS) $(BUILDTAGS) $(TAG)
 	rclone --config bin/travis.rclone.conf -v copy --exclude '*beta-latest*' build/ $(BETA_UPLOAD)
 ifndef BRANCH_PATH
-	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)
+	rclone --config bin/travis.rclone.conf -v copy --include '*beta-latest*' --include version.txt build/ $(BETA_UPLOAD_ROOT)$(BETA_SUBDIR)
 endif
 	@echo Beta release ready at $(BETA_URL)
 
 # Fetch the binary builds from travis and appveyor
 fetch_binaries:
-	rclone -P sync $(BETA_UPLOAD) build/
+	rclone -P sync --exclude "/testbuilds/**" --delete-excluded $(BETA_UPLOAD) build/
 
 serve: website
 	cd docs && hugo server -v -w
 
 tag: doc
-	@echo "Old tag is $(LAST_TAG)"
-	@echo "New tag is $(NEW_TAG)"
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEW_TAG)\"\n" | gofmt > fs/version.go
-	echo -n "$(NEW_TAG)" > docs/layouts/partials/version.html
-	git tag -s -m "Version $(NEW_TAG)" $(NEW_TAG)
-	bin/make_changelog.py $(LAST_TAG) $(NEW_TAG) > docs/content/changelog.md.new
+	@echo "Old tag is $(VERSION)"
+	@echo "New tag is $(NEXT_VERSION)"
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(NEXT_VERSION)\"\n" | gofmt > fs/version.go
+	echo -n "$(NEXT_VERSION)" > docs/layouts/partials/version.html
+	echo "$(NEXT_VERSION)" > VERSION
+	git tag -s -m "Version $(NEXT_VERSION)" $(NEXT_VERSION)
+	bin/make_changelog.py $(LAST_TAG) $(NEXT_VERSION) > docs/content/changelog.md.new
 	mv docs/content/changelog.md.new docs/content/changelog.md
 	@echo "Edit the new changelog in docs/content/changelog.md"
 	@echo "Then commit all the changes"
-	@echo git commit -m \"Version $(NEW_TAG)\" -a -v
+	@echo git commit -m \"Version $(NEXT_VERSION)\" -a -v
 	@echo "And finally run make retag before make cross etc"
 
 retag:
-	git tag -f -s -m "Version $(LAST_TAG)" $(LAST_TAG)
+	git tag -f -s -m "Version $(VERSION)" $(VERSION)
 
 startdev:
-	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(LAST_TAG)-DEV\"\n" | gofmt > fs/version.go
-	git commit -m "Start $(LAST_TAG)-DEV development" fs/version.go
+	echo -e "package fs\n\n// Version of rclone\nvar Version = \"$(VERSION)-DEV\"\n" | gofmt > fs/version.go
+	git commit -m "Start $(VERSION)-DEV development" fs/version.go
 
 winzip:
 	zip -9 rclone-$(TAG).zip rclone.exe
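The NEXT_VERSION rule above is plain perl string-to-number coercion and can be checked in isolation (outside make, the escaped $$ becomes a single $); a worked example:

    # strip the leading v, add 0.01 to bump the minor version, reformat
    echo "v1.50.0" | perl -lpe 's/v//; $_ += 0.01; $_ = sprintf("v%.2f.0", $_)'
    # prints: v1.51.0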
README.md (27 lines changed; badge and logo image markup was lost in extraction and is shown as empty links)

@@ -1,4 +1,4 @@
-[](https://rclone.org/)
+[<img src="https://rclone.org/img/logo_on_light__horizontal_color.svg" width="50%" alt="rclone logo">](https://rclone.org/)
 
 [Website](https://rclone.org) |
 [Documentation](https://rclone.org/docs/) |
@@ -6,13 +6,12 @@
 [Contributing](CONTRIBUTING.md) |
 [Changelog](https://rclone.org/changelog/) |
 [Installation](https://rclone.org/install/) |
-[Forum](https://forum.rclone.org/) |
+[Forum](https://forum.rclone.org/)
 
-[](https://travis-ci.org/ncw/rclone)
-[](https://ci.appveyor.com/project/ncw/rclone)
-[](https://circleci.com/gh/ncw/rclone/tree/master)
-[](https://goreportcard.com/report/github.com/ncw/rclone)
-[](https://godoc.org/github.com/ncw/rclone)
+[](https://github.com/rclone/rclone/actions?query=workflow%3Abuild)
+[](https://goreportcard.com/report/github.com/rclone/rclone)
+[](https://godoc.org/github.com/rclone/rclone)
+[](https://hub.docker.com/r/rclone/rclone)
 
 # Rclone
 
@@ -20,25 +19,31 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 
 ## Storage providers
 
+* 1Fichier [:page_facing_up:](https://rclone.org/fichier/)
 * Alibaba Cloud (Aliyun) Object Storage System (OSS) [:page_facing_up:](https://rclone.org/s3/#alibaba-oss)
 * Amazon Drive [:page_facing_up:](https://rclone.org/amazonclouddrive/) ([See note](https://rclone.org/amazonclouddrive/#status))
 * Amazon S3 [:page_facing_up:](https://rclone.org/s3/)
 * Backblaze B2 [:page_facing_up:](https://rclone.org/b2/)
 * Box [:page_facing_up:](https://rclone.org/box/)
 * Ceph [:page_facing_up:](https://rclone.org/s3/#ceph)
+* Citrix ShareFile [:page_facing_up:](https://rclone.org/sharefile/)
 * DigitalOcean Spaces [:page_facing_up:](https://rclone.org/s3/#digitalocean-spaces)
 * Dreamhost [:page_facing_up:](https://rclone.org/s3/#dreamhost)
 * Dropbox [:page_facing_up:](https://rclone.org/dropbox/)
 * FTP [:page_facing_up:](https://rclone.org/ftp/)
 * GetSky [:page_facing_up:](https://rclone.org/jottacloud/)
 * Google Cloud Storage [:page_facing_up:](https://rclone.org/googlecloudstorage/)
 * Google Drive [:page_facing_up:](https://rclone.org/drive/)
+* Google Photos [:page_facing_up:](https://rclone.org/googlephotos/)
 * HTTP [:page_facing_up:](https://rclone.org/http/)
 * Hubic [:page_facing_up:](https://rclone.org/hubic/)
 * Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
+* Koofr [:page_facing_up:](https://rclone.org/koofr/)
+* Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)
 * Mega [:page_facing_up:](https://rclone.org/mega/)
+* Memory [:page_facing_up:](https://rclone.org/memory/)
 * Microsoft Azure Blob Storage [:page_facing_up:](https://rclone.org/azureblob/)
 * Microsoft OneDrive [:page_facing_up:](https://rclone.org/onedrive/)
 * Minio [:page_facing_up:](https://rclone.org/s3/#minio)
@@ -49,11 +54,14 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
 * Oracle Cloud Storage [:page_facing_up:](https://rclone.org/swift/)
 * ownCloud [:page_facing_up:](https://rclone.org/webdav/#owncloud)
 * pCloud [:page_facing_up:](https://rclone.org/pcloud/)
-* put.io [:page_facing_up:](https://rclone.org/webdav/#put-io)
+* premiumize.me [:page_facing_up:](https://rclone.org/premiumizeme/)
+* put.io [:page_facing_up:](https://rclone.org/putio/)
 * QingStor [:page_facing_up:](https://rclone.org/qingstor/)
 * Rackspace Cloud Files [:page_facing_up:](https://rclone.org/swift/)
 * Scaleway [:page_facing_up:](https://rclone.org/s3/#scaleway)
 * SFTP [:page_facing_up:](https://rclone.org/sftp/)
+* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
+* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
 * Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
 * WebDAV [:page_facing_up:](https://rclone.org/webdav/)
 * Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)
@@ -70,9 +78,12 @@ Please see [the full list of all storage providers and their features](https://r
 * [Sync](https://rclone.org/commands/rclone_sync/) (one way) mode to make a directory identical
 * [Check](https://rclone.org/commands/rclone_check/) mode to check for file hash equality
 * Can sync to and from network, e.g. two different cloud accounts
+* Optional large file chunking ([Chunker](https://rclone.org/chunker/))
 * Optional encryption ([Crypt](https://rclone.org/crypt/))
 * Optional cache ([Cache](https://rclone.org/cache/))
 * Optional FUSE mount ([rclone mount](https://rclone.org/commands/rclone_mount/))
+* Multi-threaded downloads to local disk
+* Can [serve](https://rclone.org/commands/rclone_serve/) local or remote files over HTTP/WebDav/FTP/SFTP/dlna
 
 ## Installation & documentation
 
87 RELEASE.md
@@ -1,20 +1,28 @@
Extra required software for making a release
# Release

This file describes how to make the various kinds of releases

## Extra required software for making a release

* [github-release](https://github.com/aktau/github-release) for uploading packages
* pandoc for making the html and man pages

Making a release
## Making a release

* git checkout master
* git pull
* git status - make sure everything is checked in
* Check travis & appveyor builds are green
* make check
* Check GitHub actions build for master is Green
* make test # see integration test server or run locally
* make tag
* edit docs/content/changelog.md
* edit docs/content/changelog.md # make sure to remove duplicate logs from point releases
* make tidy
* make doc
* git status - to check for new man pages - git add them
* git commit -a -v -m "Version v1.XX.0"
* make retag
* git push --tags origin master
* # Wait for the appveyor and travis builds to complete then...
* # Wait for the GitHub builds to complete then...
* make fetch_binaries
* make tarball
* make sign_upload
@@ -26,6 +34,7 @@ Making a release
* # announce with forum post, twitter post, G+ post

Early in the next release cycle update the vendored dependencies

* Review any pinned packages in go.mod and remove if possible
* make update
* git status
@@ -47,24 +56,56 @@ Can be fixed with
* GO111MODULE=on go mod vendor


Making a point release. If rclone needs a point release due to some
horrendous bug, then
* git branch v1.XX v1.XX-fixes
## Making a point release

If rclone needs a point release due to some horrendous bug:

First make the release branch. If this is a second point release then
this will be done already.

* BASE_TAG=v1.XX # eg v1.49
* NEW_TAG=${BASE_TAG}.Y # eg v1.49.1
* echo $BASE_TAG $NEW_TAG # v1.49 v1.49.1
* git branch ${BASE_TAG} ${BASE_TAG}-fixes

Now

* git co ${BASE_TAG}-fixes
* git cherry-pick any fixes
* Test (see above)
* make NEW_TAG=v1.XX.1 tag
* make NEXT_VERSION=${NEW_TAG} tag
* edit docs/content/changelog.md
* make TAG=v1.43.1 doc
* git commit -a -v -m "Version v1.XX.1"
* git tag -d -v1.XX.1
* git tag -s -m "Version v1.XX.1" v1.XX.1
* git push --tags -u origin v1.XX-fixes
* make BRANCH_PATH= TAG=v1.43.1 fetch_binaries
* make TAG=v1.43.1 tarball
* make TAG=v1.43.1 sign_upload
* make TAG=v1.43.1 check_sign
* make TAG=v1.43.1 upload
* make TAG=v1.43.1 upload_website
* make TAG=v1.43.1 upload_github
* NB this overwrites the current beta so after the release, rebuild the last travis build
* make TAG=${NEW_TAG} doc
* git commit -a -v -m "Version ${NEW_TAG}"
* git tag -d ${NEW_TAG}
* git tag -s -m "Version ${NEW_TAG}" ${NEW_TAG}
* git push --tags -u origin ${BASE_TAG}-fixes
* Wait for builds to complete
* make BRANCH_PATH= TAG=${NEW_TAG} fetch_binaries
* make TAG=${NEW_TAG} tarball
* make TAG=${NEW_TAG} sign_upload
* make TAG=${NEW_TAG} check_sign
* make TAG=${NEW_TAG} upload
* make TAG=${NEW_TAG} upload_website
* make TAG=${NEW_TAG} upload_github
* NB this overwrites the current beta so we need to do this
* git co master
* make VERSION=${NEW_TAG} startdev
* # cherry pick the changes to the changelog and VERSION
* git checkout ${BASE_TAG}-fixes VERSION docs/content/changelog.md
* git commit --amend
* git push
* Announce!

## Making a manual build of docker

The rclone docker image should autobuild on docker hub. If it doesn't
or needs to be updated then rebuild like this.

```
docker build -t rclone/rclone:1.49.1 -t rclone/rclone:1.49 -t rclone/rclone:1 -t rclone/rclone:latest .
docker push rclone/rclone:1.49.1
docker push rclone/rclone:1.49
docker push rclone/rclone:1
docker push rclone/rclone:latest
```
@@ -4,17 +4,17 @@ import (
"errors"
"strings"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fspath"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fspath"
)

// Register with Fs
func init() {
fsi := &fs.RegInfo{
Name: "alias",
Description: "Alias for a existing remote",
Description: "Alias for an existing remote",
NewFs: NewFs,
Options: []fs.Option{{
Name: "remote",
@@ -1,15 +1,16 @@
package alias

import (
"context"
"fmt"
"path"
"path/filepath"
"sort"
"testing"

_ "github.com/ncw/rclone/backend/local" // pull in test backend
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
_ "github.com/rclone/rclone/backend/local" // pull in test backend
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/stretchr/testify/require"
)

@@ -69,7 +70,7 @@ func TestNewFS(t *testing.T) {
prepare(t, remoteRoot)
f, err := fs.NewFs(fmt.Sprintf("%s:%s", remoteName, test.fsRoot))
require.NoError(t, err, what)
gotEntries, err := f.List(test.fsList)
gotEntries, err := f.List(context.Background(), test.fsList)
require.NoError(t, err, what)

sort.Sort(gotEntries)
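The alias test hunk above shows the pattern this branch applies across every backend: methods such as List, NewObject, Put and Mkdir now take a context.Context as their first argument rather than creating one internally. A minimal sketch of calling the new API (the /tmp path and choice of the local backend are illustrative; fs.NewFs itself keeps its old one-argument signature at this point in the series):

```
package main

import (
	"context"
	"fmt"

	_ "github.com/rclone/rclone/backend/local" // register the local backend
	"github.com/rclone/rclone/fs"
)

func main() {
	// The caller now owns the context, so cancellation and deadlines
	// flow down into the backend's HTTP calls.
	ctx := context.Background()
	f, err := fs.NewFs("/tmp")
	if err != nil {
		panic(err)
	}
	entries, err := f.List(ctx, "") // ctx is the new first parameter
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Remote())
	}
}
```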
@@ -2,31 +2,40 @@ package all

import (
// Active file systems
_ "github.com/ncw/rclone/backend/alias"
_ "github.com/ncw/rclone/backend/amazonclouddrive"
_ "github.com/ncw/rclone/backend/azureblob"
_ "github.com/ncw/rclone/backend/b2"
_ "github.com/ncw/rclone/backend/box"
_ "github.com/ncw/rclone/backend/cache"
_ "github.com/ncw/rclone/backend/crypt"
_ "github.com/ncw/rclone/backend/drive"
_ "github.com/ncw/rclone/backend/dropbox"
_ "github.com/ncw/rclone/backend/ftp"
_ "github.com/ncw/rclone/backend/googlecloudstorage"
_ "github.com/ncw/rclone/backend/http"
_ "github.com/ncw/rclone/backend/hubic"
_ "github.com/ncw/rclone/backend/jottacloud"
_ "github.com/ncw/rclone/backend/koofr"
_ "github.com/ncw/rclone/backend/local"
_ "github.com/ncw/rclone/backend/mega"
_ "github.com/ncw/rclone/backend/onedrive"
_ "github.com/ncw/rclone/backend/opendrive"
_ "github.com/ncw/rclone/backend/pcloud"
_ "github.com/ncw/rclone/backend/qingstor"
_ "github.com/ncw/rclone/backend/s3"
_ "github.com/ncw/rclone/backend/sftp"
_ "github.com/ncw/rclone/backend/swift"
_ "github.com/ncw/rclone/backend/union"
_ "github.com/ncw/rclone/backend/webdav"
_ "github.com/ncw/rclone/backend/yandex"
_ "github.com/rclone/rclone/backend/alias"
_ "github.com/rclone/rclone/backend/amazonclouddrive"
_ "github.com/rclone/rclone/backend/azureblob"
_ "github.com/rclone/rclone/backend/b2"
_ "github.com/rclone/rclone/backend/box"
_ "github.com/rclone/rclone/backend/cache"
_ "github.com/rclone/rclone/backend/chunker"
_ "github.com/rclone/rclone/backend/crypt"
_ "github.com/rclone/rclone/backend/drive"
_ "github.com/rclone/rclone/backend/dropbox"
_ "github.com/rclone/rclone/backend/fichier"
_ "github.com/rclone/rclone/backend/ftp"
_ "github.com/rclone/rclone/backend/googlecloudstorage"
_ "github.com/rclone/rclone/backend/googlephotos"
_ "github.com/rclone/rclone/backend/http"
_ "github.com/rclone/rclone/backend/hubic"
_ "github.com/rclone/rclone/backend/jottacloud"
_ "github.com/rclone/rclone/backend/koofr"
_ "github.com/rclone/rclone/backend/local"
_ "github.com/rclone/rclone/backend/mailru"
_ "github.com/rclone/rclone/backend/mega"
_ "github.com/rclone/rclone/backend/memory"
_ "github.com/rclone/rclone/backend/onedrive"
_ "github.com/rclone/rclone/backend/opendrive"
_ "github.com/rclone/rclone/backend/pcloud"
_ "github.com/rclone/rclone/backend/premiumizeme"
_ "github.com/rclone/rclone/backend/putio"
_ "github.com/rclone/rclone/backend/qingstor"
_ "github.com/rclone/rclone/backend/s3"
_ "github.com/rclone/rclone/backend/sftp"
_ "github.com/rclone/rclone/backend/sharefile"
_ "github.com/rclone/rclone/backend/sugarsync"
_ "github.com/rclone/rclone/backend/swift"
_ "github.com/rclone/rclone/backend/union"
_ "github.com/rclone/rclone/backend/webdav"
_ "github.com/rclone/rclone/backend/yandex"
)
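backend/all exists purely for these import side effects: each blank import runs that backend's init(), which registers the backend with the fs layer, so importing this single package makes every remote type resolvable by name. A stripped-down sketch of the mechanism (the registry map and register function here are illustrative stand-ins, not the real fs internals):

```
package main

import "fmt"

// registry stands in for what the fs layer maintains internally.
var registry = map[string]bool{}

// register plays the role of fs.Register.
func register(name string) { registry[name] = true }

// A backend package registers itself when imported; this init
// plays the role of, say, backend/memory's init().
func init() { register("memory") }

func main() {
	// After the blank imports have run, lookups by remote type succeed.
	fmt.Println(registry["memory"]) // true
}
```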
@@ -12,6 +12,7 @@ we ignore assets completely!
*/

import (
"context"
"encoding/json"
"fmt"
"io"
@@ -22,18 +23,18 @@ import (
"time"

acd "github.com/ncw/go-acd"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/dircache"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/dircache"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
)

@@ -135,15 +136,23 @@ which downloads the file through a temporary URL directly from the
underlying S3 storage.`,
Default: defaultTempLinkThreshold,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
// Encode invalid UTF-8 bytes as json doesn't handle them properly.
Default: (encoder.Base |
encoder.EncodeInvalidUtf8),
}},
})
}

// Options defines the configuration for this backend
type Options struct {
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
Checkpoint string `config:"checkpoint"`
UploadWaitPerGB fs.Duration `config:"upload_wait_per_gb"`
TempLinkThreshold fs.SizeSuffix `config:"templink_threshold"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote acd server
@@ -247,6 +256,7 @@ func filterRequest(req *http.Request) {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -308,7 +318,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, f.trueRootID, f)

// Find the current root
err = f.dirCache.FindRoot(false)
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
@@ -316,12 +326,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, f.trueRootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
err = tempF.dirCache.FindRoot(false)
err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
_, err := tempF.newObjectWithInfo(remote, nil)
_, err := tempF.newObjectWithInfo(ctx, remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -331,7 +341,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
// See https://github.com/ncw/rclone/issues/2182
// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
@@ -353,7 +363,7 @@ func (f *Fs) getRootInfo() (rootInfo *acd.Folder, err error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *acd.Node) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -362,7 +372,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)
// Set info but not meta
o.info = info
} else {
err := o.readMetaData() // reads info and meta, returning an error
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -372,18 +382,18 @@ func (f *Fs) newObjectWithInfo(remote string, info *acd.Node) (fs.Object, error)

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
//fs.Debugf(f, "FindLeaf(%q, %q)", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var subFolder *acd.Folder
err = f.pacer.Call(func() (bool, error) {
subFolder, resp, err = folder.GetFolder(leaf)
subFolder, resp, err = folder.GetFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -404,13 +414,13 @@ func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err er
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
//fmt.Printf("CreateDir(%q, %q)\n", pathID, leaf)
folder := acd.FolderFromId(pathID, f.c.Nodes)
var resp *http.Response
var info *acd.Folder
err = f.pacer.Call(func() (bool, error) {
info, resp, err = folder.CreateFolder(leaf)
info, resp, err = folder.CreateFolder(f.opt.Enc.FromStandardName(leaf))
return f.shouldRetry(resp, err)
})
if err != nil {
@@ -478,6 +488,7 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
if !hasValidParent {
continue
}
*node.Name = f.opt.Enc.ToStandardName(*node.Name)
// Store the nodes up in case we have to retry the listing
out = append(out, node)
}
@@ -502,12 +513,12 @@ func (f *Fs) listAll(dirID string, title string, directoriesOnly bool, filesOnly
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(false)
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
directoryID, err := f.dirCache.FindDir(dir, false)
directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
@@ -525,7 +536,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
d := fs.NewDir(remote, when).SetID(*node.Id)
entries = append(entries, d)
case fileKind:
o, err := f.newObjectWithInfo(remote, node)
o, err := f.newObjectWithInfo(ctx, remote, node)
if err != nil {
iErr = err
return true
@@ -569,7 +580,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// At the end of large uploads. The speculation is that the timeout
// is waiting for the sha1 hashing to complete and the file may well
// be properly uploaded.
func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
func (f *Fs) checkUpload(ctx context.Context, resp *http.Response, in io.Reader, src fs.ObjectInfo, inInfo *acd.File, inErr error, uploadTime time.Duration) (fixedError bool, info *acd.File, err error) {
// Return if no error - all is well
if inErr == nil {
return false, inInfo, inErr
@@ -609,7 +620,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
fs.Debugf(src, "Error detected after finished upload - waiting to see if object was uploaded correctly: %v (%q)", inErr, httpStatus)
remote := src.Remote()
for i := 1; i <= retries; i++ {
o, err := f.NewObject(remote)
o, err := f.NewObject(ctx, remote)
if err == fs.ErrorObjectNotFound {
fs.Debugf(src, "Object not found - waiting (%d/%d)", i, retries)
} else if err != nil {
@@ -635,7 +646,7 @@ func (f *Fs) checkUpload(resp *http.Response, in io.Reader, src fs.ObjectInfo, i
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
// Temporary Object under construction
@@ -644,17 +655,17 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
remote: remote,
}
// Check if object already exists
err := o.readMetaData()
err := o.readMetaData(ctx)
switch err {
case nil:
return o, o.Update(in, src, options...)
return o, o.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
default:
return nil, err
}
// If not create it
leaf, directoryID, err := f.dirCache.FindRootAndPath(remote, true)
leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -667,10 +678,10 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
err = f.pacer.CallNoRetry(func() (bool, error) {
start := time.Now()
f.tokenRenewer.Start()
info, resp, err = folder.Put(in, leaf)
info, resp, err = folder.Put(in, f.opt.Enc.FromStandardName(leaf))
f.tokenRenewer.Stop()
var ok bool
ok, info, err = f.checkUpload(resp, in, src, info, err, time.Since(start))
ok, info, err = f.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
@@ -684,13 +695,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
}

// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
err := f.dirCache.FindRoot(true)
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
_, err = f.dirCache.FindDir(dir, true)
_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}
@@ -704,7 +715,7 @@ func (f *Fs) Mkdir(dir string) error {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// go test -v -run '^Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsMove)$'
srcObj, ok := src.(*Object)
if !ok {
@@ -713,15 +724,15 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}

// create the destination directory if necessary
err := f.dirCache.FindRoot(true)
err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return nil, err
}
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(srcObj.remote, false)
srcLeaf, srcDirectoryID, err := srcObj.fs.dirCache.FindPath(ctx, srcObj.remote, false)
if err != nil {
return nil, err
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(remote, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, remote, true)
if err != nil {
return nil, err
}
@@ -737,12 +748,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
srcErr, dstErr error
)
for i := 1; i <= fs.Config.LowLevelRetries; i++ {
_, srcErr = srcObj.fs.NewObject(srcObj.remote) // try reading the object
_, srcErr = srcObj.fs.NewObject(ctx, srcObj.remote) // try reading the object
if srcErr != nil && srcErr != fs.ErrorObjectNotFound {
// exit if error on source
return nil, srcErr
}
dstObj, dstErr = f.NewObject(remote)
dstObj, dstErr = f.NewObject(ctx, remote)
if dstErr != nil && dstErr != fs.ErrorObjectNotFound {
// exit if error on dst
return nil, dstErr
@@ -771,7 +782,7 @@ func (f *Fs) DirCacheFlush() {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(src, "DirMove error: not same remote type")
@@ -787,14 +798,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
}

// find the root src directory
err = srcFs.dirCache.FindRoot(false)
err = srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}

// find the root dst directory
if dstRemote != "" {
err = f.dirCache.FindRoot(true)
err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
@@ -809,14 +820,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
if dstRemote == "" {
findPath = f.root
}
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(findPath, true)
dstLeaf, dstDirectoryID, err := f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}

// Check destination does not exist
if dstRemote != "" {
_, err = f.dirCache.FindDir(dstRemote, false)
_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -832,7 +843,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
if srcRemote == "" {
srcDirectoryID, err = srcFs.dirCache.RootParentID()
} else {
_, srcDirectoryID, err = srcFs.dirCache.FindPath(findPath, false)
_, srcDirectoryID, err = srcFs.dirCache.FindPath(ctx, findPath, false)
}
if err != nil {
return err
@@ -840,7 +851,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {
srcLeaf, _ := dircache.SplitPath(srcPath)

// Find ID of src
srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}
@@ -873,17 +884,17 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) (err error) {

// purgeCheck remotes the root directory, if check is set then it
// refuses to do so if it has anything in
func (f *Fs) purgeCheck(dir string, check bool) error {
func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
err := dc.FindRoot(false)
err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
rootID, err := dc.FindDir(dir, false)
rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
}
@@ -932,8 +943,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
return f.purgeCheck(dir, true)
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
@@ -955,7 +966,7 @@ func (f *Fs) Hashes() hash.Set {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
//func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
//func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
// srcObj, ok := src.(*Object)
// if !ok {
// fs.Debugf(src, "Can't copy - not same remote type")
@@ -966,7 +977,7 @@ func (f *Fs) Hashes() hash.Set {
// if err != nil {
// return nil, err
// }
// return f.NewObject(remote), nil
// return f.NewObject(ctx, remote), nil
//}

// Purge deletes all the files and the container
@@ -974,8 +985,8 @@ func (f *Fs) Hashes() hash.Set {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
func (f *Fs) Purge() error {
return f.purgeCheck("", false)
func (f *Fs) Purge(ctx context.Context) error {
return f.purgeCheck(ctx, "", false)
}

// ------------------------------------------------------------
@@ -999,7 +1010,7 @@ func (o *Object) Remote() string {
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -1022,11 +1033,11 @@ func (o *Object) Size() int64 {
// it also sets the info
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (o *Object) readMetaData() (err error) {
func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.info != nil {
return nil
}
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(o.remote, false)
leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, o.remote, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return fs.ErrorObjectNotFound
@@ -1037,7 +1048,7 @@ func (o *Object) readMetaData() (err error) {
var resp *http.Response
var info *acd.File
err = o.fs.pacer.Call(func() (bool, error) {
info, resp, err = folder.GetFile(leaf)
info, resp, err = folder.GetFile(o.fs.opt.Enc.FromStandardName(leaf))
return o.fs.shouldRetry(resp, err)
})
if err != nil {
@@ -1055,8 +1066,8 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
if err != nil {
fs.Debugf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -1070,7 +1081,7 @@ func (o *Object) ModTime() time.Time {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
// FIXME not implemented
return fs.ErrorCantSetModTime
}
@@ -1081,7 +1092,7 @@ func (o *Object) Storable() bool {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
bigObject := o.Size() >= int64(o.fs.opt.TempLinkThreshold)
if bigObject {
fs.Debugf(o, "Downloading large object via tempLink")
@@ -1093,7 +1104,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
if !bigObject {
in, resp, err = file.OpenHeaders(headers)
} else {
in, resp, err = file.OpenTempURLHeaders(rest.ClientWithHeaderReset(o.fs.noAuthClient, headers), headers)
in, resp, err = file.OpenTempURLHeaders(o.fs.noAuthClient, headers)
}
return o.fs.shouldRetry(resp, err)
})
@@ -1103,7 +1114,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
file := acd.File{Node: o.info}
var info *acd.File
var resp *http.Response
@@ -1114,7 +1125,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
info, resp, err = file.Overwrite(in)
o.fs.tokenRenewer.Stop()
var ok bool
ok, info, err = o.fs.checkUpload(resp, in, src, info, err, time.Since(start))
ok, info, err = o.fs.checkUpload(ctx, resp, in, src, info, err, time.Since(start))
if ok {
return false, nil
}
@@ -1139,7 +1150,7 @@ func (f *Fs) removeNode(info *acd.Node) error {
}

// Remove an object
func (o *Object) Remove() error {
func (o *Object) Remove(ctx context.Context) error {
return o.fs.removeNode(o.info)
}

@@ -1157,7 +1168,7 @@ func (f *Fs) restoreNode(info *acd.Node) (newInfo *acd.Node, err error) {
func (f *Fs) renameNode(info *acd.Node, newName string) (newInfo *acd.Node, err error) {
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
newInfo, resp, err = info.Rename(newName)
newInfo, resp, err = info.Rename(f.opt.Enc.FromStandardName(newName))
return f.shouldRetry(resp, err)
})
return newInfo, err
@@ -1261,7 +1272,7 @@ OnConflict:
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
func (o *Object) MimeType(ctx context.Context) string {
if o.info.ContentProperties != nil && o.info.ContentProperties.ContentType != nil {
return *o.info.ContentProperties.ContentType
}
@@ -1274,7 +1285,7 @@ func (o *Object) MimeType() string {
// Automatically restarts itself in case of unexpected behaviour of the remote.
//
// Close the returned channel to stop being notified.
func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
checkpoint := f.opt.Checkpoint

go func() {
@@ -1353,10 +1364,11 @@ func (f *Fs) changeNotifyRunner(notifyFunc func(string, fs.EntryType), checkpoin
if len(node.Parents) > 0 {
if path, ok := f.dirCache.GetInv(node.Parents[0]); ok {
// and append the drive file name to compute the full file name
name := f.opt.Enc.ToStandardName(*node.Name)
if len(path) > 0 {
path = path + "/" + *node.Name
path = path + "/" + name
} else {
path = *node.Name
path = name
}
// this will now clear the actual file too
pathsToClear = append(pathsToClear, entryType{path: path, entryType: fs.EntryObject})

@@ -7,9 +7,9 @@ package amazonclouddrive_test
import (
"testing"

"github.com/ncw/rclone/backend/amazonclouddrive"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/backend/amazonclouddrive"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
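Two themes run through the amazonclouddrive diff above: ctx threading, and passing every name through f.opt.Enc before it reaches the API (FromStandardName on the way out, ToStandardName when listing). A small sketch of the encoder round trip using the default this changeset sets for the backend; the file name containing an invalid UTF-8 byte is an arbitrary example:

```
package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/encoder"
)

func main() {
	// The default encoding the changeset gives the acd backend.
	enc := encoder.Base | encoder.EncodeInvalidUtf8

	// FromStandardName maps rclone's internal form to the wire form;
	// the invalid UTF-8 byte is escaped so JSON stays well formed.
	wire := enc.FromStandardName("report\xbf.txt")
	fmt.Printf("wire form: %q\n", wire)

	// ToStandardName reverses the mapping when listing the remote.
	fmt.Println(enc.ToStandardName(wire) == "report\xbf.txt") // true
}
```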
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
// +build !plan9,!solaris,go1.8
// +build !plan9,!solaris

package azureblob


@@ -1,14 +1,14 @@
// Test AzureBlob filesystem interface

// +build !plan9,!solaris,go1.8
// +build !plan9,!solaris

package azureblob

import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -1,6 +1,6 @@
// Build for azureblob for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build plan9 solaris !go1.8
// +build plan9 solaris

package azureblob
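The azureblob hunks only touch build constraints: the go1.8 guard is dropped now that rclone's minimum Go version has moved past it, leaving the plan9/solaris exclusions. As a reminder of how the paired files work, a sketch (the comment text is illustrative):

```
// +build !plan9,!solaris

// This file compiles everywhere except plan9 and solaris; a sibling
// stub file carries the inverse constraint ("// +build plan9 solaris")
// so the package always has at least one buildable source file.
package azureblob
```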
@@ -7,7 +7,7 @@ import (
"strings"
"time"

"github.com/ncw/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fserrors"
)

// Error describes a B2 error response
@@ -50,7 +50,7 @@ type Timestamp time.Time
// MarshalJSON turns a Timestamp into JSON (in UTC)
func (t *Timestamp) MarshalJSON() (out []byte, err error) {
timestamp := (*time.Time)(t).UTC().UnixNano()
return []byte(strconv.FormatInt(timestamp/1E6, 10)), nil
return []byte(strconv.FormatInt(timestamp/1e6, 10)), nil
}

// UnmarshalJSON turns JSON into a Timestamp
@@ -59,7 +59,7 @@ func (t *Timestamp) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
*t = Timestamp(time.Unix(timestamp/1E3, (timestamp%1E3)*1E6).UTC())
*t = Timestamp(time.Unix(timestamp/1e3, (timestamp%1e3)*1e6).UTC())
return nil
}
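The 1E6 → 1e6 edits above are purely stylistic (lowercase exponents are the lint-preferred spelling); the arithmetic is unchanged: B2 timestamps are milliseconds since the Unix epoch. A worked round trip of the two conversions used by MarshalJSON and UnmarshalJSON, with an arbitrary example date:

```
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 10, 26, 11, 4, 5, 500e6, time.UTC)

	// Marshal direction: nanoseconds -> milliseconds.
	ms := t.UnixNano() / 1e6
	fmt.Println(ms) // 1572087845500

	// Unmarshal direction: split ms into whole seconds + nanoseconds.
	back := time.Unix(ms/1e3, (ms%1e3)*1e6).UTC()
	fmt.Println(back.Equal(t)) // true: lossless at millisecond precision
}
```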
@@ -189,6 +189,21 @@ type GetUploadURLResponse struct {
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when uploading files to this bucket, see b2_upload_file.
}

// GetDownloadAuthorizationRequest is passed to b2_get_download_authorization
type GetDownloadAuthorizationRequest struct {
BucketID string `json:"bucketId"` // The ID of the bucket that you want to upload to.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
ValidDurationInSeconds int64 `json:"validDurationInSeconds"` // The number of seconds before the authorization token will expire. The minimum value is 1 second. The maximum value is 604800 which is one week in seconds.
B2ContentDisposition string `json:"b2ContentDisposition,omitempty"` // optional - If this is present, download requests using the returned authorization must include the same value for b2ContentDisposition.
}

// GetDownloadAuthorizationResponse is received from b2_get_download_authorization
type GetDownloadAuthorizationResponse struct {
BucketID string `json:"bucketId"` // The unique ID of the bucket.
FileNamePrefix string `json:"fileNamePrefix"` // The file name prefix of files the download authorization token will allow access to.
AuthorizationToken string `json:"authorizationToken"` // The authorizationToken that must be used when downloading files, see b2_download_file_by_name.
}

// FileInfo is received from b2_upload_file, b2_get_file_info and b2_finish_large_file
type FileInfo struct {
ID string `json:"fileId"` // The unique identifier for this version of this file. Used with b2_get_file_info, b2_download_file_by_id, and b2_delete_file_version.
@@ -311,3 +326,14 @@ type CancelLargeFileResponse struct {
AccountID string `json:"accountId"` // The identifier for the account.
BucketID string `json:"bucketId"` // The unique ID of the bucket.
}

// CopyFileRequest is as passed to b2_copy_file
type CopyFileRequest struct {
SourceID string `json:"sourceFileId"` // The ID of the source file being copied.
Name string `json:"fileName"` // The name of the new file being created.
Range string `json:"range,omitempty"` // The range of bytes to copy. If not provided, the whole source file will be copied.
MetadataDirective string `json:"metadataDirective,omitempty"` // The strategy for how to populate metadata for the new file: COPY or REPLACE
ContentType string `json:"contentType,omitempty"` // The MIME type of the content of the file (REPLACE only)
Info map[string]string `json:"fileInfo,omitempty"` // This field stores the metadata that will be stored with the file. (REPLACE only)
DestBucketID string `json:"destinationBucketId,omitempty"` // The destination ID of the bucket if set, if not the source bucket will be used
}
@@ -4,8 +4,8 @@ import (
"testing"
"time"

"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fstest"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fstest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
988 backend/b2/b2.go
File diff suppressed because it is too large
@@ -4,7 +4,7 @@ import (
"testing"
"time"

"github.com/ncw/rclone/fstest"
"github.com/rclone/rclone/fstest"
)

// Test b2 string encoding
@@ -4,8 +4,8 @@ package b2
import (
"testing"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
@@ -6,6 +6,7 @@ package b2

import (
"bytes"
"context"
"crypto/sha1"
"encoding/hex"
"fmt"
@@ -14,12 +15,12 @@ import (
"strings"
"sync"

"github.com/ncw/rclone/backend/b2/api"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/b2/api"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/rest"
)

type hashAppendingReader struct {
@@ -80,7 +81,7 @@ type largeUpload struct {
}

// newLargeUpload starts an upload of object o from in with metadata in src
func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
func (f *Fs) newLargeUpload(ctx context.Context, o *Object, in io.Reader, src fs.ObjectInfo) (up *largeUpload, err error) {
remote := o.remote
size := src.Size()
parts := int64(0)
@@ -98,33 +99,34 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
sha1SliceSize = parts
}

modTime := src.ModTime()
modTime := src.ModTime(ctx)
opts := rest.Opts{
Method: "POST",
Path: "/b2_start_large_file",
}
bucketID, err := f.getBucketID()
bucket, bucketPath := o.split()
bucketID, err := f.getBucketID(ctx, bucket)
if err != nil {
return nil, err
}
var request = api.StartLargeFileRequest{
BucketID: bucketID,
Name: o.fs.root + remote,
ContentType: fs.MimeType(src),
Name: f.opt.Enc.FromStandardPath(bucketPath),
ContentType: fs.MimeType(ctx, src),
Info: map[string]string{
timeKey: timeString(modTime),
},
}
// Set the SHA1 if known
if !o.fs.opt.DisableCheckSum {
if calculatedSha1, err := src.Hash(hash.SHA1); err == nil && calculatedSha1 != "" {
if calculatedSha1, err := src.Hash(ctx, hash.SHA1); err == nil && calculatedSha1 != "" {
request.Info[sha1Key] = calculatedSha1
}
}
var response api.StartLargeFileResponse
err = f.pacer.Call(func() (bool, error) {
resp, err := f.srv.CallJSON(&opts, &request, &response)
return f.shouldRetry(resp, err)
resp, err := f.srv.CallJSON(ctx, &opts, &request, &response)
return f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, err
@@ -148,7 +150,7 @@ func (f *Fs) newLargeUpload(o *Object, in io.Reader, src fs.ObjectInfo) (up *lar
// getUploadURL returns the upload info with the UploadURL and the AuthorizationToken
//
// This should be returned with returnUploadURL when finished
func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err error) {
func (up *largeUpload) getUploadURL(ctx context.Context) (upload *api.GetUploadPartURLResponse, err error) {
up.uploadMu.Lock()
defer up.uploadMu.Unlock()
if len(up.uploads) == 0 {
@@ -160,8 +162,8 @@ func (up *largeUpload) getUploadURL() (upload *api.GetUploadPartURLResponse, err
ID: up.id,
}
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &upload)
return up.f.shouldRetry(resp, err)
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &upload)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return nil, errors.Wrap(err, "failed to get upload URL")
@@ -190,12 +192,12 @@ func (up *largeUpload) clearUploadURL() {
}

// Transfer a chunk
func (up *largeUpload) transferChunk(part int64, body []byte) error {
func (up *largeUpload) transferChunk(ctx context.Context, part int64, body []byte) error {
err := up.f.pacer.Call(func() (bool, error) {
fs.Debugf(up.o, "Sending chunk %d length %d", part, len(body))

// Get upload URL
upload, err := up.getUploadURL()
upload, err := up.getUploadURL(ctx)
if err != nil {
return false, err
}
@@ -239,8 +241,8 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {

var response api.UploadPartResponse

resp, err := up.f.srv.CallJSON(&opts, nil, &response)
retry, err := up.f.shouldRetry(resp, err)
resp, err := up.f.srv.CallJSON(ctx, &opts, nil, &response)
retry, err := up.f.shouldRetry(ctx, resp, err)
if err != nil {
fs.Debugf(up.o, "Error sending chunk %d (retry=%v): %v: %#v", part, retry, err, err)
}
@@ -262,7 +264,7 @@ func (up *largeUpload) transferChunk(part int64, body []byte) error {
}

// finish closes off the large upload
func (up *largeUpload) finish() error {
func (up *largeUpload) finish(ctx context.Context) error {
fs.Debugf(up.o, "Finishing large file upload with %d parts", up.parts)
opts := rest.Opts{
Method: "POST",
@@ -274,8 +276,8 @@ func (up *largeUpload) finish() error {
}
var response api.FileInfo
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
})
if err != nil {
return err
@@ -284,7 +286,7 @@ func (up *largeUpload) finish() error {
}

// cancel aborts the large upload
func (up *largeUpload) cancel() error {
func (up *largeUpload) cancel(ctx context.Context) error {
opts := rest.Opts{
Method: "POST",
Path: "/b2_cancel_large_file",
@@ -294,18 +296,18 @@ func (up *largeUpload) cancel() error {
}
var response api.CancelLargeFileResponse
err := up.f.pacer.Call(func() (bool, error) {
resp, err := up.f.srv.CallJSON(&opts, &request, &response)
return up.f.shouldRetry(resp, err)
resp, err := up.f.srv.CallJSON(ctx, &opts, &request, &response)
return up.f.shouldRetry(ctx, resp, err)
})
return err
}

func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
func (up *largeUpload) managedTransferChunk(ctx context.Context, wg *sync.WaitGroup, errs chan error, part int64, buf []byte) {
wg.Add(1)
go func(part int64, buf []byte) {
defer wg.Done()
defer up.f.putUploadBlock(buf)
err := up.transferChunk(part, buf)
err := up.transferChunk(ctx, part, buf)
if err != nil {
select {
case errs <- err:
@@ -315,7 +317,7 @@ func (up *largeUpload) managedTransferChunk(wg *sync.WaitGroup, errs chan error,
}(part, buf)
}

func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
func (up *largeUpload) finishOrCancelOnError(ctx context.Context, err error, errs chan error) error {
if err == nil {
select {
case err = <-errs:
@@ -324,19 +326,19 @@ func (up *largeUpload) finishOrCancelOnError(err error, errs chan error) error {
}
if err != nil {
fs.Debugf(up.o, "Cancelling large file upload due to error: %v", err)
cancelErr := up.cancel()
cancelErr := up.cancel(ctx)
if cancelErr != nil {
fs.Errorf(up.o, "Failed to cancel large file upload: %v", cancelErr)
}
return err
}
return up.finish()
return up.finish(ctx)
}

// Stream uploads the chunks from the input, starting with a required initial
// chunk. Assumes the file size is unknown and will upload until the input
// reaches EOF.
func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {
func (up *largeUpload) Stream(ctx context.Context, initialUploadBlock []byte) (err error) {
fs.Debugf(up.o, "Starting streaming of large file (id %q)", up.id)
errs := make(chan error, 1)
hasMoreParts := true
@@ -344,7 +346,7 @@ func (up *largeUpload) Stream(initialUploadBlock []byte) (err error) {

// Transfer initial chunk
up.size = int64(len(initialUploadBlock))
up.managedTransferChunk(&wg, errs, 1, initialUploadBlock)
up.managedTransferChunk(ctx, &wg, errs, 1, initialUploadBlock)

outer:
for part := int64(2); hasMoreParts; part++ {
@@ -386,16 +388,16 @@ outer:
}

// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
up.managedTransferChunk(ctx, &wg, errs, part, buf)
}
wg.Wait()
up.sha1s = up.sha1s[:up.parts]

return up.finishOrCancelOnError(err, errs)
return up.finishOrCancelOnError(ctx, err, errs)
}

// Upload uploads the chunks from the input
func (up *largeUpload) Upload() error {
func (up *largeUpload) Upload(ctx context.Context) error {
fs.Debugf(up.o, "Starting upload of large file in %d chunks (id %q)", up.parts, up.id)
remaining := up.size
errs := make(chan error, 1)
@@ -426,10 +428,10 @@ outer:
}

// Transfer the chunk
up.managedTransferChunk(&wg, errs, part, buf)
up.managedTransferChunk(ctx, &wg, errs, part, buf)
remaining -= reqSize
}
wg.Wait()

return up.finishOrCancelOnError(err, errs)
return up.finishOrCancelOnError(ctx, err, errs)
}
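Beyond the ctx threading, the hunks above show the shape of b2's large-file machinery: chunks upload concurrently, the first failure is parked in a one-slot error channel, and finishOrCancelOnError reconciles after wg.Wait(). A reduced sketch of that pattern (transferChunk is a stand-in for the real per-part HTTP upload):

```
package main

import (
	"fmt"
	"sync"
)

// transferChunk stands in for the real per-part HTTP upload.
func transferChunk(part int) error { return nil }

func main() {
	var wg sync.WaitGroup
	errs := make(chan error, 1) // one slot: only the first error is kept
	for part := 1; part <= 4; part++ {
		wg.Add(1)
		go func(part int) {
			defer wg.Done()
			if err := transferChunk(part); err != nil {
				select {
				case errs <- err: // first failure parks here
				default: // an error is already recorded; drop this one
				}
			}
		}(part)
	}
	wg.Wait()
	select {
	case err := <-errs:
		fmt.Println("would cancel the large file:", err)
	default:
		fmt.Println("all chunks sent; would finish the large file")
	}
}
```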
@@ -202,3 +202,23 @@ type CommitUpload struct {
ContentModifiedAt Time `json:"content_modified_at"`
} `json:"attributes"`
}

// ConfigJSON defines the shape of a box config.json
type ConfigJSON struct {
BoxAppSettings AppSettings `json:"boxAppSettings"`
EnterpriseID string `json:"enterpriseID"`
}

// AppSettings defines the shape of the boxAppSettings within box config.json
type AppSettings struct {
ClientID string `json:"clientID"`
ClientSecret string `json:"clientSecret"`
AppAuth AppAuth `json:"appAuth"`
}

// AppAuth defines the shape of the appAuth within boxAppSettings in config.json
type AppAuth struct {
PublicKeyID string `json:"publicKeyID"`
PrivateKey string `json:"privateKey"`
Passphrase string `json:"passphrase"`
}
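These types mirror the config.json that the Box developer console exports for JWT-authenticated apps; getBoxConfig further down simply reads the file and unmarshals into them. A self-contained sketch of the parse (all field values are placeholders):

```
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the api.ConfigJSON / AppSettings / AppAuth shapes above.
type ConfigJSON struct {
	BoxAppSettings AppSettings `json:"boxAppSettings"`
	EnterpriseID   string      `json:"enterpriseID"`
}

type AppSettings struct {
	ClientID     string  `json:"clientID"`
	ClientSecret string  `json:"clientSecret"`
	AppAuth      AppAuth `json:"appAuth"`
}

type AppAuth struct {
	PublicKeyID string `json:"publicKeyID"`
	PrivateKey  string `json:"privateKey"`
	Passphrase  string `json:"passphrase"`
}

func main() {
	// Placeholder payload in the shape Box's developer console exports.
	raw := []byte(`{"boxAppSettings":{"clientID":"id","clientSecret":"secret","appAuth":{"publicKeyID":"kid","privateKey":"-----BEGIN ENCRYPTED PRIVATE KEY-----...","passphrase":"pw"}},"enterpriseID":"12345"}`)
	var cfg ConfigJSON
	if err := json.Unmarshal(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.EnterpriseID, cfg.BoxAppSettings.ClientID)
}
```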
@@ -10,8 +10,13 @@ package box
// FIXME box can copy a directory

import (
+"context"
+"crypto/rsa"
+"encoding/json"
+"encoding/pem"
"fmt"
"io"
+"io/ioutil"
"log"
"net/http"
"net/url"
@@ -20,20 +25,27 @@ import (
"strings"
"time"

-"github.com/ncw/rclone/backend/box/api"
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/config"
-"github.com/ncw/rclone/fs/config/configmap"
-"github.com/ncw/rclone/fs/config/configstruct"
-"github.com/ncw/rclone/fs/config/obscure"
-"github.com/ncw/rclone/fs/fserrors"
-"github.com/ncw/rclone/fs/hash"
-"github.com/ncw/rclone/lib/dircache"
-"github.com/ncw/rclone/lib/oauthutil"
-"github.com/ncw/rclone/lib/pacer"
-"github.com/ncw/rclone/lib/rest"
+"github.com/rclone/rclone/lib/encoder"
+"github.com/rclone/rclone/lib/jwtutil"
+
+"github.com/youmark/pkcs8"

"github.com/pkg/errors"
+"github.com/rclone/rclone/backend/box/api"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
+"github.com/rclone/rclone/fs/config/configmap"
+"github.com/rclone/rclone/fs/config/configstruct"
+"github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fs/fserrors"
+"github.com/rclone/rclone/fs/fshttp"
+"github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/lib/dircache"
+"github.com/rclone/rclone/lib/oauthutil"
+"github.com/rclone/rclone/lib/pacer"
+"github.com/rclone/rclone/lib/rest"
"golang.org/x/oauth2"
+"golang.org/x/oauth2/jws"
)

const (
@@ -48,6 +60,7 @@ const (
listChunks = 1000 // chunk size to read directory listings
minUploadCutoff = 50000000 // upload cutoff can be no lower than this
defaultUploadCutoff = 50 * 1024 * 1024
+tokenURL = "https://api.box.com/oauth2/token"
)

// Globals
@@ -72,9 +85,34 @@ func init() {
Description: "Box",
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
-err := oauthutil.Config("box", name, m, oauthConfig)
-if err != nil {
-log.Fatalf("Failed to configure token: %v", err)
+jsonFile, ok := m.Get("box_config_file")
+boxSubType, boxSubTypeOk := m.Get("box_sub_type")
+var err error
+if ok && boxSubTypeOk && jsonFile != "" && boxSubType != "" {
+boxConfig, err := getBoxConfig(jsonFile)
+if err != nil {
+log.Fatalf("Failed to configure token: %v", err)
+}
+privateKey, err := getDecryptedPrivateKey(boxConfig)
+if err != nil {
+log.Fatalf("Failed to configure token: %v", err)
+}
+claims, err := getClaims(boxConfig, boxSubType)
+if err != nil {
+log.Fatalf("Failed to configure token: %v", err)
+}
+signingHeaders := getSigningHeaders(boxConfig)
+queryParams := getQueryParams(boxConfig)
+client := fshttp.NewClient(fs.Config)
+err = jwtutil.Config("box", name, claims, signingHeaders, queryParams, privateKey, m, client)
+if err != nil {
+log.Fatalf("Failed to configure token with jwt authentication: %v", err)
+}
+} else {
+err = oauthutil.Config("box", name, m, oauthConfig)
+if err != nil {
+log.Fatalf("Failed to configure token with oauth authentication: %v", err)
+}
+}
},
Options: []fs.Option{{
@@ -83,6 +121,19 @@ func init() {
}, {
Name: config.ConfigClientSecret,
Help: "Box App Client Secret\nLeave blank normally.",
+}, {
+Name: "box_config_file",
+Help: "Box App config.json location\nLeave blank normally.",
+}, {
+Name: "box_sub_type",
+Default: "user",
+Examples: []fs.OptionExample{{
+Value: "user",
+Help: "Rclone should act on behalf of a user",
+}, {
+Value: "enterprise",
+Help: "Rclone should act on behalf of a service account",
+}},
}, {
Name: "upload_cutoff",
Help: "Cutoff for switching to multipart upload (>= 50MB).",
@@ -93,14 +144,98 @@ func init() {
Help: "Max number of times to try committing a multipart file.",
Default: 100,
Advanced: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+// From https://developer.box.com/docs/error-codes#section-400-bad-request :
+// > Box only supports file or folder names that are 255 characters or less.
+// > File names containing non-printable ascii, "/" or "\", names with leading
+// > or trailing spaces, and the special names “.” and “..” are also unsupported.
+//
+// Testing revealed names with leading spaces work fine.
+// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
+Default: (encoder.Display |
+encoder.EncodeBackSlash |
+encoder.EncodeRightSpace |
+encoder.EncodeInvalidUtf8),
}},
})
}

func getBoxConfig(configFile string) (boxConfig *api.ConfigJSON, err error) {
file, err := ioutil.ReadFile(configFile)
if err != nil {
return nil, errors.Wrap(err, "box: failed to read Box config")
}
err = json.Unmarshal(file, &boxConfig)
if err != nil {
return nil, errors.Wrap(err, "box: failed to parse Box config")
}
return boxConfig, nil
}

func getClaims(boxConfig *api.ConfigJSON, boxSubType string) (claims *jws.ClaimSet, err error) {
val, err := jwtutil.RandomHex(20)
if err != nil {
return nil, errors.Wrap(err, "box: failed to generate random string for jti")
}

claims = &jws.ClaimSet{
Iss: boxConfig.BoxAppSettings.ClientID,
Sub: boxConfig.EnterpriseID,
Aud: tokenURL,
Iat: time.Now().Unix(),
Exp: time.Now().Add(time.Second * 45).Unix(),
PrivateClaims: map[string]interface{}{
"box_sub_type": boxSubType,
"aud": tokenURL,
"jti": val,
},
}

return claims, nil
}

func getSigningHeaders(boxConfig *api.ConfigJSON) *jws.Header {
signingHeaders := &jws.Header{
Algorithm: "RS256",
Typ: "JWT",
KeyID: boxConfig.BoxAppSettings.AppAuth.PublicKeyID,
}

return signingHeaders
}

func getQueryParams(boxConfig *api.ConfigJSON) map[string]string {
queryParams := map[string]string{
"client_id": boxConfig.BoxAppSettings.ClientID,
"client_secret": boxConfig.BoxAppSettings.ClientSecret,
}

return queryParams
}

func getDecryptedPrivateKey(boxConfig *api.ConfigJSON) (key *rsa.PrivateKey, err error) {

block, rest := pem.Decode([]byte(boxConfig.BoxAppSettings.AppAuth.PrivateKey))
if len(rest) > 0 {
return nil, errors.Wrap(err, "box: extra data included in private key")
}

rsaKey, err := pkcs8.ParsePKCS8PrivateKey(block.Bytes, []byte(boxConfig.BoxAppSettings.AppAuth.Passphrase))
if err != nil {
return nil, errors.Wrap(err, "box: failed to decrypt private key")
}

return rsaKey.(*rsa.PrivateKey), nil
}

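The five helpers above are new and exist to feed jwtutil.Config in init(). A minimal sketch of how they compose, assuming cfg is a parsed *api.ConfigJSON; jwtutil.Config builds and exchanges the equivalent assertion against tokenURL internally, so this is illustration rather than the backend's actual call path:

claims, err := getClaims(cfg, "enterprise")
if err != nil {
	log.Fatal(err)
}
key, err := getDecryptedPrivateKey(cfg)
if err != nil {
	log.Fatal(err)
}
// RS256-sign header+claims into the JWT assertion sent to the token endpoint
assertion, err := jws.Encode(getSigningHeaders(cfg), claims, key)
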
// Options defines the configuration for this backend
type Options struct {
-UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
-CommitRetries int `config:"commit_retries"`
+UploadCutoff  fs.SizeSuffix        `config:"upload_cutoff"`
+CommitRetries int                  `config:"commit_retries"`
+Enc           encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote box
@@ -180,22 +315,10 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return authRetry || fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

-// substitute reserved characters for box
-func replaceReservedChars(x string) string {
-// Backslash for FULLWIDTH REVERSE SOLIDUS
-return strings.Replace(x, "\\", "＼", -1)
-}
-
-// restore reserved characters for box
-func restoreReservedChars(x string) string {
-// FULLWIDTH REVERSE SOLIDUS for Backslash
-return strings.Replace(x, "＼", "\\", -1)
-}

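The deleted pair above is exactly what the new encoding option replaces: instead of one hard-coded mapping (backslash to FULLWIDTH REVERSE SOLIDUS and back), the configurable encoder applies the whole rule set declared in the Default above. A sketch of the equivalence, assuming enc mirrors those default flags:

// enc mirrors the default flags configured in the option above.
enc := encoder.Display | encoder.EncodeBackSlash | encoder.EncodeRightSpace | encoder.EncodeInvalidUtf8
wire := enc.FromStandardName(`back\slash`) // "back＼slash", what replaceReservedChars produced
name := enc.ToStandardName(wire)           // `back\slash` again, what restoreReservedChars produced
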
// readMetaDataForPath reads the metadata from the path
-func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
+func (f *Fs) readMetaDataForPath(ctx context.Context, path string) (info *api.Item, err error) {
// defer fs.Trace(f, "path=%q", path)("info=%+v, err=%v", &info, &err)
-leaf, directoryID, err := f.dirCache.FindRootAndPath(path, false)
+leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, path, false)
if err != nil {
if err == fs.ErrorDirNotFound {
return nil, fs.ErrorObjectNotFound
@@ -203,7 +326,7 @@ func (f *Fs) readMetaDataForPath(path string) (info *api.Item, err error) {
return nil, err
}

-found, err := f.listAll(directoryID, false, true, func(item *api.Item) bool {
+found, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
if item.Name == leaf {
info = item
return true
@@ -238,6 +361,7 @@ func errorHandler(resp *http.Response) error {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
+ctx := context.Background()
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
@@ -271,7 +395,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {

// Renew the token in the background
f.tokenRenewer = oauthutil.NewRenew(f.String(), ts, func() error {
-_, err := f.readMetaDataForPath("")
+_, err := f.readMetaDataForPath(ctx, "")
return err
})

@@ -279,7 +403,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.dirCache = dircache.New(root, rootID, f)

// Find the current root
-err = f.dirCache.FindRoot(false)
+err = f.dirCache.FindRoot(ctx, false)
if err != nil {
// Assume it is a file
newRoot, remote := dircache.SplitPath(root)
@@ -287,12 +411,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
tempF.root = newRoot
// Make new Fs which is the parent
-err = tempF.dirCache.FindRoot(false)
+err = tempF.dirCache.FindRoot(ctx, false)
if err != nil {
// No root so return old f
return f, nil
}
-_, err := tempF.newObjectWithInfo(remote, nil)
+_, err := tempF.newObjectWithInfo(ctx, remote, nil)
if err != nil {
if err == fs.ErrorObjectNotFound {
// File doesn't exist so return old f
@@ -303,7 +427,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
f.features.Fill(&tempF)
// XXX: update the old f here instead of returning tempF, since
// `features` were already filled with functions having *f as a receiver.
-// See https://github.com/ncw/rclone/issues/2182
+// See https://github.com/rclone/rclone/issues/2182
f.dirCache = tempF.dirCache
f.root = tempF.root
// return an error with an fs which points to the parent
@@ -323,7 +447,7 @@ func (f *Fs) rootSlash() string {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error) {
+func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *api.Item) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -333,7 +457,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)
// Set info
err = o.setMetaData(info)
} else {
-err = o.readMetaData() // reads info and meta, returning an error
+err = o.readMetaData(ctx) // reads info and meta, returning an error
}
if err != nil {
return nil, err
@@ -343,14 +467,14 @@ func (f *Fs) newObjectWithInfo(remote string, info *api.Item) (fs.Object, error)

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-return f.newObjectWithInfo(remote, nil)
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+return f.newObjectWithInfo(ctx, remote, nil)
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
-func (f *Fs) FindLeaf(pathID, leaf string) (pathIDOut string, found bool, err error) {
+func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
// Find the leaf in pathID
-found, err = f.listAll(pathID, true, false, func(item *api.Item) bool {
+found, err = f.listAll(ctx, pathID, true, false, func(item *api.Item) bool {
if item.Name == leaf {
pathIDOut = item.ID
return true
@@ -368,7 +492,7 @@ func fieldsValue() url.Values {
}

// CreateDir makes a directory with pathID as parent and name leaf
-func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
+func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
// fs.Debugf(f, "CreateDir(%q, %q)\n", pathID, leaf)
var resp *http.Response
var info *api.Item
@@ -378,13 +502,13 @@ func (f *Fs) CreateDir(pathID, leaf string) (newID string, err error) {
Parameters: fieldsValue(),
}
mkdir := api.CreateFolder{
-Name: replaceReservedChars(leaf),
+Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: pathID,
},
}
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.srv.CallJSON(&opts, &mkdir, &info)
+resp, err = f.srv.CallJSON(ctx, &opts, &mkdir, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -406,7 +530,7 @@ type listAllFn func(*api.Item) bool
// Lists the directory required calling the user function on each item found
//
// If the user fn ever returns true then it early exits with found = true
-func (f *Fs) listAll(dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
+func (f *Fs) listAll(ctx context.Context, dirID string, directoriesOnly bool, filesOnly bool, fn listAllFn) (found bool, err error) {
opts := rest.Opts{
Method: "GET",
Path: "/folders/" + dirID + "/items",
@@ -421,7 +545,7 @@ OUTER:
var result api.FolderItems
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.srv.CallJSON(&opts, nil, &result)
+resp, err = f.srv.CallJSON(ctx, &opts, nil, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -444,7 +568,7 @@ OUTER:
if item.ItemStatus != api.ItemStatusActive {
continue
}
-item.Name = restoreReservedChars(item.Name)
+item.Name = f.opt.Enc.ToStandardName(item.Name)
if fn(item) {
found = true
break OUTER
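listAll pages through /folders/{dirID}/items and hands each active, name-decoded item to the callback; returning true stops the listing early with found = true, which is how readMetaDataForPath and FindLeaf above find a single entry. A usage sketch that never stops early, counting files instead:

var nFiles int
_, err := f.listAll(ctx, directoryID, false, true, func(item *api.Item) bool {
	nFiles++
	return false // keep paging; returning true would stop with found = true
})
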
@@ -467,17 +591,17 @@ OUTER:
//
// This should return ErrDirNotFound if the directory isn't
// found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-err = f.dirCache.FindRoot(false)
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+err = f.dirCache.FindRoot(ctx, false)
if err != nil {
return nil, err
}
-directoryID, err := f.dirCache.FindDir(dir, false)
+directoryID, err := f.dirCache.FindDir(ctx, dir, false)
if err != nil {
return nil, err
}
var iErr error
-_, err = f.listAll(directoryID, false, false, func(info *api.Item) bool {
+_, err = f.listAll(ctx, directoryID, false, false, func(info *api.Item) bool {
remote := path.Join(dir, info.Name)
if info.Type == api.ItemTypeFolder {
// cache the directory ID for later lookups
@@ -486,7 +610,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// FIXME more info from dir?
entries = append(entries, d)
} else if info.Type == api.ItemTypeFile {
-o, err := f.newObjectWithInfo(remote, info)
+o, err := f.newObjectWithInfo(ctx, remote, info)
if err != nil {
iErr = err
return true
@@ -510,9 +634,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
// Returns the object, leaf, directoryID and error
//
// Used to create new objects
-func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
+func (f *Fs) createObject(ctx context.Context, remote string, modTime time.Time, size int64) (o *Object, leaf string, directoryID string, err error) {
// Create the directory for the object if it doesn't exist
-leaf, directoryID, err = f.dirCache.FindRootAndPath(remote, true)
+leaf, directoryID, err = f.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return
}
@@ -529,22 +653,22 @@ func (f *Fs) createObject(remote string, modTime time.Time, size int64) (o *Obje
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-existingObj, err := f.newObjectWithInfo(src.Remote(), nil)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+existingObj, err := f.newObjectWithInfo(ctx, src.Remote(), nil)
switch err {
case nil:
-return existingObj, existingObj.Update(in, src, options...)
+return existingObj, existingObj.Update(ctx, in, src, options...)
case fs.ErrorObjectNotFound:
// Not found so create it
-return f.PutUnchecked(in, src)
+return f.PutUnchecked(ctx, in, src)
default:
return nil, err
}
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return f.Put(ctx, in, src, options...)
}

// PutUnchecked the object into the container
@@ -554,56 +678,56 @@ func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
remote := src.Remote()
size := src.Size()
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)

-o, _, _, err := f.createObject(remote, modTime, size)
+o, _, _, err := f.createObject(ctx, remote, modTime, size)
if err != nil {
return nil, err
}
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
}

// Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
-err := f.dirCache.FindRoot(true)
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+err := f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
if dir != "" {
-_, err = f.dirCache.FindDir(dir, true)
+_, err = f.dirCache.FindDir(ctx, dir, true)
}
return err
}

// deleteObject removes an object by ID
-func (f *Fs) deleteObject(id string) error {
+func (f *Fs) deleteObject(ctx context.Context, id string) error {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/" + id,
NoResponse: true,
}
return f.pacer.Call(func() (bool, error) {
-resp, err := f.srv.Call(&opts)
+resp, err := f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
}

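deleteObject shows the retry idiom used for every API call in this backend: the closure is handed to f.pacer.Call, which re-invokes it with rate-limit backoff for as long as shouldRetry reports the response as retryable. The shape, extracted for clarity:

// The pacer re-runs the closure while shouldRetry returns (true, err).
var resp *http.Response
var err error
err = f.pacer.Call(func() (bool, error) {
	resp, err = f.srv.Call(ctx, &opts) // any of the rest methods fits here
	return shouldRetry(resp, err)      // (true, err) means try again
})
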
// purgeCheck removes the root directory, if check is set then it
// refuses to do so if it has anything in
-func (f *Fs) purgeCheck(dir string, check bool) error {
+func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) error {
root := path.Join(f.root, dir)
if root == "" {
return errors.New("can't purge root directory")
}
dc := f.dirCache
-err := dc.FindRoot(false)
+err := dc.FindRoot(ctx, false)
if err != nil {
return err
}
-rootID, err := dc.FindDir(dir, false)
+rootID, err := dc.FindDir(ctx, dir, false)
if err != nil {
return err
}
@@ -617,7 +741,7 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
opts.Parameters.Set("recursive", strconv.FormatBool(!check))
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.srv.Call(&opts)
+resp, err = f.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -633,8 +757,8 @@ func (f *Fs) purgeCheck(dir string, check bool) error {
// Rmdir deletes the root folder
//
// Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
-return f.purgeCheck(dir, true)
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+return f.purgeCheck(ctx, dir, true)
}

// Precision return the precision of this Fs
@@ -651,13 +775,13 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
-err := srcObj.readMetaData()
+err := srcObj.readMetaData(ctx)
if err != nil {
return nil, err
}
@@ -669,7 +793,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
}

// Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}
@@ -680,9 +804,8 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
Path: "/files/" + srcObj.id + "/copy",
Parameters: fieldsValue(),
}
-replacedLeaf := replaceReservedChars(leaf)
copyFile := api.CopyFile{
-Name: replacedLeaf,
+Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
@@ -690,7 +813,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
var resp *http.Response
var info *api.Item
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.srv.CallJSON(&opts, &copyFile, &info)
+resp, err = f.srv.CallJSON(ctx, &opts, &copyFile, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -708,12 +831,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
// Optional interface: Only implement this if you have a way of
// deleting all the files quicker than just running Remove() on the
// result of List()
-func (f *Fs) Purge() error {
-return f.purgeCheck("", false)
+func (f *Fs) Purge(ctx context.Context) error {
+return f.purgeCheck(ctx, "", false)
}

// move a file or folder
-func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
+func (f *Fs) move(ctx context.Context, endpoint, id, leaf, directoryID string) (info *api.Item, err error) {
// Move the object
opts := rest.Opts{
Method: "PUT",
@@ -721,14 +844,14 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
Parameters: fieldsValue(),
}
move := api.UpdateFileMove{
-Name: replaceReservedChars(leaf),
+Name: f.opt.Enc.FromStandardName(leaf),
Parent: api.Parent{
ID: directoryID,
},
}
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.srv.CallJSON(&opts, &move, &info)
+resp, err = f.srv.CallJSON(ctx, &opts, &move, &info)
return shouldRetry(resp, err)
})
if err != nil {
@@ -746,7 +869,7 @@ func (f *Fs) move(endpoint, id, leaf, directoryID string) (info *api.Item, err e
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
@@ -754,13 +877,13 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
}

// Create temporary object
-dstObj, leaf, directoryID, err := f.createObject(remote, srcObj.modTime, srcObj.size)
+dstObj, leaf, directoryID, err := f.createObject(ctx, remote, srcObj.modTime, srcObj.size)
if err != nil {
return nil, err
}

// Do the move
-info, err := f.move("/files/", srcObj.id, leaf, directoryID)
+info, err := f.move(ctx, "/files/", srcObj.id, leaf, directoryID)
if err != nil {
return nil, err
}
@@ -780,7 +903,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
srcFs, ok := src.(*Fs)
if !ok {
fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -796,14 +919,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// find the root src directory
-err := srcFs.dirCache.FindRoot(false)
+err := srcFs.dirCache.FindRoot(ctx, false)
if err != nil {
return err
}

// find the root dst directory
if dstRemote != "" {
-err = f.dirCache.FindRoot(true)
+err = f.dirCache.FindRoot(ctx, true)
if err != nil {
return err
}
@@ -819,14 +942,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
if dstRemote == "" {
findPath = f.root
}
-leaf, directoryID, err = f.dirCache.FindPath(findPath, true)
+leaf, directoryID, err = f.dirCache.FindPath(ctx, findPath, true)
if err != nil {
return err
}

// Check destination does not exist
if dstRemote != "" {
-_, err = f.dirCache.FindDir(dstRemote, false)
+_, err = f.dirCache.FindDir(ctx, dstRemote, false)
if err == fs.ErrorDirNotFound {
// OK
} else if err != nil {
@@ -837,13 +960,13 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// Find ID of src
-srcID, err := srcFs.dirCache.FindDir(srcRemote, false)
+srcID, err := srcFs.dirCache.FindDir(ctx, srcRemote, false)
if err != nil {
return err
}

// Do the move
-_, err = f.move("/folders/", srcID, leaf, directoryID)
+_, err = f.move(ctx, "/folders/", srcID, leaf, directoryID)
if err != nil {
return err
}
@@ -852,8 +975,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

// PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (string, error) {
-id, err := f.dirCache.FindDir(remote, false)
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+id, err := f.dirCache.FindDir(ctx, remote, false)
var opts rest.Opts
if err == nil {
fs.Debugf(f, "attempting to share directory '%s'", remote)
@@ -865,7 +988,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
}
} else {
fs.Debugf(f, "attempting to share single file '%s'", remote)
-o, err := f.NewObject(remote)
+o, err := f.NewObject(ctx, remote)
if err != nil {
return "", err
}
@@ -885,7 +1008,7 @@ func (f *Fs) PublicLink(remote string) (string, error) {
var info api.Item
var resp *http.Response
err = f.pacer.Call(func() (bool, error) {
-resp, err = f.srv.CallJSON(&opts, &shareLink, &info)
+resp, err = f.srv.CallJSON(ctx, &opts, &shareLink, &info)
return shouldRetry(resp, err)
})
return info.SharedLink.URL, err

@@ -922,13 +1045,8 @@ func (o *Object) Remote() string {
return o.remote
}

-// srvPath returns a path for use in server
-func (o *Object) srvPath() string {
-return replaceReservedChars(o.fs.rootSlash() + o.remote)
-}
-
// Hash returns the SHA-1 of an object returning a lowercase hex string
-func (o *Object) Hash(t hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.SHA1 {
return "", hash.ErrUnsupported
}
@@ -937,7 +1055,7 @@ func (o *Object) Hash(t hash.Type) (string, error) {

// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
-err := o.readMetaData()
+err := o.readMetaData(context.TODO())
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return 0
@@ -962,11 +1080,11 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
-func (o *Object) readMetaData() (err error) {
+func (o *Object) readMetaData(ctx context.Context) (err error) {
if o.hasMetaData {
return nil
}
-info, err := o.fs.readMetaDataForPath(o.remote)
+info, err := o.fs.readMetaDataForPath(ctx, o.remote)
if err != nil {
if apiErr, ok := err.(*api.Error); ok {
if apiErr.Code == "not_found" || apiErr.Code == "trashed" {
@@ -983,8 +1101,8 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
-err := o.readMetaData()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+err := o.readMetaData(ctx)
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -993,7 +1111,7 @@ func (o *Object) ModTime() time.Time {
}

// setModTime sets the modification time of the local fs object
-func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
+func (o *Object) setModTime(ctx context.Context, modTime time.Time) (*api.Item, error) {
opts := rest.Opts{
Method: "PUT",
Path: "/files/" + o.id,
@@ -1004,15 +1122,15 @@ func (o *Object) setModTime(modTime time.Time) (*api.Item, error) {
}
var info *api.Item
err := o.fs.pacer.Call(func() (bool, error) {
-resp, err := o.fs.srv.CallJSON(&opts, &update, &info)
+resp, err := o.fs.srv.CallJSON(ctx, &opts, &update, &info)
return shouldRetry(resp, err)
})
return info, err
}

// SetModTime sets the modification time of the local fs object
-func (o *Object) SetModTime(modTime time.Time) error {
-info, err := o.setModTime(modTime)
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
+info, err := o.setModTime(ctx, modTime)
if err != nil {
return err
}
@@ -1025,7 +1143,7 @@ func (o *Object) Storable() bool {
}

// Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.id == "" {
return nil, errors.New("can't download - no id")
}
@@ -1037,7 +1155,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
Options: options,
}
err = o.fs.pacer.Call(func() (bool, error) {
-resp, err = o.fs.srv.Call(&opts)
+resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1049,9 +1167,9 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// upload does a single non-multipart upload
//
// This is recommended for less than 50 MB of content
-func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
+func (o *Object) upload(ctx context.Context, in io.Reader, leaf, directoryID string, modTime time.Time) (err error) {
upload := api.UploadFile{
-Name: replaceReservedChars(leaf),
+Name: o.fs.opt.Enc.FromStandardName(leaf),
ContentModifiedAt: api.Time(modTime),
ContentCreatedAt: api.Time(modTime),
Parent: api.Parent{
@@ -1076,7 +1194,7 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
opts.Path = "/files/content"
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
-resp, err = o.fs.srv.CallJSON(&opts, &upload, &result)
+resp, err = o.fs.srv.CallJSON(ctx, &opts, &upload, &result)
return shouldRetry(resp, err)
})
if err != nil {
@@ -1093,32 +1211,32 @@ func (o *Object) upload(in io.Reader, leaf, directoryID string, modTime time.Tim
// If existing is set then it updates the object rather than creating a new one
//
// The new object may have been created if an error is returned
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
o.fs.tokenRenewer.Start()
defer o.fs.tokenRenewer.Stop()

size := src.Size()
-modTime := src.ModTime()
+modTime := src.ModTime(ctx)
remote := o.Remote()

// Create the directory for the object if it doesn't exist
-leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(remote, true)
+leaf, directoryID, err := o.fs.dirCache.FindRootAndPath(ctx, remote, true)
if err != nil {
return err
}

// Upload with simple or multipart
if size <= int64(o.fs.opt.UploadCutoff) {
-err = o.upload(in, leaf, directoryID, modTime)
+err = o.upload(ctx, in, leaf, directoryID, modTime)
} else {
-err = o.uploadMultipart(in, leaf, directoryID, size, modTime)
+err = o.uploadMultipart(ctx, in, leaf, directoryID, size, modTime)
}
return err
}

// Remove an object
-func (o *Object) Remove() error {
-return o.fs.deleteObject(o.id)
+func (o *Object) Remove(ctx context.Context) error {
+return o.fs.deleteObject(ctx, o.id)
}

// ID returns the ID of the Object if known, or "" if not

@@ -4,8 +4,8 @@ package box_test
import (
"testing"

-"github.com/ncw/rclone/backend/box"
-"github.com/ncw/rclone/fstest/fstests"
+"github.com/rclone/rclone/backend/box"
+"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -4,6 +4,7 @@ package box

import (
"bytes"
+"context"
"crypto/sha1"
"encoding/base64"
"encoding/json"
@@ -14,15 +15,15 @@ import (
"sync"
"time"

-"github.com/ncw/rclone/backend/box/api"
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/accounting"
-"github.com/ncw/rclone/lib/rest"
"github.com/pkg/errors"
+"github.com/rclone/rclone/backend/box/api"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/accounting"
+"github.com/rclone/rclone/lib/rest"
)

// createUploadSession creates an upload session for the object
-func (o *Object) createUploadSession(leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
+func (o *Object) createUploadSession(ctx context.Context, leaf, directoryID string, size int64) (response *api.UploadSessionResponse, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions",
@@ -37,11 +38,11 @@ func (o *Object) createUploadSession(leaf, directoryID string, size int64) (resp
} else {
opts.Path = "/files/upload_sessions"
request.FolderID = directoryID
-request.FileName = replaceReservedChars(leaf)
+request.FileName = o.fs.opt.Enc.FromStandardName(leaf)
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
-resp, err = o.fs.srv.CallJSON(&opts, &request, &response)
+resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, &response)
return shouldRetry(resp, err)
})
return
@@ -53,7 +54,7 @@ func sha1Digest(digest []byte) string {
}

// uploadPart uploads a part in an upload session
-func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
+func (o *Object) uploadPart(ctx context.Context, SessionID string, offset, totalSize int64, chunk []byte, wrap accounting.WrapFn) (response *api.UploadPartResponse, err error) {
chunkSize := int64(len(chunk))
sha1sum := sha1.Sum(chunk)
opts := rest.Opts{
@@ -70,7 +71,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
opts.Body = wrap(bytes.NewReader(chunk))
-resp, err = o.fs.srv.CallJSON(&opts, nil, &response)
+resp, err = o.fs.srv.CallJSON(ctx, &opts, nil, &response)
return shouldRetry(resp, err)
})
if err != nil {
@@ -80,7 +81,7 @@ func (o *Object) uploadPart(SessionID string, offset, totalSize int64, chunk []b
}

// commitUpload finishes an upload session
-func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
+func (o *Object) commitUpload(ctx context.Context, SessionID string, parts []api.Part, modTime time.Time, sha1sum []byte) (result *api.FolderItems, err error) {
opts := rest.Opts{
Method: "POST",
Path: "/files/upload_sessions/" + SessionID + "/commit",
@@ -97,14 +98,14 @@ func (o *Object) commitUpload(SessionID string, parts []api.Part, modTime time.T
var body []byte
var resp *http.Response
// For discussion of this value see:
-// https://github.com/ncw/rclone/issues/2054
+// https://github.com/rclone/rclone/issues/2054
maxTries := o.fs.opt.CommitRetries
const defaultDelay = 10
var tries int
outer:
for tries = 0; tries < maxTries; tries++ {
err = o.fs.pacer.Call(func() (bool, error) {
-resp, err = o.fs.srv.CallJSON(&opts, &request, nil)
+resp, err = o.fs.srv.CallJSON(ctx, &opts, &request, nil)
if err != nil {
return shouldRetry(resp, err)
}
@@ -112,7 +113,7 @@ outer:
return shouldRetry(resp, err)
})
delay := defaultDelay
-why := "unknown"
+var why string
if err != nil {
// Sometimes we get 400 Error with
// parts_mismatch immediately after uploading
@@ -154,7 +155,7 @@ outer:
}

// abortUpload cancels an upload session
-func (o *Object) abortUpload(SessionID string) (err error) {
+func (o *Object) abortUpload(ctx context.Context, SessionID string) (err error) {
opts := rest.Opts{
Method: "DELETE",
Path: "/files/upload_sessions/" + SessionID,
@@ -163,16 +164,16 @@ func (o *Object) abortUpload(SessionID string) (err error) {
}
var resp *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
-resp, err = o.fs.srv.Call(&opts)
+resp, err = o.fs.srv.Call(ctx, &opts)
return shouldRetry(resp, err)
})
return err
}

// uploadMultipart uploads a file using multipart upload
-func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
+func (o *Object) uploadMultipart(ctx context.Context, in io.Reader, leaf, directoryID string, size int64, modTime time.Time) (err error) {
// Create upload session
-session, err := o.createUploadSession(leaf, directoryID, size)
+session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
return errors.Wrap(err, "multipart upload create session failed")
}
@@ -183,7 +184,7 @@ func (o *Object) uploadMultipart(in io.Reader, leaf, directoryID string, size in
defer func() {
if err != nil {
fs.Debugf(o, "Cancelling multipart upload: %v", err)
-cancelErr := o.abortUpload(session.ID)
+cancelErr := o.abortUpload(ctx, session.ID)
if cancelErr != nil {
fs.Logf(o, "Failed to cancel multipart upload: %v", err)
}
@@ -235,7 +236,7 @@ outer:
defer wg.Done()
defer o.fs.uploadToken.Put()
fs.Debugf(o, "Uploading part %d/%d offset %v/%v part size %v", part+1, session.TotalParts, fs.SizeSuffix(position), fs.SizeSuffix(size), fs.SizeSuffix(chunkSize))
-partResponse, err := o.uploadPart(session.ID, position, size, buf, wrap)
+partResponse, err := o.uploadPart(ctx, session.ID, position, size, buf, wrap)
if err != nil {
err = errors.Wrap(err, "multipart upload failed to upload part")
select {
@@ -263,7 +264,7 @@ outer:
}

// Finalise the upload session
-result, err := o.commitUpload(session.ID, parts, modTime, hash.Sum(nil))
+result, err := o.commitUpload(ctx, session.ID, parts, modTime, hash.Sum(nil))
if err != nil {
return errors.Wrap(err, "multipart upload failed to finalize")
}

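Taken together, the multipart path above is createUploadSession, then uploadPart per chunk (each chunk SHA-1 checksummed individually), then commitUpload with the SHA-1 of the whole file, with abortUpload as the error path. A condensed, sequential sketch of that flow; readChunk and wrap are assumed helpers, the session field names are taken from the usage above, and the real code runs the parts concurrently under o.fs.uploadToken:

session, err := o.createUploadSession(ctx, leaf, directoryID, size)
if err != nil {
	return errors.Wrap(err, "create session")
}
whole := sha1.New()
parts := make([]api.Part, 0, session.TotalParts)
for off := int64(0); off < size; off += session.PartSize {
	buf := readChunk(in, session.PartSize) // assumed helper: next chunk of the input
	_, _ = whole.Write(buf)
	pr, err := o.uploadPart(ctx, session.ID, off, size, buf, wrap)
	if err != nil {
		_ = o.abortUpload(ctx, session.ID) // best-effort cancel, as in the defer above
		return err
	}
	parts = append(parts, pr.Part)
}
_, err = o.commitUpload(ctx, session.ID, parts, modTime, whole.Sum(nil))
return err
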
backend/cache/cache.go (185 changes, vendored)
@@ -18,18 +18,19 @@ import (
"syscall"
"time"

-"github.com/ncw/rclone/backend/crypt"
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/config"
-"github.com/ncw/rclone/fs/config/configmap"
-"github.com/ncw/rclone/fs/config/configstruct"
-"github.com/ncw/rclone/fs/config/obscure"
-"github.com/ncw/rclone/fs/fspath"
-"github.com/ncw/rclone/fs/hash"
-"github.com/ncw/rclone/fs/rc"
-"github.com/ncw/rclone/fs/walk"
-"github.com/ncw/rclone/lib/atexit"
"github.com/pkg/errors"
+"github.com/rclone/rclone/backend/crypt"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/cache"
+"github.com/rclone/rclone/fs/config"
+"github.com/rclone/rclone/fs/config/configmap"
+"github.com/rclone/rclone/fs/config/configstruct"
+"github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fs/fspath"
+"github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/fs/rc"
+"github.com/rclone/rclone/fs/walk"
+"github.com/rclone/rclone/lib/atexit"
"golang.org/x/time/rate"
)

@@ -481,7 +482,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrapf(err, "failed to create cache directory %v", f.opt.TempWritePath)
}
f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
-f.tempFs, err = fs.NewFs(f.opt.TempWritePath)
+f.tempFs, err = cache.Get(f.opt.TempWritePath)
if err != nil {
return nil, errors.Wrapf(err, "failed to create temp fs: %v", err)
}
@@ -508,7 +509,7 @@ func NewFs(name, rootPath string, m configmap.Mapper) (fs.Fs, error) {
if doChangeNotify := wrappedFs.Features().ChangeNotify; doChangeNotify != nil {
pollInterval := make(chan time.Duration, 1)
pollInterval <- time.Duration(f.opt.ChunkCleanInterval)
-doChangeNotify(f.receiveChangeNotify, pollInterval)
+doChangeNotify(context.Background(), f.receiveChangeNotify, pollInterval)
}

f.features = (&fs.Features{
@@ -599,7 +600,7 @@ is used on top of the cache.
return f, fsErr
}

-func (f *Fs) httpStats(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpStats(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
m, err := f.Stats()
if err != nil {
@@ -626,7 +627,7 @@ func (f *Fs) unwrapRemote(remote string) string {
return remote
}

-func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
+func (f *Fs) httpExpireRemote(ctx context.Context, in rc.Params) (out rc.Params, err error) {
out = make(rc.Params)
remoteInt, ok := in["remote"]
if !ok {
@@ -671,7 +672,7 @@ func (f *Fs) httpExpireRemote(in rc.Params) (out rc.Params, err error) {
return out, nil
}

-func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
+func (f *Fs) rcFetch(ctx context.Context, in rc.Params) (rc.Params, error) {
type chunkRange struct {
start, end int64
}
@@ -776,18 +777,18 @@ func (f *Fs) rcFetch(in rc.Params) (rc.Params, error) {
for _, pair := range files {
file, remote := pair[0], pair[1]
var status fileStatus
-o, err := f.NewObject(remote)
+o, err := f.NewObject(ctx, remote)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
co := o.(*Object)
-err = co.refreshFromSource(true)
+err = co.refreshFromSource(ctx, true)
if err != nil {
fetchedChunks[file] = fileStatus{Error: err.Error()}
continue
}
-handle := NewObjectHandle(co, f)
+handle := NewObjectHandle(ctx, co, f)
handle.UseMemory = false
handle.scaleWorkers(1)
walkChunkRanges(crs, co.Size(), func(chunk int64) {
@@ -873,7 +874,7 @@ func (f *Fs) notifyChangeUpstream(remote string, entryType fs.EntryType) {
// ChangeNotify can subscribe multiple callers
// this is coupled with the wrapped fs ChangeNotify (if it supports it)
// and also notifies other caches (i.e VFS) to clear out whenever something changes
-func (f *Fs) ChangeNotify(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
f.parentsForgetMu.Lock()
defer f.parentsForgetMu.Unlock()
fs.Debugf(f, "subscribing to ChangeNotify")
@@ -920,7 +921,7 @@ func (f *Fs) TempUploadWaitTime() time.Duration {
}

// NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
var err error

fs.Debugf(f, "new object '%s'", remote)
@@ -939,16 +940,16 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
// search for entry in source or temp fs
var obj fs.Object
if f.opt.TempWritePath != "" {
-obj, err = f.tempFs.NewObject(remote)
+obj, err = f.tempFs.NewObject(ctx, remote)
// not found in temp fs
if err != nil {
fs.Debugf(remote, "find: not found in local cache fs")
-obj, err = f.Fs.NewObject(remote)
+obj, err = f.Fs.NewObject(ctx, remote)
} else {
fs.Debugf(obj, "find: found in local cache fs")
}
} else {
-obj, err = f.Fs.NewObject(remote)
+obj, err = f.Fs.NewObject(ctx, remote)
}

// not found in either fs
@@ -958,13 +959,13 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
}

// cache the new entry
-co = ObjectFromOriginal(f, obj).persist()
+co = ObjectFromOriginal(ctx, f, obj).persist()
fs.Debugf(co, "find: cached object")
return co, nil
}

// List the objects and directories in dir into entries
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
fs.Debugf(f, "list '%s'", dir)
cd := ShallowDirectory(f, dir)

@@ -994,12 +995,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
fs.Debugf(dir, "list: temp fs entries: %v", queuedEntries)

for _, queuedRemote := range queuedEntries {
-queuedEntry, err := f.tempFs.NewObject(f.cleanRootFromPath(queuedRemote))
+queuedEntry, err := f.tempFs.NewObject(ctx, f.cleanRootFromPath(queuedRemote))
if err != nil {
fs.Debugf(dir, "list: temp file not found in local fs: %v", err)
continue
}
-co := ObjectFromOriginal(f, queuedEntry).persist()
+co := ObjectFromOriginal(ctx, f, queuedEntry).persist()
fs.Debugf(co, "list: cached temp object")
cachedEntries = append(cachedEntries, co)
}
@@ -1007,7 +1008,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
}

// search from the source
-sourceEntries, err := f.Fs.List(dir)
+sourceEntries, err := f.Fs.List(ctx, dir)
if err != nil {
return nil, err
}
@@ -1045,11 +1046,11 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if i < tmpCnt && cachedEntries[i].Remote() == oRemote {
continue
}
-co := ObjectFromOriginal(f, o).persist()
+co := ObjectFromOriginal(ctx, f, o).persist()
cachedEntries = append(cachedEntries, co)
fs.Debugf(dir, "list: cached object: %v", co)
case fs.Directory:
-cdd := DirectoryFromOriginal(f, o)
+cdd := DirectoryFromOriginal(ctx, f, o)
// check if the dir isn't expired and add it in cache if it isn't
if cdd2, err := f.cache.GetDir(cdd.abs()); err != nil || time.Now().Before(cdd2.CacheTs.Add(time.Duration(f.opt.InfoAge))) {
batchDirectories = append(batchDirectories, cdd)
@@ -1079,8 +1080,8 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
return cachedEntries, nil
}

-func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
-entries, err := f.List(dir)
+func (f *Fs) recurse(ctx context.Context, dir string, list *walk.ListRHelper) error {
+entries, err := f.List(ctx, dir)
if err != nil {
return err
}
@@ -1088,7 +1089,7 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {
for i := 0; i < len(entries); i++ {
innerDir, ok := entries[i].(fs.Directory)
if ok {
-err := f.recurse(innerDir.Remote(), list)
+err := f.recurse(ctx, innerDir.Remote(), list)
if err != nil {
return err
}
@@ -1105,21 +1106,21 @@ func (f *Fs) recurse(dir string, list *walk.ListRHelper) error {

// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
fs.Debugf(f, "list recursively from '%s'", dir)

// we check if the source FS supports ListR
// if it does, we'll use that to get all the entries, cache them and return
do := f.Fs.Features().ListR
if do != nil {
-return do(dir, func(entries fs.DirEntries) error {
+return do(ctx, dir, func(entries fs.DirEntries) error {
// we got called back with a set of entries so let's cache them and call the original callback
for _, entry := range entries {
switch o := entry.(type) {
case fs.Object:
-_ = f.cache.AddObject(ObjectFromOriginal(f, o))
+_ = f.cache.AddObject(ObjectFromOriginal(ctx, f, o))
case fs.Directory:
-_ = f.cache.AddDir(DirectoryFromOriginal(f, o))
+_ = f.cache.AddDir(DirectoryFromOriginal(ctx, f, o))
default:
return errors.Errorf("Unknown object type %T", entry)
}
@@ -1132,7 +1133,7 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {

// if we're here, we're gonna do a standard recursive traversal and cache everything
list := walk.NewListRHelper(callback)
-err = f.recurse(dir, list)
+err = f.recurse(ctx, dir, list)
if err != nil {
return err
}
@@ -1141,9 +1142,9 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
}

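ListR prefers the wrapped fs's native recursive lister and only falls back to the plain recursive walk; either way every entry passes through the cache before reaching the caller. A sketch of the fallback's shape, using the walk helper from the code above:

// Fallback when the wrapped fs has no native ListR: recurse adds each
// entry to the helper, which batches them out through the callback.
list := walk.NewListRHelper(callback)
if err := f.recurse(ctx, dir, list); err != nil {
	return err
}
return list.Flush() // send any remaining buffered entries
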
// Mkdir makes the directory (container, bucket)
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
fs.Debugf(f, "mkdir '%s'", dir)
-err := f.Fs.Mkdir(dir)
+err := f.Fs.Mkdir(ctx, dir)
if err != nil {
return err
}
@@ -1171,7 +1172,7 @@ func (f *Fs) Mkdir(dir string) error {
}

// Rmdir removes the directory (container, bucket) if empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
fs.Debugf(f, "rmdir '%s'", dir)

if f.opt.TempWritePath != "" {
@@ -1181,9 +1182,9 @@ func (f *Fs) Rmdir(dir string) error {

// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
-_, err := f.UnWrap().List(dir)
+_, err := f.UnWrap().List(ctx, dir)
if err == nil {
-err := f.Fs.Rmdir(dir)
+err := f.Fs.Rmdir(ctx, dir)
if err != nil {
return err
}
@@ -1191,10 +1192,10 @@ func (f *Fs) Rmdir(dir string) error {
}

var queuedEntries []*Object
-err = walk.ListR(f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+err = walk.ListR(ctx, f.tempFs, dir, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
-co := ObjectFromOriginal(f, oo)
+co := ObjectFromOriginal(ctx, f, oo)
queuedEntries = append(queuedEntries, co)
}
}
@@ -1211,7 +1212,7 @@ func (f *Fs) Rmdir(dir string) error {
}
}
} else {
-err := f.Fs.Rmdir(dir)
+err := f.Fs.Rmdir(ctx, dir)
if err != nil {
return err
}
@@ -1242,7 +1243,7 @@ func (f *Fs) Rmdir(dir string) error {

// DirMove moves src, srcRemote to this remote at dstRemote
// using server side move operations.
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
fs.Debugf(f, "move dir '%s'/'%s' -> '%s'/'%s'", src.Root(), srcRemote, f.Root(), dstRemote)

do := f.Fs.Features().DirMove
@@ -1264,8 +1265,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
f.backgroundRunner.pause()
defer f.backgroundRunner.play()

-_, errInWrap := srcFs.UnWrap().List(srcRemote)
-_, errInTemp := f.tempFs.List(srcRemote)
+_, errInWrap := srcFs.UnWrap().List(ctx, srcRemote)
+_, errInTemp := f.tempFs.List(ctx, srcRemote)
// not found in either fs
if errInWrap != nil && errInTemp != nil {
return fs.ErrorDirNotFound
@@ -1274,7 +1275,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
// we check if the source exists on the remote and make the same move on it too if it does
// otherwise, we skip this step
if errInWrap == nil {
-err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1287,10 +1288,10 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
}

var queuedEntries []*Object
-err := walk.ListR(f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
+err := walk.ListR(ctx, f.tempFs, srcRemote, true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
-co := ObjectFromOriginal(f, oo)
+co := ObjectFromOriginal(ctx, f, oo)
queuedEntries = append(queuedEntries, co)
if co.tempFileStartedUpload() {
fs.Errorf(co, "can't move - upload has already started. need to finish that")
@@ -1311,16 +1312,16 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
fs.Errorf(srcRemote, "dirmove: can't move dir in temp fs")
return fs.ErrorCantDirMove
}
-err = do(f.tempFs, srcRemote, dstRemote)
+err = do(ctx, f.tempFs, srcRemote, dstRemote)
if err != nil {
return err
}
-err = f.cache.ReconcileTempUploads(f)
+err = f.cache.ReconcileTempUploads(ctx, f)
if err != nil {
return err
}
} else {
-err := do(srcFs.UnWrap(), srcRemote, dstRemote)
+err := do(ctx, srcFs.UnWrap(), srcRemote, dstRemote)
if err != nil {
return err
}
@@ -1426,10 +1427,10 @@ func (f *Fs) cacheReader(u io.Reader, src fs.ObjectInfo, originalRead func(inn i
}
}

-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)

// put in to the remote path
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
var err error
var obj fs.Object

@@ -1440,7 +1441,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
_ = f.cache.ExpireDir(parentCd)
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)

-obj, err = f.tempFs.Put(in, src, options...)
+obj, err = f.tempFs.Put(ctx, in, src, options...)
if err != nil {
fs.Errorf(obj, "put: failed to upload in temp fs: %v", err)
return nil, err
@@ -1455,14 +1456,14 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
// if cache writes is enabled write it first through cache
} else if f.opt.StoreWrites {
f.cacheReader(in, src, func(inn io.Reader) {
-obj, err = put(inn, src, options...)
+obj, err = put(ctx, inn, src, options...)
})
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs and saved in cache")
}
// last option: save it directly in remote fs
} else {
-obj, err = put(in, src, options...)
+obj, err = put(ctx, in, src, options...)
if err == nil {
fs.Debugf(obj, "put: uploaded to remote fs")
}
@@ -1474,7 +1475,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
}

// cache the new file
-cachedObj := ObjectFromOriginal(f, obj)
+cachedObj := ObjectFromOriginal(ctx, f, obj)

// deleting cached chunks and info to be replaced with new ones
|
||||
_ = f.cache.RemoveObject(cachedObj.abs())
|
||||
@@ -1497,33 +1498,33 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
|
||||
}
|
||||
|
||||
// Put in to the remote path with the modTime given of the given size
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
fs.Debugf(f, "put data at '%s'", src.Remote())
|
||||
return f.put(in, src, options, f.Fs.Put)
|
||||
return f.put(ctx, in, src, options, f.Fs.Put)
|
||||
}
|
||||
|
||||
// PutUnchecked uploads the object
|
||||
func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
do := f.Fs.Features().PutUnchecked
|
||||
if do == nil {
|
||||
return nil, errors.New("can't PutUnchecked")
|
||||
}
|
||||
fs.Debugf(f, "put data unchecked in '%s'", src.Remote())
|
||||
return f.put(in, src, options, do)
|
||||
return f.put(ctx, in, src, options, do)
|
||||
}
|
||||
|
||||
// PutStream uploads the object
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
do := f.Fs.Features().PutStream
|
||||
if do == nil {
|
||||
return nil, errors.New("can't PutStream")
|
||||
}
|
||||
fs.Debugf(f, "put data streaming in '%s'", src.Remote())
|
||||
return f.put(in, src, options, do)
|
||||
return f.put(ctx, in, src, options, do)
|
||||
}
|
||||
|
||||
// Copy src to this remote using server side copy operations.
|
||||
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(f, "copy obj '%s' -> '%s'", src, remote)
|
||||
|
||||
do := f.Fs.Features().Copy
|
||||
@@ -1543,7 +1544,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
// refresh from source or abort
|
||||
if err := srcObj.refreshFromSource(false); err != nil {
|
||||
if err := srcObj.refreshFromSource(ctx, false); err != nil {
|
||||
fs.Errorf(f, "can't copy %v - %v", src, err)
|
||||
return nil, fs.ErrorCantCopy
|
||||
}
|
||||
@@ -1562,7 +1563,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
}
|
||||
|
||||
obj, err := do(srcObj.Object, remote)
|
||||
obj, err := do(ctx, srcObj.Object, remote)
|
||||
if err != nil {
|
||||
fs.Errorf(srcObj, "error moving in cache: %v", err)
|
||||
return nil, err
|
||||
@@ -1570,7 +1571,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(obj, "copy: file copied")
|
||||
|
||||
// persist new
|
||||
co := ObjectFromOriginal(f, obj).persist()
|
||||
co := ObjectFromOriginal(ctx, f, obj).persist()
|
||||
fs.Debugf(co, "copy: added to cache")
|
||||
// expire the destination path
|
||||
parentCd := NewDirectory(f, cleanPath(path.Dir(co.Remote())))
|
||||
@@ -1597,7 +1598,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
|
||||
}
|
||||
|
||||
// Move src to this remote using server side move operations.
|
||||
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(f, "moving obj '%s' -> %s", src, remote)
|
||||
|
||||
// if source fs doesn't support move abort
|
||||
@@ -1618,7 +1619,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
// refresh from source or abort
|
||||
if err := srcObj.refreshFromSource(false); err != nil {
|
||||
if err := srcObj.refreshFromSource(ctx, false); err != nil {
|
||||
fs.Errorf(f, "can't move %v - %v", src, err)
|
||||
return nil, fs.ErrorCantMove
|
||||
}
|
||||
@@ -1654,7 +1655,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
fs.Debugf(srcObj, "move: queued file moved to %v", remote)
|
||||
}
|
||||
|
||||
obj, err := do(srcObj.Object, remote)
|
||||
obj, err := do(ctx, srcObj.Object, remote)
|
||||
if err != nil {
|
||||
fs.Errorf(srcObj, "error moving: %v", err)
|
||||
return nil, err
|
||||
@@ -1679,7 +1680,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
|
||||
// advertise to ChangeNotify if wrapped doesn't do that
|
||||
f.notifyChangeUpstreamIfNeeded(parentCd.Remote(), fs.EntryDirectory)
|
||||
// persist new
|
||||
cachedObj := ObjectFromOriginal(f, obj).persist()
|
||||
cachedObj := ObjectFromOriginal(ctx, f, obj).persist()
|
||||
fs.Debugf(cachedObj, "move: added to cache")
|
||||
// expire new parent
|
||||
parentCd = NewDirectory(f, cleanPath(path.Dir(cachedObj.Remote())))
|
||||
@@ -1701,7 +1702,7 @@ func (f *Fs) Hashes() hash.Set {
|
||||
}
|
||||
|
||||
// Purge all files in the root and the root directory
|
||||
func (f *Fs) Purge() error {
|
||||
func (f *Fs) Purge(ctx context.Context) error {
|
||||
fs.Infof(f, "purging cache")
|
||||
f.cache.Purge()
|
||||
|
||||
@@ -1710,7 +1711,7 @@ func (f *Fs) Purge() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := do()
|
||||
err := do(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1719,7 +1720,7 @@ func (f *Fs) Purge() error {
|
||||
}
|
||||
|
||||
// CleanUp the trash in the Fs
|
||||
func (f *Fs) CleanUp() error {
|
||||
func (f *Fs) CleanUp(ctx context.Context) error {
|
||||
f.CleanUpCache(false)
|
||||
|
||||
do := f.Fs.Features().CleanUp
|
||||
@@ -1727,16 +1728,16 @@ func (f *Fs) CleanUp() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return do()
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// About gets quota information from the Fs
|
||||
func (f *Fs) About() (*fs.Usage, error) {
|
||||
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
|
||||
do := f.Fs.Features().About
|
||||
if do == nil {
|
||||
return nil, errors.New("About not supported")
|
||||
}
|
||||
return do()
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Stats returns stats about the cache storage
|
||||
@@ -1863,6 +1864,24 @@ func cleanPath(p string) string {
|
||||
return p
|
||||
}
|
||||
|
||||
// UserInfo returns info about the connected user
|
||||
func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
|
||||
do := f.Fs.Features().UserInfo
|
||||
if do == nil {
|
||||
return nil, fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Disconnect the current user
|
||||
func (f *Fs) Disconnect(ctx context.Context) error {
|
||||
do := f.Fs.Features().Disconnect
|
||||
if do == nil {
|
||||
return fs.ErrorNotImplemented
|
||||
}
|
||||
return do(ctx)
|
||||
}
|
||||
|
||||
// Check the interfaces are satisfied
|
||||
var (
|
||||
_ fs.Fs = (*Fs)(nil)
|
||||
@@ -1878,4 +1897,6 @@ var (
|
||||
_ fs.ListRer = (*Fs)(nil)
|
||||
_ fs.ChangeNotifier = (*Fs)(nil)
|
||||
_ fs.Abouter = (*Fs)(nil)
|
||||
_ fs.UserInfoer = (*Fs)(nil)
|
||||
_ fs.Disconnecter = (*Fs)(nil)
|
||||
)
|
||||
|
||||
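Review note: every hunk above makes the same mechanical change: a method that may touch the network gains a leading ctx context.Context parameter, and the wrapping Fs forwards that ctx unchanged to the wrapped Fs (or to the Features() function pointers). A minimal, self-contained sketch of the pattern, with Inner and Wrapper as illustrative stand-ins rather than names from this diff:

package main

import (
    "context"
    "fmt"
)

// Inner stands in for the wrapped filesystem.
type Inner struct{}

// Mkdir now takes a context so callers can cancel it or attach deadlines.
func (i *Inner) Mkdir(ctx context.Context, dir string) error {
    select {
    case <-ctx.Done():
        // honour cancellation before doing any work
        return ctx.Err()
    default:
        fmt.Println("mkdir", dir)
        return nil
    }
}

// Wrapper mirrors what the cache backend does after this refactor:
// accept ctx and thread it through unchanged.
type Wrapper struct{ inner *Inner }

func (w *Wrapper) Mkdir(ctx context.Context, dir string) error {
    return w.inner.Mkdir(ctx, dir)
}

func main() {
    w := &Wrapper{inner: &Inner{}}
    _ = w.Mkdir(context.Background(), "test")
}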
backend/cache/cache_internal_test.go (vendored, 185 lines changed)
@@ -1,9 +1,11 @@
 // +build !plan9
+// +build !race

 package cache_test

 import (
     "bytes"
+    "context"
     "encoding/base64"
     goflag "flag"
     "fmt"
@@ -16,24 +18,24 @@ import (
     "path/filepath"
     "runtime"
     "runtime/debug"
     "strconv"
     "strings"
     "testing"
     "time"

-    "github.com/ncw/rclone/backend/cache"
-    "github.com/ncw/rclone/backend/crypt"
-    _ "github.com/ncw/rclone/backend/drive"
-    "github.com/ncw/rclone/backend/local"
-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/config"
-    "github.com/ncw/rclone/fs/config/configmap"
-    "github.com/ncw/rclone/fs/object"
-    "github.com/ncw/rclone/fs/rc"
-    "github.com/ncw/rclone/fstest"
-    "github.com/ncw/rclone/vfs"
-    "github.com/ncw/rclone/vfs/vfsflags"
-    "github.com/pkg/errors"
+    "github.com/rclone/rclone/backend/cache"
+    "github.com/rclone/rclone/backend/crypt"
+    _ "github.com/rclone/rclone/backend/drive"
+    "github.com/rclone/rclone/backend/local"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/config"
+    "github.com/rclone/rclone/fs/config/configmap"
+    "github.com/rclone/rclone/fs/object"
+    "github.com/rclone/rclone/fs/rc"
+    "github.com/rclone/rclone/fstest"
+    "github.com/rclone/rclone/lib/random"
+    "github.com/rclone/rclone/vfs"
+    "github.com/rclone/rclone/vfs/vfsflags"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 )
@@ -120,7 +122,7 @@ func TestInternalListRootAndInnerRemotes(t *testing.T) {
     require.NoError(t, err)
     listRootInner, err := runInstance.list(t, rootFs, innerFolder)
     require.NoError(t, err)
-    listInner, err := rootFs2.List("")
+    listInner, err := rootFs2.List(context.Background(), "")
     require.NoError(t, err)

     require.Len(t, listRoot, 1)
@@ -138,10 +140,10 @@ func TestInternalVfsCache(t *testing.T) {
     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, true, true, nil, map[string]string{"writes": "true", "info_age": "1h"})
     defer runInstance.cleanupFs(t, rootFs, boltDb)

-    err := rootFs.Mkdir("test")
+    err := rootFs.Mkdir(context.Background(), "test")
     require.NoError(t, err)
     runInstance.writeObjectString(t, rootFs, "test/second", "content")
-    _, err = rootFs.List("test")
+    _, err = rootFs.List(context.Background(), "test")
     require.NoError(t, err)

     testReader := runInstance.randomReader(t, testSize)
@@ -266,7 +268,7 @@ func TestInternalObjNotFound(t *testing.T) {
     rootFs, boltDb := runInstance.newCacheFs(t, remoteName, id, false, true, nil, nil)
     defer runInstance.cleanupFs(t, rootFs, boltDb)

-    obj, err := rootFs.NewObject("404")
+    obj, err := rootFs.NewObject(context.Background(), "404")
     require.Error(t, err)
     require.Nil(t, obj)
 }
@@ -354,8 +356,8 @@ func TestInternalCachedUpdatedContentMatches(t *testing.T) {
         testData2, err = base64.StdEncoding.DecodeString(cryptedText2Base64)
         require.NoError(t, err)
     } else {
-        testData1 = []byte(fstest.RandomString(100))
-        testData2 = []byte(fstest.RandomString(200))
+        testData1 = []byte(random.String(100))
+        testData2 = []byte(random.String(200))
     }

     // write the object
@@ -445,7 +447,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
     require.NoError(t, err)
     log.Printf("original size: %v", originalSize)

-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
     require.NoError(t, err)
     expectedSize := int64(len([]byte("test content")))
     var data2 []byte
@@ -457,7 +459,7 @@ func TestInternalWrappedFsChangeNotSeen(t *testing.T) {
        data2 = []byte("test content")
    }
    objInfo := object.NewStaticObjectInfo(runInstance.encryptRemoteIfNeeded(t, "data.bin"), time.Now(), int64(len(data2)), true, nil, cfs.UnWrap())
-    err = o.Update(bytes.NewReader(data2), objInfo)
+    err = o.Update(context.Background(), bytes.NewReader(data2), objInfo)
    require.NoError(t, err)
    require.Equal(t, int64(len(data2)), o.Size())
    log.Printf("updated size: %v", len(data2))
@@ -503,9 +505,9 @@ func TestInternalMoveWithNotify(t *testing.T) {
    } else {
        testData = []byte("test content")
    }
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test"))
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/one"))
-    _ = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/second"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/one"))
+    _ = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/second"))
    srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

    // list in mount
@@ -515,7 +517,7 @@ func TestInternalMoveWithNotify(t *testing.T) {
    require.NoError(t, err)

    // move file
-    _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+    _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
    require.NoError(t, err)

    err = runInstance.retryBlock(func() error {
@@ -589,9 +591,9 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
    } else {
        testData = []byte("test content")
    }
-    err = rootFs.Mkdir("test")
+    err = rootFs.Mkdir(context.Background(), "test")
    require.NoError(t, err)
-    err = rootFs.Mkdir("test/one")
+    err = rootFs.Mkdir(context.Background(), "test/one")
    require.NoError(t, err)
    srcObj := runInstance.writeObjectBytes(t, cfs.UnWrap(), srcName, testData)

@@ -608,7 +610,7 @@ func TestInternalNotifyCreatesEmptyParts(t *testing.T) {
    require.False(t, found)

    // move file
-    _, err = cfs.UnWrap().Features().Move(srcObj, dstName)
+    _, err = cfs.UnWrap().Features().Move(context.Background(), srcObj, dstName)
    require.NoError(t, err)

    err = runInstance.retryBlock(func() error {
@@ -670,23 +672,23 @@ func TestInternalChangeSeenAfterDirCacheFlush(t *testing.T) {
    runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

    // update in the wrapped fs
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
    require.NoError(t, err)
    wrappedTime := time.Now().Add(-1 * time.Hour)
-    err = o.SetModTime(wrappedTime)
+    err = o.SetModTime(context.Background(), wrappedTime)
    require.NoError(t, err)

    // get a new instance from the cache
-    co, err := rootFs.NewObject("data.bin")
+    co, err := rootFs.NewObject(context.Background(), "data.bin")
    require.NoError(t, err)
-    require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+    require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())

    cfs.DirCacheFlush() // flush the cache

    // get a new instance from the cache
-    co, err = rootFs.NewObject("data.bin")
+    co, err = rootFs.NewObject(context.Background(), "data.bin")
    require.NoError(t, err)
-    require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+    require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
 }

 func TestInternalChangeSeenAfterRc(t *testing.T) {
@@ -713,19 +715,19 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
    runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)

    // update in the wrapped fs
-    o, err := cfs.UnWrap().NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.UnWrap().NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
    require.NoError(t, err)
    wrappedTime := time.Now().Add(-1 * time.Hour)
-    err = o.SetModTime(wrappedTime)
+    err = o.SetModTime(context.Background(), wrappedTime)
    require.NoError(t, err)

    // get a new instance from the cache
-    co, err := rootFs.NewObject("data.bin")
+    co, err := rootFs.NewObject(context.Background(), "data.bin")
    require.NoError(t, err)
-    require.NotEqual(t, o.ModTime().String(), co.ModTime().String())
+    require.NotEqual(t, o.ModTime(context.Background()).String(), co.ModTime(context.Background()).String())

    // Call the rc function
-    m, err := cacheExpire.Fn(rc.Params{"remote": "data.bin"})
+    m, err := cacheExpire.Fn(context.Background(), rc.Params{"remote": "data.bin"})
    require.NoError(t, err)
    require.Contains(t, m, "status")
    require.Contains(t, m, "message")
@@ -733,9 +735,9 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
    require.Contains(t, m["message"], "cached file cleared")

    // get a new instance from the cache
-    co, err = rootFs.NewObject("data.bin")
+    co, err = rootFs.NewObject(context.Background(), "data.bin")
    require.NoError(t, err)
-    require.Equal(t, wrappedTime.Unix(), co.ModTime().Unix())
+    require.Equal(t, wrappedTime.Unix(), co.ModTime(context.Background()).Unix())
    _, err = runInstance.list(t, rootFs, "")
    require.NoError(t, err)

@@ -749,7 +751,7 @@ func TestInternalChangeSeenAfterRc(t *testing.T) {
    require.Len(t, li1, 1)

    // Call the rc function
-    m, err = cacheExpire.Fn(rc.Params{"remote": "/"})
+    m, err = cacheExpire.Fn(context.Background(), rc.Params{"remote": "/"})
    require.NoError(t, err)
    require.Contains(t, m, "status")
    require.Contains(t, m, "message")
@@ -794,7 +796,7 @@ func TestInternalMaxChunkSizeRespected(t *testing.T) {
    // create some rand test data
    testData := randStringBytes(int(int64(totalChunks-1)*chunkSize + chunkSize/2))
    runInstance.writeRemoteBytes(t, rootFs, "data.bin", testData)
-    o, err := cfs.NewObject(runInstance.encryptRemoteIfNeeded(t, "data.bin"))
+    o, err := cfs.NewObject(context.Background(), runInstance.encryptRemoteIfNeeded(t, "data.bin"))
    require.NoError(t, err)
    co, ok := o.(*cache.Object)
    require.True(t, ok)
@@ -833,7 +835,7 @@ func TestInternalExpiredEntriesRemoved(t *testing.T) {
    require.NoError(t, err)
    require.Len(t, l, 1)

-    err = cfs.UnWrap().Mkdir(runInstance.encryptRemoteIfNeeded(t, "test/third"))
+    err = cfs.UnWrap().Mkdir(context.Background(), runInstance.encryptRemoteIfNeeded(t, "test/third"))
    require.NoError(t, err)

    l, err = runInstance.list(t, rootFs, "test")
@@ -868,14 +870,14 @@ func TestInternalBug2117(t *testing.T) {
    cfs, err := runInstance.getCacheFs(rootFs)
    require.NoError(t, err)

-    err = cfs.UnWrap().Mkdir("test")
+    err = cfs.UnWrap().Mkdir(context.Background(), "test")
    require.NoError(t, err)
    for i := 1; i <= 4; i++ {
-        err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d", i))
+        err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d", i))
        require.NoError(t, err)

        for j := 1; j <= 4; j++ {
-            err = cfs.UnWrap().Mkdir(fmt.Sprintf("test/dir%d/dir%d", i, j))
+            err = cfs.UnWrap().Mkdir(context.Background(), fmt.Sprintf("test/dir%d/dir%d", i, j))
            require.NoError(t, err)

            runInstance.writeObjectString(t, cfs.UnWrap(), fmt.Sprintf("test/dir%d/dir%d/test.txt", i, j), "test")
@@ -1080,10 +1082,10 @@ func (r *run) newCacheFs(t *testing.T, remote, id string, needRemote, purge bool
    }

    if purge {
-        _ = f.Features().Purge()
+        _ = f.Features().Purge(context.Background())
        require.NoError(t, err)
    }
-    err = f.Mkdir("")
+    err = f.Mkdir(context.Background(), "")
    require.NoError(t, err)
    if r.useMount && !r.isMounted {
        r.mountFs(t, f)
@@ -1097,7 +1099,7 @@ func (r *run) cleanupFs(t *testing.T, f fs.Fs, b *cache.Persistent) {
        r.unmountFs(t, f)
    }

-    err := f.Features().Purge()
+    err := f.Features().Purge(context.Background())
    require.NoError(t, err)
    cfs, err := r.getCacheFs(f)
    require.NoError(t, err)
@@ -1137,23 +1139,6 @@ func (r *run) randomReader(t *testing.T, size int64) io.ReadCloser {
     return f
 }

-func (r *run) writeRemoteRandomBytes(t *testing.T, f fs.Fs, p string, size int64) string {
-    remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
-    // create some rand test data
-    testData := randStringBytes(int(size))
-
-    r.writeRemoteBytes(t, f, remote, testData)
-    return remote
-}
-
-func (r *run) writeObjectRandomBytes(t *testing.T, f fs.Fs, p string, size int64) fs.Object {
-    remote := path.Join(p, strconv.Itoa(rand.Int())+".bin")
-    // create some rand test data
-    testData := randStringBytes(int(size))
-
-    return r.writeObjectBytes(t, f, remote, testData)
-}
-
 func (r *run) writeRemoteString(t *testing.T, f fs.Fs, remote, content string) {
     r.writeRemoteBytes(t, f, remote, []byte(content))
 }
@@ -1199,7 +1184,7 @@ func (r *run) writeRemoteReader(t *testing.T, f fs.Fs, remote string, in io.Read
 func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte) fs.Object {
     in := bytes.NewReader(data)
     _ = r.writeObjectReader(t, f, remote, in)
-    o, err := f.NewObject(remote)
+    o, err := f.NewObject(context.Background(), remote)
     require.NoError(t, err)
     require.Equal(t, int64(len(data)), o.Size())
     return o
@@ -1208,7 +1193,7 @@ func (r *run) writeObjectBytes(t *testing.T, f fs.Fs, remote string, data []byte
 func (r *run) writeObjectReader(t *testing.T, f fs.Fs, remote string, in io.Reader) fs.Object {
     modTime := time.Now()
     objInfo := object.NewStaticObjectInfo(remote, modTime, -1, true, nil, f)
-    obj, err := f.Put(in, objInfo)
+    obj, err := f.Put(context.Background(), in, objInfo)
     require.NoError(t, err)
     if r.useMount {
         r.vfs.WaitForWriters(10 * time.Second)
@@ -1228,18 +1213,18 @@ func (r *run) updateObjectRemote(t *testing.T, f fs.Fs, remote string, data1 []b
        err = ioutil.WriteFile(path.Join(r.mntDir, remote), data2, 0600)
        require.NoError(t, err)
        r.vfs.WaitForWriters(10 * time.Second)
-        obj, err = f.NewObject(remote)
+        obj, err = f.NewObject(context.Background(), remote)
    } else {
        in1 := bytes.NewReader(data1)
        in2 := bytes.NewReader(data2)
        objInfo1 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data1)), true, nil, f)
        objInfo2 := object.NewStaticObjectInfo(remote, time.Now(), int64(len(data2)), true, nil, f)

-        obj, err = f.Put(in1, objInfo1)
+        obj, err = f.Put(context.Background(), in1, objInfo1)
        require.NoError(t, err)
-        obj, err = f.NewObject(remote)
+        obj, err = f.NewObject(context.Background(), remote)
        require.NoError(t, err)
-        err = obj.Update(in2, objInfo2)
+        err = obj.Update(context.Background(), in2, objInfo2)
    }
    require.NoError(t, err)

@@ -1268,7 +1253,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
            return checkSample, err
        }
    } else {
-        co, err := f.NewObject(remote)
+        co, err := f.NewObject(context.Background(), remote)
        if err != nil {
            return checkSample, err
        }
@@ -1283,7 +1268,7 @@ func (r *run) readDataFromRemote(t *testing.T, f fs.Fs, remote string, offset, e
 func (r *run) readDataFromObj(t *testing.T, o fs.Object, offset, end int64, noLengthCheck bool) []byte {
     size := end - offset
     checkSample := make([]byte, size)
-    reader, err := o.Open(&fs.SeekOption{Offset: offset})
+    reader, err := o.Open(context.Background(), &fs.SeekOption{Offset: offset})
     require.NoError(t, err)
     totalRead, err := io.ReadFull(reader, checkSample)
     if (err == io.EOF || err == io.ErrUnexpectedEOF) && noLengthCheck {
@@ -1300,7 +1285,7 @@ func (r *run) mkdir(t *testing.T, f fs.Fs, remote string) {
    if r.useMount {
        err = os.Mkdir(path.Join(r.mntDir, remote), 0700)
    } else {
-        err = f.Mkdir(remote)
+        err = f.Mkdir(context.Background(), remote)
    }
    require.NoError(t, err)
 }
@@ -1312,11 +1297,11 @@ func (r *run) rm(t *testing.T, f fs.Fs, remote string) error {
        err = os.Remove(path.Join(r.mntDir, remote))
    } else {
        var obj fs.Object
-        obj, err = f.NewObject(remote)
+        obj, err = f.NewObject(context.Background(), remote)
        if err != nil {
-            err = f.Rmdir(remote)
+            err = f.Rmdir(context.Background(), remote)
        } else {
-            err = obj.Remove()
+            err = obj.Remove(context.Background())
        }
    }

@@ -1334,7 +1319,7 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
        }
    } else {
        var list fs.DirEntries
-        list, err = f.List(remote)
+        list, err = f.List(context.Background(), remote)
        for _, ll := range list {
            l = append(l, ll)
        }
@@ -1342,26 +1327,6 @@ func (r *run) list(t *testing.T, f fs.Fs, remote string) ([]interface{}, error)
     return l, err
 }

-func (r *run) listPath(t *testing.T, f fs.Fs, remote string) []string {
-    var err error
-    var l []string
-    if r.useMount {
-        var list []os.FileInfo
-        list, err = ioutil.ReadDir(path.Join(r.mntDir, remote))
-        for _, ll := range list {
-            l = append(l, ll.Name())
-        }
-    } else {
-        var list fs.DirEntries
-        list, err = f.List(remote)
-        for _, ll := range list {
-            l = append(l, ll.Remote())
-        }
-    }
-    require.NoError(t, err)
-    return l
-}
-
 func (r *run) copyFile(t *testing.T, f fs.Fs, src, dst string) error {
     in, err := os.Open(src)
     if err != nil {
@@ -1393,7 +1358,7 @@ func (r *run) dirMove(t *testing.T, rootFs fs.Fs, src, dst string) error {
        }
        r.vfs.WaitForWriters(10 * time.Second)
    } else if rootFs.Features().DirMove != nil {
-        err = rootFs.Features().DirMove(rootFs, src, dst)
+        err = rootFs.Features().DirMove(context.Background(), rootFs, src, dst)
        if err != nil {
            return err
        }
@@ -1415,11 +1380,11 @@ func (r *run) move(t *testing.T, rootFs fs.Fs, src, dst string) error {
        }
        r.vfs.WaitForWriters(10 * time.Second)
    } else if rootFs.Features().Move != nil {
-        obj1, err := rootFs.NewObject(src)
+        obj1, err := rootFs.NewObject(context.Background(), src)
        if err != nil {
            return err
        }
-        _, err = rootFs.Features().Move(obj1, dst)
+        _, err = rootFs.Features().Move(context.Background(), obj1, dst)
        if err != nil {
            return err
        }
@@ -1441,11 +1406,11 @@ func (r *run) copy(t *testing.T, rootFs fs.Fs, src, dst string) error {
        }
        r.vfs.WaitForWriters(10 * time.Second)
    } else if rootFs.Features().Copy != nil {
-        obj, err := rootFs.NewObject(src)
+        obj, err := rootFs.NewObject(context.Background(), src)
        if err != nil {
            return err
        }
-        _, err = rootFs.Features().Copy(obj, dst)
+        _, err = rootFs.Features().Copy(context.Background(), obj, dst)
        if err != nil {
            return err
        }
@@ -1467,11 +1432,11 @@ func (r *run) modTime(t *testing.T, rootFs fs.Fs, src string) (time.Time, error)
        }
        return fi.ModTime(), nil
    }
-    obj1, err := rootFs.NewObject(src)
+    obj1, err := rootFs.NewObject(context.Background(), src)
    if err != nil {
        return time.Time{}, err
    }
-    return obj1.ModTime(), nil
+    return obj1.ModTime(context.Background()), nil
 }

 func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
@@ -1484,7 +1449,7 @@ func (r *run) size(t *testing.T, rootFs fs.Fs, src string) (int64, error) {
        }
        return fi.Size(), nil
    }
-    obj1, err := rootFs.NewObject(src)
+    obj1, err := rootFs.NewObject(context.Background(), src)
    if err != nil {
        return int64(0), err
    }
@@ -1507,14 +1472,14 @@ func (r *run) updateData(t *testing.T, rootFs fs.Fs, src, data, append string) e
        _, err = f.WriteString(data + append)
    } else {
        var obj1 fs.Object
-        obj1, err = rootFs.NewObject(src)
+        obj1, err = rootFs.NewObject(context.Background(), src)
        if err != nil {
            return err
        }
        data1 := []byte(data + append)
        r := bytes.NewReader(data1)
        objInfo1 := object.NewStaticObjectInfo(src, time.Now(), int64(len(data1)), true, nil, rootFs)
-        err = obj1.Update(r, objInfo1)
+        err = obj1.Update(context.Background(), r, objInfo1)
    }

    return err
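Review note: the test file has no caller-supplied context to propagate, so every updated call site passes context.Background() explicitly. A small illustrative helper (not part of the diff) showing the shape of the new call sites against the post-refactor fs.Fs interface:

func mkdirAndList(t *testing.T, f fs.Fs, dir string) fs.DirEntries {
    // tests own no request context, so Background() is the natural root
    require.NoError(t, f.Mkdir(context.Background(), dir))
    entries, err := f.List(context.Background(), dir)
    require.NoError(t, err)
    return entries
}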
backend/cache/cache_mount_other_test.go (new file, vendored, 21 lines)
@@ -0,0 +1,21 @@
+// +build !linux !go1.13
+// +build !darwin !go1.13
+// +build !freebsd !go1.13
+// +build !windows
+// +build !race
+
+package cache_test
+
+import (
+    "testing"
+
+    "github.com/rclone/rclone/fs"
+)
+
+func (r *run) mountFs(t *testing.T, f fs.Fs) {
+    panic("mountFs not defined for this platform")
+}
+
+func (r *run) unmountFs(t *testing.T, f fs.Fs) {
+    panic("unmountFs not defined for this platform")
+}
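Review note: this new fallback file is selected purely by build constraints. With the old // +build syntax, separate lines are ANDed together, space-separated terms on one line are ORed, and comma-joined terms are ANDed, so the stanza above reads as "(not linux or not go1.13) and (not darwin or not go1.13) and (not freebsd or not go1.13) and not windows and not race"; the panicking stubs therefore compile exactly where no real mount implementation is available. For comparison, an illustrative stanza (not from this diff) annotated with its meaning:

// +build linux darwin    <- linux OR darwin
// +build go1.13          <- AND at least go1.13
// +build !race           <- AND not built with -race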
backend/cache/cache_mount_unix_test.go (vendored, 9 lines changed)
@@ -1,4 +1,5 @@
-// +build !plan9,!windows
+// +build linux,go1.13 darwin,go1.13 freebsd,go1.13
+// +build !race

 package cache_test

@@ -9,9 +10,9 @@ import (

     "bazil.org/fuse"
     fusefs "bazil.org/fuse/fs"
-    "github.com/ncw/rclone/cmd/mount"
-    "github.com/ncw/rclone/cmd/mountlib"
-    "github.com/ncw/rclone/fs"
+    "github.com/rclone/rclone/cmd/mount"
+    "github.com/rclone/rclone/cmd/mountlib"
+    "github.com/rclone/rclone/fs"
     "github.com/stretchr/testify/require"
 )
backend/cache/cache_mount_windows_test.go (vendored, 7 lines changed)
@@ -1,4 +1,5 @@
 // +build windows
+// +build !race

 package cache_test

@@ -9,10 +10,10 @@ import (
     "time"

     "github.com/billziss-gh/cgofuse/fuse"
-    "github.com/ncw/rclone/cmd/cmount"
-    "github.com/ncw/rclone/cmd/mountlib"
-    "github.com/ncw/rclone/fs"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/cmd/cmount"
+    "github.com/rclone/rclone/cmd/mountlib"
+    "github.com/rclone/rclone/fs"
     "github.com/stretchr/testify/require"
 )
backend/cache/cache_test.go (vendored, 14 lines changed)
@@ -1,21 +1,25 @@
 // Test Cache filesystem interface

 // +build !plan9
+// +build !race

 package cache_test

 import (
     "testing"

-    "github.com/ncw/rclone/backend/cache"
-    _ "github.com/ncw/rclone/backend/local"
-    "github.com/ncw/rclone/fstest/fstests"
+    "github.com/rclone/rclone/backend/cache"
+    _ "github.com/rclone/rclone/backend/local"
+    "github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
     fstests.Run(t, &fstests.Opt{
-        RemoteName: "TestCache:",
-        NilObject:  (*cache.Object)(nil),
+        RemoteName:                   "TestCache:",
+        NilObject:                    (*cache.Object)(nil),
+        UnimplementableFsMethods:     []string{"PublicLink", "MergeDirs", "OpenWriterAt"},
+        UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
+        SkipInvalidUTF8:              true, // invalid UTF-8 confuses the cache
     })
 }
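Review note: fstests.Run drives the shared integration suite from this option struct; declaring optional interfaces as unimplementable up front makes the suite skip the corresponding tests rather than report failures for methods the cache backend cannot support. A hedged sketch of the kind of check such a suite could perform (isUnimplemented is invented for illustration, not rclone API):

// isUnimplemented reports whether the backend declared a method
// as unimplementable (illustrative helper only).
func isUnimplemented(declared []string, method string) bool {
    for _, m := range declared {
        if m == method {
            return true
        }
    }
    return false
}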
backend/cache/cache_upload_test.go (vendored, 54 lines changed)
@@ -1,8 +1,10 @@
 // +build !plan9
+// +build !race

 package cache_test

 import (
+    "context"
     "fmt"
     "math/rand"
     "os"
@@ -11,9 +13,9 @@ import (
     "testing"
     "time"

-    "github.com/ncw/rclone/backend/cache"
-    _ "github.com/ncw/rclone/backend/drive"
-    "github.com/ncw/rclone/fs"
+    "github.com/rclone/rclone/backend/cache"
+    _ "github.com/rclone/rclone/backend/drive"
+    "github.com/rclone/rclone/fs"
     "github.com/stretchr/testify/require"
 )

@@ -85,11 +87,11 @@ func TestInternalUploadMoveExistingFile(t *testing.T) {
         map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "3s"})
     defer runInstance.cleanupFs(t, rootFs, boltDb)

-    err := rootFs.Mkdir("one")
+    err := rootFs.Mkdir(context.Background(), "one")
     require.NoError(t, err)
-    err = rootFs.Mkdir("one/test")
+    err = rootFs.Mkdir(context.Background(), "one/test")
     require.NoError(t, err)
-    err = rootFs.Mkdir("second")
+    err = rootFs.Mkdir(context.Background(), "second")
     require.NoError(t, err)

     // create some rand test data
@@ -122,11 +124,11 @@ func TestInternalUploadTempPathCleaned(t *testing.T) {
        map[string]string{"cache-tmp-upload-path": path.Join(runInstance.tmpUploadDir, id), "cache-tmp-wait-time": "5s"})
    defer runInstance.cleanupFs(t, rootFs, boltDb)

-    err := rootFs.Mkdir("one")
+    err := rootFs.Mkdir(context.Background(), "one")
    require.NoError(t, err)
-    err = rootFs.Mkdir("one/test")
+    err = rootFs.Mkdir(context.Background(), "one/test")
    require.NoError(t, err)
-    err = rootFs.Mkdir("second")
+    err = rootFs.Mkdir(context.Background(), "second")
    require.NoError(t, err)

    // create some rand test data
@@ -165,7 +167,7 @@ func TestInternalUploadQueueMoreFiles(t *testing.T) {
        map[string]string{"tmp_upload_path": path.Join(runInstance.tmpUploadDir, id), "tmp_wait_time": "1s"})
    defer runInstance.cleanupFs(t, rootFs, boltDb)

-    err := rootFs.Mkdir("test")
+    err := rootFs.Mkdir(context.Background(), "test")
    require.NoError(t, err)
    minSize := 5242880
    maxSize := 10485760
@@ -233,9 +235,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
    err = runInstance.dirMove(t, rootFs, "test", "second")
    if err != errNotSupported {
        require.NoError(t, err)
-        _, err = rootFs.NewObject("test/one")
+        _, err = rootFs.NewObject(context.Background(), "test/one")
        require.Error(t, err)
-        _, err = rootFs.NewObject("second/one")
+        _, err = rootFs.NewObject(context.Background(), "second/one")
        require.NoError(t, err)
        // validate that it exists in temp fs
        _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -256,7 +258,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
    err = runInstance.rm(t, rootFs, "test")
    require.Error(t, err)
    require.Contains(t, err.Error(), "directory not empty")
-    _, err = rootFs.NewObject("test/one")
+    _, err = rootFs.NewObject(context.Background(), "test/one")
    require.NoError(t, err)
    // validate that it exists in temp fs
    _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -270,9 +272,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
    if err != errNotSupported {
        require.NoError(t, err)
        // try to read from it
-        _, err = rootFs.NewObject("test/one")
+        _, err = rootFs.NewObject(context.Background(), "test/one")
        require.Error(t, err)
-        _, err = rootFs.NewObject("test/second")
+        _, err = rootFs.NewObject(context.Background(), "test/second")
        require.NoError(t, err)
        data2, err := runInstance.readDataFromRemote(t, rootFs, "test/second", 0, int64(len([]byte("one content"))), false)
        require.NoError(t, err)
@@ -289,9 +291,9 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
    err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
    if err != errNotSupported {
        require.NoError(t, err)
-        _, err = rootFs.NewObject("test/one")
+        _, err = rootFs.NewObject(context.Background(), "test/one")
        require.NoError(t, err)
-        _, err = rootFs.NewObject("test/third")
+        _, err = rootFs.NewObject(context.Background(), "test/third")
        require.NoError(t, err)
        data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
        require.NoError(t, err)
@@ -306,7 +308,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
    // test Remove -- allowed
    err = runInstance.rm(t, rootFs, "test/one")
    require.NoError(t, err)
-    _, err = rootFs.NewObject("test/one")
+    _, err = rootFs.NewObject(context.Background(), "test/one")
    require.Error(t, err)
    // validate that it doesn't exist in temp fs
    _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -318,7 +320,7 @@ func TestInternalUploadTempFileOperations(t *testing.T) {
    require.NoError(t, err)
    err = runInstance.updateData(t, rootFs, "test/one", "one content", " updated")
    require.NoError(t, err)
-    obj2, err := rootFs.NewObject("test/one")
+    obj2, err := rootFs.NewObject(context.Background(), "test/one")
    require.NoError(t, err)
    data2 := runInstance.readDataFromObj(t, obj2, 0, int64(len("one content updated")), false)
    require.Equal(t, "one content updated", string(data2))
@@ -366,7 +368,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
    err = runInstance.dirMove(t, rootFs, "test", "second")
    if err != errNotSupported {
        require.Error(t, err)
-        _, err = rootFs.NewObject("test/one")
+        _, err = rootFs.NewObject(context.Background(), "test/one")
        require.NoError(t, err)
        // validate that it exists in temp fs
        _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -378,7 +380,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
    // test Rmdir
    err = runInstance.rm(t, rootFs, "test")
    require.Error(t, err)
-    _, err = rootFs.NewObject("test/one")
+    _, err = rootFs.NewObject(context.Background(), "test/one")
    require.NoError(t, err)
    // validate that it doesn't exist in temp fs
    _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -389,9 +391,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
    if err != errNotSupported {
        require.Error(t, err)
        // try to read from it
-        _, err = rootFs.NewObject("test/one")
+        _, err = rootFs.NewObject(context.Background(), "test/one")
        require.NoError(t, err)
-        _, err = rootFs.NewObject("test/second")
+        _, err = rootFs.NewObject(context.Background(), "test/second")
        require.Error(t, err)
        // validate that it exists in temp fs
        _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
@@ -404,9 +406,9 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
    err = runInstance.copy(t, rootFs, path.Join("test", "one"), path.Join("test", "third"))
    if err != errNotSupported {
        require.NoError(t, err)
-        _, err = rootFs.NewObject("test/one")
+        _, err = rootFs.NewObject(context.Background(), "test/one")
        require.NoError(t, err)
-        _, err = rootFs.NewObject("test/third")
+        _, err = rootFs.NewObject(context.Background(), "test/third")
        require.NoError(t, err)
        data2, err := runInstance.readDataFromRemote(t, rootFs, "test/third", 0, int64(len([]byte("one content"))), false)
        require.NoError(t, err)
@@ -421,7 +423,7 @@ func TestInternalUploadUploadingFileOperations(t *testing.T) {
    // test Remove
    err = runInstance.rm(t, rootFs, "test/one")
    require.Error(t, err)
-    _, err = rootFs.NewObject("test/one")
+    _, err = rootFs.NewObject(context.Background(), "test/one")
    require.NoError(t, err)
    // validate that it doesn't exist in temp fs
    _, err = os.Stat(path.Join(runInstance.tmpUploadDir, id, runInstance.encryptRemoteIfNeeded(t, "test/one")))
backend/cache/directory.go (vendored, 18 lines changed)
@@ -3,10 +3,11 @@
 package cache

 import (
+    "context"
     "path"
     "time"

-    "github.com/ncw/rclone/fs"
+    "github.com/rclone/rclone/fs"
 )

 // Directory is a generic dir that stores basic information about it
@@ -55,7 +56,7 @@ func ShallowDirectory(f *Fs, remote string) *Directory {
 }

 // DirectoryFromOriginal builds one from a generic fs.Directory
-func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
+func DirectoryFromOriginal(ctx context.Context, f *Fs, d fs.Directory) *Directory {
     var cd *Directory
     fullRemote := path.Join(f.Root(), d.Remote())

@@ -67,7 +68,7 @@ func DirectoryFromOriginal(f *Fs, d fs.Directory) *Directory {
        CacheFs:      f,
        Name:         name,
        Dir:          dir,
-        CacheModTime: d.ModTime().UnixNano(),
+        CacheModTime: d.ModTime(ctx).UnixNano(),
        CacheSize:    d.Size(),
        CacheItems:   d.Items(),
        CacheType:    "Directory",
@@ -100,17 +101,8 @@ func (d *Directory) abs() string {
     return cleanPath(path.Join(d.Dir, d.Name))
 }

-// parentRemote returns the absolute path parent remote
-func (d *Directory) parentRemote() string {
-    absPath := d.abs()
-    if absPath == "" {
-        return ""
-    }
-    return cleanPath(path.Dir(absPath))
-}
-
 // ModTime returns the cached ModTime
-func (d *Directory) ModTime() time.Time {
+func (d *Directory) ModTime(ctx context.Context) time.Time {
     return time.Unix(0, d.CacheModTime)
 }
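Review note: *Directory only grows a ctx parameter on ModTime because the fs.Directory interface changed; the cached value is answered from memory, so no I/O happens and the context is accepted but deliberately unused. Annotated for clarity (same code as the hunk above, comments added):

// ModTime returns the cached ModTime; ctx is unused because the
// answer comes straight from the cache entry, but keeping it in the
// signature is what lets *Directory keep satisfying fs.Directory.
func (d *Directory) ModTime(ctx context.Context) time.Time {
    return time.Unix(0, d.CacheModTime)
}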
backend/cache/handle.go (vendored, 25 lines changed)
@@ -3,6 +3,7 @@
 package cache

 import (
+    "context"
     "fmt"
     "io"
     "path"
@@ -11,9 +12,9 @@ import (
     "sync"
     "time"

-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/operations"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/operations"
 )

 var uploaderMap = make(map[string]*backgroundWriter)
@@ -40,6 +41,7 @@ func initBackgroundUploader(fs *Fs) (*backgroundWriter, error) {

 // Handle is managing the read/write/seek operations on an open handle
 type Handle struct {
+    ctx          context.Context
     cachedObject *Object
     cfs          *Fs
     memory       *Memory
@@ -58,8 +60,9 @@ type Handle struct {
 }

 // NewObjectHandle returns a new Handle for an existing Object
-func NewObjectHandle(o *Object, cfs *Fs) *Handle {
+func NewObjectHandle(ctx context.Context, o *Object, cfs *Fs) *Handle {
     r := &Handle{
+        ctx:          ctx,
         cachedObject: o,
         cfs:          cfs,
         offset:       0,
@@ -351,7 +354,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error
     r := w.rc
     if w.rc == nil {
         r, err = w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-            return w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+            return w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
         })
         if err != nil {
             return nil, err
@@ -361,7 +364,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error

     if !closeOpen {
         if do, ok := r.(fs.RangeSeeker); ok {
-            _, err = do.RangeSeek(offset, io.SeekStart, end-offset)
+            _, err = do.RangeSeek(w.r.ctx, offset, io.SeekStart, end-offset)
             return r, err
         } else if do, ok := r.(io.Seeker); ok {
             _, err = do.Seek(offset, io.SeekStart)
@@ -371,7 +374,7 @@ func (w *worker) reader(offset, end int64, closeOpen bool) (io.ReadCloser, error

     _ = w.rc.Close()
     return w.r.cacheFs().openRateLimited(func() (io.ReadCloser, error) {
-        r, err = w.r.cachedObject.Object.Open(&fs.RangeOption{Start: offset, End: end - 1})
+        r, err = w.r.cachedObject.Object.Open(w.r.ctx, &fs.RangeOption{Start: offset, End: end - 1})
         if err != nil {
             return nil, err
         }
@@ -449,7 +452,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
    // we seem to be getting only errors so we abort
    if err != nil {
        fs.Errorf(w, "object open failed %v: %v", chunkStart, err)
-        err = w.r.cachedObject.refreshFromSource(true)
+        err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
        if err != nil {
            fs.Errorf(w, "%v", err)
        }
@@ -462,7 +465,7 @@ func (w *worker) download(chunkStart, chunkEnd int64, retry int) {
    sourceRead, err = io.ReadFull(w.rc, data)
    if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
        fs.Errorf(w, "failed to read chunk %v: %v", chunkStart, err)
-        err = w.r.cachedObject.refreshFromSource(true)
+        err = w.r.cachedObject.refreshFromSource(w.r.ctx, true)
        if err != nil {
            fs.Errorf(w, "%v", err)
        }
@@ -588,7 +591,7 @@ func (b *backgroundWriter) run() {
    remote := b.fs.cleanRootFromPath(absPath)
    b.notify(remote, BackgroundUploadStarted, nil)
    fs.Infof(remote, "background upload: started upload")
-    err = operations.MoveFile(b.fs.UnWrap(), b.fs.tempFs, remote, remote)
+    err = operations.MoveFile(context.TODO(), b.fs.UnWrap(), b.fs.tempFs, remote, remote)
    if err != nil {
        b.notify(remote, BackgroundUploadError, err)
        _ = b.fs.cache.rollbackPendingUpload(absPath)
@@ -598,14 +601,14 @@ func (b *backgroundWriter) run() {
    // clean empty dirs up to root
    thisDir := cleanPath(path.Dir(remote))
    for thisDir != "" {
-        thisList, err := b.fs.tempFs.List(thisDir)
+        thisList, err := b.fs.tempFs.List(context.TODO(), thisDir)
        if err != nil {
            break
        }
        if len(thisList) > 0 {
            break
        }
-        err = b.fs.tempFs.Rmdir(thisDir)
+        err = b.fs.tempFs.Rmdir(context.TODO(), thisDir)
        fs.Debugf(thisDir, "cleaned from temp path")
        if err != nil {
            break
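Review note: an open Handle stores the ctx of the Open call so that the download workers, which run on their own goroutines, can reuse it; the background uploader, by contrast, has no caller to inherit a context from, which is why those hunks pass context.TODO(), the documented placeholder for "a real context should eventually flow here". An illustrative alternative (not what this diff does) would be to give the background worker its own cancellable context:

type uploader struct {
    ctx    context.Context
    cancel context.CancelFunc
}

func newUploader() *uploader {
    ctx, cancel := context.WithCancel(context.Background())
    return &uploader{ctx: ctx, cancel: cancel}
}

// stop aborts any in-flight calls that honour the context.
func (u *uploader) stop() { u.cancel() }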
backend/cache/object.go (vendored, 99 lines changed)
@@ -3,15 +3,16 @@
 package cache

 import (
+    "context"
     "io"
     "path"
     "sync"
     "time"

-    "github.com/ncw/rclone/fs"
-    "github.com/ncw/rclone/fs/hash"
-    "github.com/ncw/rclone/lib/readers"
     "github.com/pkg/errors"
+    "github.com/rclone/rclone/fs"
+    "github.com/rclone/rclone/fs/hash"
+    "github.com/rclone/rclone/lib/readers"
 )

 const (
@@ -23,15 +24,16 @@ const (
 type Object struct {
     fs.Object `json:"-"`

-    ParentFs      fs.Fs     `json:"-"`        // parent fs
-    CacheFs       *Fs       `json:"-"`        // cache fs
-    Name          string    `json:"name"`     // name of the directory
-    Dir           string    `json:"dir"`      // abs path of the object
-    CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
-    CacheSize     int64     `json:"size"`     // size of directory and contents or -1 if unknown
-    CacheStorable bool      `json:"storable"` // says whether this object can be stored
-    CacheType     string    `json:"cacheType"`
-    CacheTs       time.Time `json:"cacheTs"`
+    ParentFs      fs.Fs     `json:"-"`        // parent fs
+    CacheFs       *Fs       `json:"-"`        // cache fs
+    Name          string    `json:"name"`     // name of the directory
+    Dir           string    `json:"dir"`      // abs path of the object
+    CacheModTime  int64     `json:"modTime"`  // modification or creation time - IsZero for unknown
+    CacheSize     int64     `json:"size"`     // size of directory and contents or -1 if unknown
+    CacheStorable bool      `json:"storable"` // says whether this object can be stored
+    CacheType     string    `json:"cacheType"`
+    CacheTs       time.Time `json:"cacheTs"`
+    cacheHashesMu sync.Mutex
     CacheHashes map[hash.Type]string // all supported hashes cached

     refreshMutex sync.Mutex
@@ -68,7 +70,7 @@ func NewObject(f *Fs, remote string) *Object {
 }

 // ObjectFromOriginal builds one from a generic fs.Object
-func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
+func ObjectFromOriginal(ctx context.Context, f *Fs, o fs.Object) *Object {
     var co *Object
     fullRemote := cleanPath(path.Join(f.Root(), o.Remote()))
     dir, name := path.Split(fullRemote)
@@ -92,17 +94,19 @@ func ObjectFromOriginal(f *Fs, o fs.Object) *Object {
        CacheType: cacheType,
        CacheTs:   time.Now(),
    }
-    co.updateData(o)
+    co.updateData(ctx, o)
    return co
 }

-func (o *Object) updateData(source fs.Object) {
+func (o *Object) updateData(ctx context.Context, source fs.Object) {
     o.Object = source
-    o.CacheModTime = source.ModTime().UnixNano()
+    o.CacheModTime = source.ModTime(ctx).UnixNano()
     o.CacheSize = source.Size()
     o.CacheStorable = source.Storable()
     o.CacheTs = time.Now()
+    o.cacheHashesMu.Lock()
     o.CacheHashes = make(map[hash.Type]string)
+    o.cacheHashesMu.Unlock()
 }

 // Fs returns its FS info
@@ -130,20 +134,20 @@ func (o *Object) abs() string {
 }

 // ModTime returns the cached ModTime
-func (o *Object) ModTime() time.Time {
-    _ = o.refresh()
+func (o *Object) ModTime(ctx context.Context) time.Time {
+    _ = o.refresh(ctx)
     return time.Unix(0, o.CacheModTime)
 }

 // Size returns the cached Size
 func (o *Object) Size() int64 {
-    _ = o.refresh()
+    _ = o.refresh(context.TODO())
     return o.CacheSize
 }

 // Storable returns the cached Storable
 func (o *Object) Storable() bool {
-    _ = o.refresh()
+    _ = o.refresh(context.TODO())
     return o.CacheStorable
 }

@@ -151,18 +155,18 @@ func (o *Object) Storable() bool {
 // all these conditions must be true to ignore a refresh
 // 1. cache ts didn't expire yet
 // 2. is not pending a notification from the wrapped fs
-func (o *Object) refresh() error {
+func (o *Object) refresh(ctx context.Context) error {
     isNotified := o.CacheFs.isNotifiedRemote(o.Remote())
     isExpired := time.Now().After(o.CacheTs.Add(time.Duration(o.CacheFs.opt.InfoAge)))
     if !isExpired && !isNotified {
         return nil
     }

-    return o.refreshFromSource(true)
+    return o.refreshFromSource(ctx, true)
 }

 // refreshFromSource requests the original FS for the object in case it comes from a cached entry
-func (o *Object) refreshFromSource(force bool) error {
+func (o *Object) refreshFromSource(ctx context.Context, force bool) error {
     o.refreshMutex.Lock()
     defer o.refreshMutex.Unlock()
     var err error
@@ -172,29 +176,29 @@ func (o *Object) refreshFromSource(force bool) error {
        return nil
    }
    if o.isTempFile() {
-        liveObject, err = o.ParentFs.NewObject(o.Remote())
+        liveObject, err = o.ParentFs.NewObject(ctx, o.Remote())
        err = errors.Wrapf(err, "in parent fs %v", o.ParentFs)
    } else {
-        liveObject, err = o.CacheFs.Fs.NewObject(o.Remote())
+        liveObject, err = o.CacheFs.Fs.NewObject(ctx, o.Remote())
        err = errors.Wrapf(err, "in cache fs %v", o.CacheFs.Fs)
    }
    if err != nil {
        fs.Errorf(o, "error refreshing object in : %v", err)
        return err
    }
-    o.updateData(liveObject)
+    o.updateData(ctx, liveObject)
    o.persist()

    return nil
 }

 // SetModTime sets the ModTime of this object
-func (o *Object) SetModTime(t time.Time) error {
-    if err := o.refreshFromSource(false); err != nil {
+func (o *Object) SetModTime(ctx context.Context, t time.Time) error {
+    if err := o.refreshFromSource(ctx, false); err != nil {
         return err
     }

-    err := o.Object.SetModTime(t)
+    err := o.Object.SetModTime(ctx, t)
     if err != nil {
         return err
     }
@@ -207,19 +211,19 @@ func (o *Object) SetModTime(t time.Time) error {
 }

 // Open is used to request a specific part of the file using fs.RangeOption
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
     var err error

     if o.Object == nil {
-        err = o.refreshFromSource(true)
+        err = o.refreshFromSource(ctx, true)
     } else {
-        err = o.refresh()
+        err = o.refresh(ctx)
     }
     if err != nil {
         return nil, err
     }

-    cacheReader := NewObjectHandle(o, o.CacheFs)
+    cacheReader := NewObjectHandle(ctx, o, o.CacheFs)
     var offset, limit int64 = 0, -1
     for _, option := range options {
         switch x := option.(type) {
@@ -238,8 +242,8 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }

 // Update will change the object data
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-    if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+    if err := o.refreshFromSource(ctx, false); err != nil {
         return err
     }
     // pause background uploads if active
@@ -254,7 +258,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    fs.Debugf(o, "updating object contents with size %v", src.Size())

    // FIXME use reliable upload
-    err := o.Object.Update(in, src, options...)
+    err := o.Object.Update(ctx, in, src, options...)
    if err != nil {
        fs.Errorf(o, "error updating source: %v", err)
        return err
@@ -265,9 +269,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
    // advertise to ChangeNotify if wrapped doesn't do that
    o.CacheFs.notifyChangeUpstreamIfNeeded(o.Remote(), fs.EntryObject)

-    o.CacheModTime = src.ModTime().UnixNano()
+    o.CacheModTime = src.ModTime(ctx).UnixNano()
    o.CacheSize = src.Size()
+    o.cacheHashesMu.Lock()
    o.CacheHashes = make(map[hash.Type]string)
+    o.cacheHashesMu.Unlock()
    o.CacheTs = time.Now()
    o.persist()

@@ -275,8 +281,8 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove deletes the object from both the cache and the source
-func (o *Object) Remove() error {
-    if err := o.refreshFromSource(false); err != nil {
+func (o *Object) Remove(ctx context.Context) error {
+    if err := o.refreshFromSource(ctx, false); err != nil {
         return err
     }
     // pause background uploads if active
@@ -288,7 +294,7 @@ func (o *Object) Remove() error {
        return errors.Errorf("%v is currently uploading, can't delete", o)
        }
    }
-    err := o.Object.Remove()
+    err := o.Object.Remove(ctx)
    if err != nil {
        return err
    }
@@ -306,24 +312,27 @@

 // Hash requests a hash of the object and stores in the cache
 // since it might or might not be called, this is lazy loaded
|
||||
func (o *Object) Hash(ht hash.Type) (string, error) {
|
||||
_ = o.refresh()
|
||||
func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
|
||||
_ = o.refresh(ctx)
|
||||
o.cacheHashesMu.Lock()
|
||||
if o.CacheHashes == nil {
|
||||
o.CacheHashes = make(map[hash.Type]string)
|
||||
}
|
||||
|
||||
cachedHash, found := o.CacheHashes[ht]
|
||||
o.cacheHashesMu.Unlock()
|
||||
if found {
|
||||
return cachedHash, nil
|
||||
}
|
||||
if err := o.refreshFromSource(false); err != nil {
|
||||
if err := o.refreshFromSource(ctx, false); err != nil {
|
||||
return "", err
|
||||
}
|
||||
liveHash, err := o.Object.Hash(ht)
|
||||
liveHash, err := o.Object.Hash(ctx, ht)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
o.cacheHashesMu.Lock()
|
||||
o.CacheHashes[ht] = liveHash
|
||||
o.cacheHashesMu.Unlock()
|
||||
|
||||
o.persist()
|
||||
fs.Debugf(o, "object hash cached: %v", liveHash)
|
||||
|
||||
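
The cache backend changes above are typical of the whole refactor: every method that may touch the network gains a leading context.Context parameter, while methods whose interface signature has not changed yet (Size, Storable) pass context.TODO() as a placeholder. Below is a minimal sketch of the same plumbing; the Source/Wrapper/fakeSource names are hypothetical stand-ins for illustration, not rclone APIs.

package main

import (
	"context"
	"fmt"
	"time"
)

// Source stands in for the wrapped backend; hypothetical, not an rclone API.
type Source interface {
	ModTime(ctx context.Context) time.Time
	Size() int64
}

// Wrapper caches metadata and refreshes it from the source on demand.
type Wrapper struct {
	src        Source
	cachedTime time.Time
	cachedSize int64
}

// refresh re-reads metadata from the source, honouring cancellation.
func (w *Wrapper) refresh(ctx context.Context) error {
	if err := ctx.Err(); err != nil {
		return err // caller gave up; skip the backend round trip
	}
	w.cachedTime = w.src.ModTime(ctx)
	w.cachedSize = w.src.Size()
	return nil
}

// ModTime gained a ctx parameter in the refactor, so it forwards it.
func (w *Wrapper) ModTime(ctx context.Context) time.Time {
	_ = w.refresh(ctx)
	return w.cachedTime
}

// Size has no ctx in its interface yet, so it passes context.TODO(),
// exactly as the cache backend does above.
func (w *Wrapper) Size() int64 {
	_ = w.refresh(context.TODO())
	return w.cachedSize
}

type fakeSource struct{}

func (fakeSource) ModTime(ctx context.Context) time.Time { return time.Unix(42, 0) }
func (fakeSource) Size() int64                           { return 7 }

func main() {
	w := &Wrapper{src: fakeSource{}}
	fmt.Println(w.ModTime(context.Background()).Unix(), w.Size()) // 42 7
}
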
2 backend/cache/plex.go vendored
@@ -14,8 +14,8 @@ import (
"sync"
"time"

"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/rclone/rclone/fs"
"golang.org/x/net/websocket"
)
2 backend/cache/storage_memory.go vendored
@@ -7,9 +7,9 @@ import (
"strings"
"time"

"github.com/ncw/rclone/fs"
cache "github.com/patrickmn/go-cache"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
)

// Memory is a wrapper of transient storage for a go-cache store
36 backend/cache/storage_persistent.go vendored
@@ -4,6 +4,7 @@ package cache

import (
"bytes"
"context"
"encoding/binary"
"encoding/json"
"fmt"
@@ -15,10 +16,10 @@ import (
"sync"
"time"

bolt "github.com/coreos/bbolt"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/walk"
bolt "github.com/etcd-io/bbolt"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/walk"
)

// Constants
@@ -766,31 +767,6 @@ func (b *Persistent) iterateBuckets(buk *bolt.Bucket, bucketFn func(name string)
return err
}

func (b *Persistent) dumpRoot() string {
var itBuckets func(buk *bolt.Bucket) map[string]interface{}

itBuckets = func(buk *bolt.Bucket) map[string]interface{} {
m := make(map[string]interface{})
c := buk.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
buk2 := buk.Bucket(k)
m[string(k)] = itBuckets(buk2)
} else {
m[string(k)] = "-"
}
}
return m
}
var mm map[string]interface{}
_ = b.db.View(func(tx *bolt.Tx) error {
mm = itBuckets(tx.Bucket([]byte(RootBucket)))
return nil
})
raw, _ := json.MarshalIndent(mm, "", " ")
return string(raw)
}

// addPendingUpload adds a new file to the pending queue of uploads
func (b *Persistent) addPendingUpload(destPath string, started bool) error {
return b.db.Update(func(tx *bolt.Tx) error {
@@ -1014,7 +990,7 @@ func (b *Persistent) SetPendingUploadToStarted(remote string) error {
}

// ReconcileTempUploads will recursively look for all the files in the temp directory and add them to the queue
func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
func (b *Persistent) ReconcileTempUploads(ctx context.Context, cacheFs *Fs) error {
return b.db.Update(func(tx *bolt.Tx) error {
_ = tx.DeleteBucket([]byte(tempBucket))
bucket, err := tx.CreateBucketIfNotExists([]byte(tempBucket))
@@ -1023,7 +999,7 @@ func (b *Persistent) ReconcileTempUploads(cacheFs *Fs) error {
}

var queuedEntries []fs.Object
err = walk.ListR(cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
err = walk.ListR(ctx, cacheFs.tempFs, "", true, -1, walk.ListObjects, func(entries fs.DirEntries) error {
for _, o := range entries {
if oo, ok := o.(fs.Object); ok {
queuedEntries = append(queuedEntries, oo)
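
For reference, ReconcileTempUploads above rebuilds the pending-upload queue by walking the temp filesystem with walk.ListR and keeping only the entries that are objects. The callback-collect pattern it relies on looks like this in isolation; Entry, Object and listR here are hypothetical stand-ins sketching the shape of fs.DirEntry, fs.Object and walk.ListR, not the real rclone APIs.

package main

import "fmt"

// Entry is a stand-in for fs.DirEntry; only some entries are objects.
type Entry interface{ Remote() string }

// Object is a stand-in for fs.Object.
type Object struct{ name string }

func (o Object) Remote() string { return o.name }

// listR imitates the shape of walk.ListR: it hands entries to the
// callback in batches and stops at the first error.
func listR(batches [][]Entry, fn func([]Entry) error) error {
	for _, batch := range batches {
		if err := fn(batch); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	batches := [][]Entry{
		{Object{"tmp/a"}, Object{"tmp/b"}},
		{Object{"tmp/c"}},
	}
	var queued []Object
	_ = listR(batches, func(entries []Entry) error {
		for _, e := range entries {
			if o, ok := e.(Object); ok { // keep objects, skip anything else
				queued = append(queued, o)
			}
		}
		return nil
	})
	fmt.Println(len(queued)) // 3
}
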
2280 backend/chunker/chunker.go Normal file
File diff suppressed because it is too large
691 backend/chunker/chunker_internal_test.go Normal file
@@ -0,0 +1,691 @@
package chunker

import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"path"
"regexp"
"strings"
"testing"

"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
"github.com/rclone/rclone/lib/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

// Command line flags
var (
UploadKilobytes = flag.Int("upload-kilobytes", 0, "Upload size in Kilobytes, set this to test large uploads")
)

// test that chunking does not break large uploads
func testPutLarge(t *testing.T, f *Fs, kilobytes int) {
t.Run(fmt.Sprintf("PutLarge%dk", kilobytes), func(t *testing.T) {
fstests.TestPutLarge(context.Background(), t, f, &fstest.Item{
ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"),
Path: fmt.Sprintf("chunker-upload-%dk", kilobytes),
Size: int64(kilobytes) * int64(fs.KibiByte),
})
})
}

// test chunk name parser
func testChunkNameFormat(t *testing.T, f *Fs) {
saveOpt := f.opt
defer func() {
// restore original settings (f is pointer, f.opt is struct)
f.opt = saveOpt
_ = f.setChunkNameFormat(f.opt.NameFormat)
}()

assertFormat := func(pattern, wantDataFormat, wantCtrlFormat, wantNameRegexp string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
assert.Equal(t, wantDataFormat, f.dataNameFmt)
assert.Equal(t, wantCtrlFormat, f.ctrlNameFmt)
assert.Equal(t, wantNameRegexp, f.nameRegexp.String())
}

assertFormatValid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.NoError(t, err)
}

assertFormatInvalid := func(pattern string) {
err := f.setChunkNameFormat(pattern)
assert.Error(t, err)
}

assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
}

assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
}

assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
}

const newFormatSupported = false // support for patterns not starting with base name (*)

// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
}

// invalid formats
assertFormatInvalid(`chunk-#`)
assertFormatInvalid(`*-chunk`)
assertFormatInvalid(`*-*-chunk-#`)
assertFormatInvalid(`*-chunk-#-#`)
assertFormatInvalid(`#-chunk-*`)
assertFormatInvalid(`*/#`)

assertFormatValid(`*#`)
assertFormatInvalid(`**#`)
assertFormatInvalid(`#*`)
assertFormatInvalid(``)
assertFormatInvalid(`-`)

// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1

assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")

// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
}

// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2

// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")

// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")

// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")

// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")

assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed

// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")

// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")

assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")

// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")

assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")

// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")

// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")

assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")

// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed

assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")

// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")

assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")

assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")

assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")

// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")

assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")

// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")

assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")

assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")

// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")

assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")

assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}

func testSmallFileInternals(t *testing.T, f *Fs) {
const dir = "small"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

checkSmallFileInternals := func(obj fs.Object) {
assert.NotNil(t, obj)
o, ok := obj.(*Object)
assert.True(t, ok)
assert.NotNil(t, o)
if o == nil {
return
}
switch {
case !f.useMeta:
// If meta format is "none", non-chunked file (even empty)
// internally is a single chunk without meta object.
assert.Nil(t, o.main)
assert.True(t, o.isComposite()) // sorry, sometimes a name is misleading
assert.Equal(t, 1, len(o.chunks))
case f.hashAll:
// Consistent hashing forces meta object on small files too
assert.NotNil(t, o.main)
assert.True(t, o.isComposite())
assert.Equal(t, 1, len(o.chunks))
default:
// normally non-chunked file is kept in the Object's main field
assert.NotNil(t, o.main)
assert.False(t, o.isComposite())
assert.Equal(t, 0, len(o.chunks))
}
}

checkContents := func(obj fs.Object, contents string) {
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size())

r, err := obj.Open(ctx)
assert.NoError(t, err)
assert.NotNil(t, r)
if r == nil {
return
}
data, err := ioutil.ReadAll(r)
assert.NoError(t, err)
assert.Equal(t, contents, string(data))
_ = r.Close()
}

checkHashsum := func(obj fs.Object) {
var ht hash.Type
switch {
case !f.hashAll:
return
case f.useMD5:
ht = hash.MD5
case f.useSHA1:
ht = hash.SHA1
default:
return
}
// even empty files must have hashsum in consistent mode
sum, err := obj.Hash(ctx, ht)
assert.NoError(t, err)
assert.NotEqual(t, sum, "")
}

checkSmallFile := func(name, contents string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, put := fstests.PutTestContents(ctx, t, f, &item, contents, false)
assert.NotNil(t, put)
checkSmallFileInternals(put)
checkContents(put, contents)
checkHashsum(put)

// objects returned by Put and NewObject must have similar structure
obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err)
assert.NotNil(t, obj)
checkSmallFileInternals(obj)
checkContents(obj, contents)
checkHashsum(obj)

_ = obj.Remove(ctx)
_ = put.Remove(ctx) // for good
}

checkSmallFile("emptyfile", "")
checkSmallFile("smallfile", "Ok")
}

func testPreventCorruption(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "corrupted"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = true

contents := random.String(250)
modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
const overlapMessage = "chunk overlap"

assertOverlapError := func(err error) {
assert.Error(t, err)
if err != nil {
assert.Contains(t, err.Error(), overlapMessage)
}
}

newFile := func(name string) fs.Object {
item := fstest.Item{Path: path.Join(dir, name), ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj
}
billyObj := newFile("billy")

billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}

err := f.Mkdir(ctx, billyChunkName(1))
assertOverlapError(err)

_, err = f.Move(ctx, newFile("silly1"), billyChunkName(2))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantMove || (err != nil && strings.Contains(err.Error(), overlapMessage)))

_, err = f.Copy(ctx, newFile("silly2"), billyChunkName(3))
assert.Error(t, err)
assert.True(t, err == fs.ErrorCantCopy || (err != nil && strings.Contains(err.Error(), overlapMessage)))

// accessing chunks in strict mode is prohibited
f.opt.FailHard = true
billyChunk4Name := billyChunkName(4)
billyChunk4, err := f.NewObject(ctx, billyChunk4Name)
assertOverlapError(err)

f.opt.FailHard = false
billyChunk4, err = f.NewObject(ctx, billyChunk4Name)
assert.NoError(t, err)
require.NotNil(t, billyChunk4)

f.opt.FailHard = true
_, err = f.Put(ctx, bytes.NewBufferString(contents), billyChunk4)
assertOverlapError(err)

// you can freely read chunks (if you have an object)
r, err := billyChunk4.Open(ctx)
assert.NoError(t, err)
var chunkContents []byte
assert.NotPanics(t, func() {
chunkContents, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
assert.NotEqual(t, contents, string(chunkContents))

// but you can't change them
err = billyChunk4.Update(ctx, bytes.NewBufferString(contents), newFile("silly3"))
assertOverlapError(err)

// Remove isn't special, you can't corrupt files even if you have an object
err = billyChunk4.Remove(ctx)
assertOverlapError(err)

// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
assert.NoError(t, err)
require.NotNil(t, willyChunk)

_, err = operations.Copy(ctx, f, willyChunk, willyChunkName, newFile("silly4"))
assertOverlapError(err)

// operations.Move will return error when chunker's Move refused
// to corrupt target file, but reverts to copy/delete method
// still trying to delete target chunk. Chunker must come to rescue.
_, err = operations.Move(ctx, f, willyChunk, willyChunkName, newFile("silly5"))
assertOverlapError(err)
r, err = willyChunk.Open(ctx)
assert.NoError(t, err)
assert.NotPanics(t, func() {
_, err = ioutil.ReadAll(r)
_ = r.Close()
})
assert.NoError(t, err)
}

func testChunkNumberOverflow(t *testing.T, f *Fs) {
if f.opt.ChunkSize > 50 {
t.Skip("this test requires small chunks")
}
const dir = "wreaked"
const wreakNumber = 10200300
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")
contents := random.String(100)

newFile := func(f fs.Fs, name string) (fs.Object, string) {
filename := path.Join(dir, name)
item := fstest.Item{Path: filename, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, true)
require.NotNil(t, obj)
return obj, filename
}

f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))

f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
_, err := f.NewObject(ctx, fileName)
assert.Error(t, err)

f.opt.FailHard = true
_, err = f.List(ctx, dir)
assert.Error(t, err)
_, err = f.NewObject(ctx, fileName)
assert.Error(t, err)

f.opt.FailHard = false
_ = wreak.Remove(ctx)
_ = file.Remove(ctx)
}

func testMetadataInput(t *testing.T, f *Fs) {
const minChunkForTest = 50
if f.opt.ChunkSize < minChunkForTest {
t.Skip("this test requires chunks that fit metadata")
}

const dir = "usermeta"
ctx := context.Background()
saveOpt := f.opt
defer func() {
f.opt.FailHard = false
_ = operations.Purge(ctx, f.base, dir)
f.opt = saveOpt
}()
f.opt.FailHard = false

modTime := fstest.Time("2001-02-03T04:05:06.499999999Z")

putFile := func(f fs.Fs, name, contents, message string, check bool) fs.Object {
item := fstest.Item{Path: name, ModTime: modTime}
_, obj := fstests.PutTestContents(ctx, t, f, &item, contents, check)
assert.NotNil(t, obj, message)
return obj
}

runSubtest := func(contents, name string) {
description := fmt.Sprintf("file with %s metadata", name)
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")

part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)

obj, err := f.NewObject(ctx, filename)
assert.NoError(t, err, "access "+description)
assert.NotNil(t, obj)
assert.Equal(t, int64(len(contents)), obj.Size(), "size "+description)

o, ok := obj.(*Object)
assert.NotNil(t, ok)
if o != nil {
assert.True(t, o.isComposite() && len(o.chunks) == 1, description+" is forced composite")
o = nil
}

defer func() {
_ = obj.Remove(ctx)
_ = part.Remove(ctx)
}()

r, err := obj.Open(ctx)
assert.NoError(t, err, "open "+description)
assert.NotNil(t, r, "open stream of "+description)
if err == nil && r != nil {
data, err := ioutil.ReadAll(r)
assert.NoError(t, err, "read all of "+description)
assert.Equal(t, contents, string(data), description+" contents is ok")
_ = r.Close()
}
}

metaData, err := marshalSimpleJSON(ctx, 3, 1, "", "")
require.NoError(t, err)
todaysMeta := string(metaData)
runSubtest(todaysMeta, "today")

pastMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":1`)
pastMeta = regexp.MustCompile(`"size":[0-9]+`).ReplaceAllLiteralString(pastMeta, `"size":0`)
runSubtest(pastMeta, "past")

futureMeta := regexp.MustCompile(`"ver":[0-9]+`).ReplaceAllLiteralString(todaysMeta, `"ver":999`)
futureMeta = regexp.MustCompile(`"nchunks":[0-9]+`).ReplaceAllLiteralString(futureMeta, `"nchunks":0,"x":"y"`)
runSubtest(futureMeta, "future")
}

// InternalTest dispatches all internal tests
func (f *Fs) InternalTest(t *testing.T) {
t.Run("PutLarge", func(t *testing.T) {
if *UploadKilobytes <= 0 {
t.Skip("-upload-kilobytes is not set")
}
testPutLarge(t, f, *UploadKilobytes)
})
t.Run("ChunkNameFormat", func(t *testing.T) {
testChunkNameFormat(t, f)
})
t.Run("SmallFileInternals", func(t *testing.T) {
testSmallFileInternals(t, f)
})
t.Run("PreventCorruption", func(t *testing.T) {
testPreventCorruption(t, f)
})
t.Run("ChunkNumberOverflow", func(t *testing.T) {
testChunkNumberOverflow(t, f)
})
t.Run("MetadataInput", func(t *testing.T) {
testMetadataInput(t, f)
})
}

var _ fstests.InternalTester = (*Fs)(nil)
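
The long tests above pin down the chunk naming scheme: a pattern like `*.chunk.###` compiles to the data-chunk format `%s.chunk.%03d`, and with StartFrom = 2 the first chunk of "fish" is stored as "fish.chunk.003". The following is a toy sketch of just that make/parse round trip; it deliberately ignores control chunks, temporary suffixes, and the validity checks the real parser performs.

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// startFrom mirrors f.opt.StartFrom = 2 used by the long tests above:
// the first chunk of a file is numbered 2 on the wire.
const startFrom = 2

// nameRe is a hand-written subset of the compiled pattern for
// `*.chunk.###`: base name, literal ".chunk.", three or more digits.
var nameRe = regexp.MustCompile(`^(.+?)\.chunk\.([0-9]{3,})$`)

// makeChunkName turns ("fish", 1) into "fish.chunk.003".
func makeChunkName(mainName string, chunkNo int) string {
	return fmt.Sprintf("%s.chunk.%03d", mainName, chunkNo+startFrom)
}

// parseChunkName inverts makeChunkName; chunkNo == -1 signals "not a chunk".
func parseChunkName(fileName string) (mainName string, chunkNo int) {
	m := nameRe.FindStringSubmatch(fileName)
	if m == nil {
		return "", -1
	}
	n, _ := strconv.Atoi(m[2])
	return m[1], n - startFrom
}

func main() {
	fmt.Println(makeChunkName("fish", 1))         // fish.chunk.003
	fmt.Println(parseChunkName("fish.chunk.021")) // fish 19
}
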
58 backend/chunker/chunker_test.go Normal file
@@ -0,0 +1,58 @@
// Test the Chunker filesystem interface
package chunker_test

import (
"flag"
"os"
"path/filepath"
"testing"

_ "github.com/rclone/rclone/backend/all" // for integration tests
"github.com/rclone/rclone/backend/chunker"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

// Command line flags
var (
// Invalid characters are not supported by some remotes, eg. Mailru.
// We enable testing with invalid characters when -remote is not set, so
// chunker overlays a local directory, but invalid characters are disabled
// by default when -remote is set, eg. when test_all runs backend tests.
// You can still test with invalid characters using the below flag.
UseBadChars = flag.Bool("bad-chars", false, "Set to test bad characters in file names when -remote is set")
)

// TestIntegration runs integration tests against a concrete remote
// set by the -remote flag. If the flag is not set, it creates a
// dynamic chunker overlay wrapping a local temporary directory.
func TestIntegration(t *testing.T) {
opt := fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*chunker.Object)(nil),
SkipBadWindowsCharacters: !*UseBadChars,
UnimplementableObjectMethods: []string{
"MimeType",
"GetTier",
"SetTier",
},
UnimplementableFsMethods: []string{
"PublicLink",
"OpenWriterAt",
"MergeDirs",
"DirCacheFlush",
"UserInfo",
"Disconnect",
},
}
if *fstest.RemoteName == "" {
name := "TestChunker"
opt.RemoteName = name + ":"
tempDir := filepath.Join(os.TempDir(), "rclone-chunker-test-standard")
opt.ExtraConfig = []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "chunker"},
{Name: name, Key: "remote", Value: tempDir},
}
}
fstests.Run(t, &opt)
}
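
As the code above shows, with no -remote flag the test builds a throwaway chunker remote over a local temp directory; with -remote it runs against a pre-configured remote from the rclone config. Assuming a standard rclone checkout, the invocations would look something like the following (TestMyRemote: is a hypothetical remote name standing in for one from your own config):

go test -v ./backend/chunker
go test -v ./backend/chunker -remote TestMyRemote:
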
@@ -2,6 +2,7 @@ package crypt

import (
"bytes"
"context"
"crypto/aes"
gocipher "crypto/cipher"
"crypto/rand"
@@ -13,10 +14,10 @@ import (
"sync"
"unicode/utf8"

"github.com/ncw/rclone/backend/crypt/pkcs7"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/accounting"
"github.com/pkg/errors"
"github.com/rclone/rclone/backend/crypt/pkcs7"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rfjakob/eme"
"golang.org/x/crypto/nacl/secretbox"
"golang.org/x/crypto/scrypt"
@@ -68,7 +69,7 @@ type ReadSeekCloser interface {
}

// OpenRangeSeek opens the file handle at the offset with the limit given
type OpenRangeSeek func(offset, limit int64) (io.ReadCloser, error)
type OpenRangeSeek func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)

// Cipher is used to swap out the encryption implementations
type Cipher interface {
@@ -85,7 +86,7 @@ type Cipher interface {
// DecryptData
DecryptData(io.ReadCloser) (io.ReadCloser, error)
// DecryptDataSeek decrypt at a given position
DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error)
// EncryptedSize calculates the size of the data when encrypted
EncryptedSize(int64) int64
// DecryptedSize calculates the size of the data when decrypted
@@ -207,21 +208,6 @@ func (c *cipher) putBlock(buf []byte) {
c.buffers.Put(buf)
}

// check to see if the byte string is valid with no control characters
// from 0x00 to 0x1F and is a valid UTF-8 string
func checkValidString(buf []byte) error {
for i := range buf {
c := buf[i]
if c >= 0x00 && c < 0x20 || c == 0x7F {
return ErrorBadDecryptControlChar
}
}
if !utf8.Valid(buf) {
return ErrorBadDecryptUTF8
}
return nil
}

// encodeFileName encodes a filename using a modified version of
// standard base32 as described in RFC4648
//
@@ -293,10 +279,6 @@ func (c *cipher) decryptSegment(ciphertext string) (string, error) {
if err != nil {
return "", err
}
err = checkValidString(plaintext)
if err != nil {
return "", err
}
return string(plaintext), err
}

@@ -755,22 +737,22 @@ func (c *cipher) newDecrypter(rc io.ReadCloser) (*decrypter, error) {
}

// newDecrypterSeek creates a new file handle decrypting on the fly
func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
func (c *cipher) newDecrypterSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (fh *decrypter, err error) {
var rc io.ReadCloser
doRangeSeek := false
setLimit := false
// Open initially with no seek
if offset == 0 && limit < 0 {
// If no offset or limit then open whole file
rc, err = open(0, -1)
rc, err = open(ctx, 0, -1)
} else if offset == 0 {
// If no offset open the header + limit worth of the file
_, underlyingLimit, _, _ := calculateUnderlying(offset, limit)
rc, err = open(0, int64(fileHeaderSize)+underlyingLimit)
rc, err = open(ctx, 0, int64(fileHeaderSize)+underlyingLimit)
setLimit = true
} else {
// Otherwise just read the header to start with
rc, err = open(0, int64(fileHeaderSize))
rc, err = open(ctx, 0, int64(fileHeaderSize))
doRangeSeek = true
}
if err != nil {
@@ -783,7 +765,7 @@ func (c *cipher) newDecrypterSeek(open OpenRangeSeek, offset, limit int64) (fh *
}
fh.open = open // will be called by fh.RangeSeek
if doRangeSeek {
_, err = fh.RangeSeek(offset, io.SeekStart, limit)
_, err = fh.RangeSeek(ctx, offset, io.SeekStart, limit)
if err != nil {
_ = fh.Close()
return nil, err
@@ -903,7 +885,7 @@ func calculateUnderlying(offset, limit int64) (underlyingOffset, underlyingLimit
// limiting the total length to limit.
//
// RangeSeek with a limit of < 0 is equivalent to a regular Seek.
func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, error) {
func (fh *decrypter) RangeSeek(ctx context.Context, offset int64, whence int, limit int64) (int64, error) {
fh.mu.Lock()
defer fh.mu.Unlock()

@@ -930,7 +912,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
// Can we seek underlying stream directly?
if do, ok := fh.rc.(fs.RangeSeeker); ok {
// Seek underlying stream directly
_, err := do.RangeSeek(underlyingOffset, 0, underlyingLimit)
_, err := do.RangeSeek(ctx, underlyingOffset, 0, underlyingLimit)
if err != nil {
return 0, fh.finish(err)
}
@@ -940,7 +922,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er
fh.rc = nil

// Re-open the underlying object with the offset given
rc, err := fh.open(underlyingOffset, underlyingLimit)
rc, err := fh.open(ctx, underlyingOffset, underlyingLimit)
if err != nil {
return 0, fh.finish(errors.Wrap(err, "couldn't reopen file with offset and limit"))
}
@@ -969,7 +951,7 @@ func (fh *decrypter) RangeSeek(offset int64, whence int, limit int64) (int64, er

// Seek implements the io.Seeker interface
func (fh *decrypter) Seek(offset int64, whence int) (int64, error) {
return fh.RangeSeek(offset, whence, -1)
return fh.RangeSeek(context.TODO(), offset, whence, -1)
}

// finish sets the final error and tidies up
@@ -1043,8 +1025,8 @@ func (c *cipher) DecryptData(rc io.ReadCloser) (io.ReadCloser, error) {
// The open function must return a ReadCloser opened to the offset supplied
//
// You must use this form of DecryptData if you might want to Seek the file handle
func (c *cipher) DecryptDataSeek(open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(open, offset, limit)
func (c *cipher) DecryptDataSeek(ctx context.Context, open OpenRangeSeek, offset, limit int64) (ReadSeekCloser, error) {
out, err := c.newDecrypterSeek(ctx, open, offset, limit)
if err != nil {
return nil, err
}
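
The key change in this file is the OpenRangeSeek signature: the open callback the decrypter uses to (re)read byte ranges of the underlying file now takes a context first, so every reopen triggered by RangeSeek can be cancelled. A minimal caller-side sketch of such a callback over an in-memory buffer follows; it shows the shape of the function the cipher is handed, not the cipher itself.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
)

// data stands in for an encrypted remote file; the open callback is the
// only thing DecryptDataSeek needs to (re)read a byte range of it.
var data = []byte("0123456789abcdef")

// open matches the shape of crypt's OpenRangeSeek after this change:
// ctx first, then offset and limit (limit < 0 means "to the end").
func open(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
	if err := ctx.Err(); err != nil {
		return nil, err // honour cancellation before doing any work
	}
	end := int64(len(data))
	if limit >= 0 && offset+limit < end {
		end = offset + limit
	}
	return ioutil.NopCloser(bytes.NewReader(data[offset:end])), nil
}

func main() {
	rc, err := open(context.Background(), 4, 6)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(rc)
	_ = rc.Close()
	fmt.Printf("%s\n", b) // 456789
}
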
@@ -2,6 +2,7 @@ package crypt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base32"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -9,8 +10,8 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/ncw/rclone/backend/crypt/pkcs7"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/backend/crypt/pkcs7"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
@@ -43,69 +44,6 @@ func TestNewNameEncryptionModeString(t *testing.T) {
|
||||
assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
|
||||
}
|
||||
|
||||
func TestValidString(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
expected error
|
||||
}{
|
||||
{"", nil},
|
||||
{"\x01", ErrorBadDecryptControlChar},
|
||||
{"a\x02", ErrorBadDecryptControlChar},
|
||||
{"abc\x03", ErrorBadDecryptControlChar},
|
||||
{"abc\x04def", ErrorBadDecryptControlChar},
|
||||
{"\x05d", ErrorBadDecryptControlChar},
|
||||
{"\x06def", ErrorBadDecryptControlChar},
|
||||
{"\x07", ErrorBadDecryptControlChar},
|
||||
{"\x08", ErrorBadDecryptControlChar},
|
||||
{"\x09", ErrorBadDecryptControlChar},
|
||||
{"\x0A", ErrorBadDecryptControlChar},
|
||||
{"\x0B", ErrorBadDecryptControlChar},
|
||||
{"\x0C", ErrorBadDecryptControlChar},
|
||||
{"\x0D", ErrorBadDecryptControlChar},
|
||||
{"\x0E", ErrorBadDecryptControlChar},
|
||||
{"\x0F", ErrorBadDecryptControlChar},
|
||||
{"\x10", ErrorBadDecryptControlChar},
|
||||
{"\x11", ErrorBadDecryptControlChar},
|
||||
{"\x12", ErrorBadDecryptControlChar},
|
||||
{"\x13", ErrorBadDecryptControlChar},
|
||||
{"\x14", ErrorBadDecryptControlChar},
|
||||
{"\x15", ErrorBadDecryptControlChar},
|
||||
{"\x16", ErrorBadDecryptControlChar},
|
||||
{"\x17", ErrorBadDecryptControlChar},
|
||||
{"\x18", ErrorBadDecryptControlChar},
|
||||
{"\x19", ErrorBadDecryptControlChar},
|
||||
{"\x1A", ErrorBadDecryptControlChar},
|
||||
{"\x1B", ErrorBadDecryptControlChar},
|
||||
{"\x1C", ErrorBadDecryptControlChar},
|
||||
{"\x1D", ErrorBadDecryptControlChar},
|
||||
{"\x1E", ErrorBadDecryptControlChar},
|
||||
{"\x1F", ErrorBadDecryptControlChar},
|
||||
{"\x20", nil},
|
||||
{"\x7E", nil},
|
||||
{"\x7F", ErrorBadDecryptControlChar},
|
||||
{"£100", nil},
|
||||
{`hello? sausage/êé/Hello, 世界/ " ' @ < > & ?/z.txt`, nil},
|
||||
{"£100", nil},
|
||||
// Following tests from https://secure.php.net/manual/en/reference.pcre.pattern.modifiers.php#54805
|
||||
{"a", nil}, // Valid ASCII
|
||||
{"\xc3\xb1", nil}, // Valid 2 Octet Sequence
|
||||
{"\xc3\x28", ErrorBadDecryptUTF8}, // Invalid 2 Octet Sequence
|
||||
{"\xa0\xa1", ErrorBadDecryptUTF8}, // Invalid Sequence Identifier
|
||||
{"\xe2\x82\xa1", nil}, // Valid 3 Octet Sequence
|
||||
{"\xe2\x28\xa1", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 2nd Octet)
|
||||
{"\xe2\x82\x28", ErrorBadDecryptUTF8}, // Invalid 3 Octet Sequence (in 3rd Octet)
|
||||
{"\xf0\x90\x8c\xbc", nil}, // Valid 4 Octet Sequence
|
||||
{"\xf0\x28\x8c\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 2nd Octet)
|
||||
{"\xf0\x90\x28\xbc", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 3rd Octet)
|
||||
{"\xf0\x28\x8c\x28", ErrorBadDecryptUTF8}, // Invalid 4 Octet Sequence (in 4th Octet)
|
||||
{"\xf8\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 5 Octet Sequence (but not Unicode!)
|
||||
{"\xfc\xa1\xa1\xa1\xa1\xa1", ErrorBadDecryptUTF8}, // Valid 6 Octet Sequence (but not Unicode!)
|
||||
} {
|
||||
actual := checkValidString([]byte(test.in))
|
||||
assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
|
||||
}
|
||||
}
|
||||
|
||||
func TestEncodeFileName(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in string
|
||||
@@ -209,8 +147,6 @@ func TestDecryptSegment(t *testing.T) {
|
||||
{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
|
||||
{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
|
||||
{c.encryptSegment("\x01"), ErrorBadDecryptControlChar},
|
||||
{c.encryptSegment("\xc3\x28"), ErrorBadDecryptUTF8},
|
||||
} {
|
||||
actual, actualErr := c.decryptSegment(test.in)
|
||||
assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
|
||||
@@ -704,16 +640,16 @@ var (
|
||||
|
||||
// Test test infrastructure first!
|
||||
func TestRandomSource(t *testing.T) {
|
||||
source := newRandomSource(1E8)
|
||||
sink := newRandomSource(1E8)
|
||||
source := newRandomSource(1e8)
|
||||
sink := newRandomSource(1e8)
|
||||
n, err := io.Copy(sink, source)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, int64(1E8), n)
|
||||
assert.Equal(t, int64(1e8), n)
|
||||
|
||||
source = newRandomSource(1E8)
|
||||
source = newRandomSource(1e8)
|
||||
buf := make([]byte, 16)
|
||||
_, _ = source.Read(buf)
|
||||
sink = newRandomSource(1E8)
|
||||
sink = newRandomSource(1e8)
|
||||
_, err = io.Copy(sink, source)
|
||||
assert.Error(t, err, "Error in stream")
|
||||
}
|
||||
@@ -753,23 +689,23 @@ func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
|
||||
}
|
||||
|
||||
func TestEncryptDecrypt1(t *testing.T) {
|
||||
testEncryptDecrypt(t, 1, 1E7)
|
||||
testEncryptDecrypt(t, 1, 1e7)
|
||||
}
|
||||
|
||||
func TestEncryptDecrypt32(t *testing.T) {
|
||||
testEncryptDecrypt(t, 32, 1E8)
|
||||
testEncryptDecrypt(t, 32, 1e8)
|
||||
}
|
||||
|
||||
func TestEncryptDecrypt4096(t *testing.T) {
|
||||
testEncryptDecrypt(t, 4096, 1E8)
|
||||
testEncryptDecrypt(t, 4096, 1e8)
|
||||
}
|
||||
|
||||
func TestEncryptDecrypt65536(t *testing.T) {
|
||||
testEncryptDecrypt(t, 65536, 1E8)
|
||||
testEncryptDecrypt(t, 65536, 1e8)
|
||||
}
|
||||
|
||||
func TestEncryptDecrypt65537(t *testing.T) {
|
||||
testEncryptDecrypt(t, 65537, 1E8)
|
||||
testEncryptDecrypt(t, 65537, 1e8)
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -802,7 +738,7 @@ func TestEncryptData(t *testing.T) {
|
||||
} {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
// Check encode works
|
||||
buf := bytes.NewBuffer(test.in)
|
||||
@@ -825,7 +761,7 @@ func TestEncryptData(t *testing.T) {
|
||||
func TestNewEncrypter(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
z := &zeroes{}
|
||||
|
||||
@@ -852,7 +788,7 @@ func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
|
||||
fh, err := c.newEncrypter(in, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
|
||||
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
|
||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||
assert.Equal(t, int64(32), n)
|
||||
}
|
||||
@@ -884,7 +820,7 @@ func (c *closeDetector) Close() error {
|
||||
func TestNewDecrypter(t *testing.T) {
|
||||
c, err := newCipher(NameEncryptionStandard, "", "", true)
|
||||
assert.NoError(t, err)
|
||||
c.cryptoRand = newRandomSource(1E8) // nodge the crypto rand generator
|
||||
c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator
|
||||
|
||||
cd := newCloseDetector(bytes.NewBuffer(file0))
|
||||
fh, err := c.newDecrypter(cd)
|
||||
@@ -935,7 +871,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
|
||||
fh, err := c.newDecrypter(in)
|
||||
assert.NoError(t, err)
|
||||
|
||||
n, err := io.CopyN(ioutil.Discard, fh, 1E6)
|
||||
n, err := io.CopyN(ioutil.Discard, fh, 1e6)
|
||||
assert.Equal(t, io.ErrUnexpectedEOF, err)
|
||||
assert.Equal(t, int64(16), n)
|
||||
}
|
||||
@@ -965,7 +901,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
|
||||
// Open stream with a seek of underlyingOffset
|
||||
var reader io.ReadCloser
|
||||
open := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||
open := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||
end := len(ciphertext)
|
||||
if underlyingLimit >= 0 {
|
||||
end = int(underlyingOffset + underlyingLimit)
|
||||
@@ -1006,7 +942,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
if offset+limit > len(plaintext) {
|
||||
continue
|
||||
}
|
||||
rc, err := c.DecryptDataSeek(open, int64(offset), int64(limit))
|
||||
rc, err := c.DecryptDataSeek(context.Background(), open, int64(offset), int64(limit))
|
||||
assert.NoError(t, err)
|
||||
|
||||
check(rc, offset, limit)
|
||||
@@ -1014,14 +950,14 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
}
|
||||
|
||||
// Try decoding it with a single open and lots of seeks
|
||||
fh, err := c.DecryptDataSeek(open, 0, -1)
|
||||
fh, err := c.DecryptDataSeek(context.Background(), open, 0, -1)
|
||||
assert.NoError(t, err)
|
||||
for _, offset := range trials {
|
||||
for _, limit := range limits {
|
||||
if offset+limit > len(plaintext) {
|
||||
continue
|
||||
}
|
||||
_, err := fh.RangeSeek(int64(offset), io.SeekStart, int64(limit))
|
||||
_, err := fh.RangeSeek(context.Background(), int64(offset), io.SeekStart, int64(limit))
|
||||
assert.NoError(t, err)
|
||||
|
||||
check(fh, offset, limit)
|
||||
@@ -1072,7 +1008,7 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
} {
|
||||
what := fmt.Sprintf("offset = %d, limit = %d", test.offset, test.limit)
|
||||
callCount := 0
|
||||
testOpen := func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||
testOpen := func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
|
||||
switch callCount {
|
||||
case 0:
|
||||
assert.Equal(t, int64(0), underlyingOffset, what)
|
||||
@@ -1084,11 +1020,11 @@ func TestNewDecrypterSeekLimit(t *testing.T) {
|
||||
t.Errorf("Too many calls %d for %s", callCount+1, what)
|
||||
}
|
||||
callCount++
|
||||
return open(underlyingOffset, underlyingLimit)
|
||||
return open(ctx, underlyingOffset, underlyingLimit)
|
||||
}
|
||||
fh, err := c.DecryptDataSeek(testOpen, 0, -1)
|
||||
fh, err := c.DecryptDataSeek(context.Background(), testOpen, 0, -1)
|
||||
assert.NoError(t, err)
|
||||
gotOffset, err := fh.RangeSeek(test.offset, io.SeekStart, test.limit)
|
||||
gotOffset, err := fh.RangeSeek(context.Background(), test.offset, io.SeekStart, test.limit)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, gotOffset, test.offset)
|
||||
}
|
||||
|
||||
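The cipher test changes above all follow one pattern: the open callback and the seek entry points (DecryptDataSeek, RangeSeek) gain a leading context.Context. As a minimal, self-contained sketch of that callback contract, here is an illustrative (ctx, offset, limit) opener over an in-memory buffer, with limit -1 meaning "read to the end". The names are invented for the example and are not rclone's API:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
)

// openFn mirrors the callback shape exercised above: given an offset and
// a limit (-1 meaning "to the end"), return a reader over that range.
type openFn func(ctx context.Context, offset, limit int64) (io.ReadCloser, error)

// makeOpen returns an openFn over an in-memory buffer. Purely
// illustrative - rclone's real callback opens the wrapped remote.
func makeOpen(data []byte) openFn {
	return func(ctx context.Context, offset, limit int64) (io.ReadCloser, error) {
		if err := ctx.Err(); err != nil { // honour cancellation
			return nil, err
		}
		end := int64(len(data))
		if limit >= 0 && offset+limit < end {
			end = offset + limit
		}
		return ioutil.NopCloser(bytes.NewReader(data[offset:end])), nil
	}
}

func main() {
	open := makeOpen([]byte("0123456789"))
	rc, err := open(context.Background(), 2, 3)
	if err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s\n", b) // prints "234"
}

Threading the context through the opener is what lets a seek-triggered reopen be cancelled along with the request that caused it.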
@@ -2,19 +2,21 @@
 package crypt
 
 import (
+"context"
 "fmt"
 "io"
 "path"
 "strings"
 "time"
 
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/accounting"
-"github.com/ncw/rclone/fs/config/configmap"
-"github.com/ncw/rclone/fs/config/configstruct"
-"github.com/ncw/rclone/fs/config/obscure"
-"github.com/ncw/rclone/fs/fspath"
-"github.com/ncw/rclone/fs/hash"
 "github.com/pkg/errors"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/accounting"
+"github.com/rclone/rclone/fs/config/configmap"
+"github.com/rclone/rclone/fs/config/configstruct"
+"github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fs/fspath"
+"github.com/rclone/rclone/fs/hash"
 )
 
 // Globals
@@ -34,19 +36,21 @@ func init() {
 Default: "standard",
 Examples: []fs.OptionExample{
 {
-Value: "off",
-Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
-}, {
 Value: "standard",
 Help: "Encrypt the filenames see the docs for the details.",
 }, {
 Value: "obfuscate",
 Help: "Very simple filename obfuscation.",
+}, {
+Value: "off",
+Help: "Don't encrypt the file names. Adds a \".bin\" extension only.",
 },
 },
 }, {
-Name: "directory_name_encryption",
-Help: "Option to either encrypt directory names or leave them intact.",
+Name: "directory_name_encryption",
+Help: `Option to either encrypt directory names or leave them intact.
+
+NB If filename_encryption is "off" then this option will do nothing.`,
 Default: true,
 Examples: []fs.OptionExample{
 {
@@ -62,6 +66,7 @@ func init() {
 Name: "password",
 Help: "Password or pass phrase for encryption.",
 IsPassword: true,
+Required: true,
 }, {
 Name: "password2",
 Help: "Password or pass phrase for salt. Optional but recommended.\nShould be different to the previous password.",
@@ -142,6 +147,10 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 if err != nil {
 return nil, errors.Wrapf(err, "failed to parse remote %q to wrap", remote)
 }
+// Make sure to remove trailing . reffering to the current dir
+if path.Base(rpath) == "." {
+rpath = strings.TrimSuffix(rpath, ".")
+}
 // Look for a file first
 remotePath := fspath.JoinRootPath(wPath, cipher.EncryptFileName(rpath))
 wrappedFs, err := wInfo.NewFs(wName, remotePath, wConfig)
@@ -169,23 +178,10 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
 WriteMimeType: false,
 BucketBased: true,
 CanHaveEmptyDirectories: true,
 SetTier: true,
 GetTier: true,
 }).Fill(f).Mask(wrappedFs).WrapsFs(f, wrappedFs)
 
-doChangeNotify := wrappedFs.Features().ChangeNotify
-if doChangeNotify != nil {
-f.features.ChangeNotify = func(notifyFunc func(string, fs.EntryType), pollInterval <-chan time.Duration) {
-wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
-decrypted, err := f.DecryptFileName(path)
-if err != nil {
-fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
-return
-}
-notifyFunc(decrypted, entryType)
-}
-doChangeNotify(wrappedNotifyFunc, pollInterval)
-}
-}
 
 return f, err
 }
 
@@ -202,6 +198,7 @@ type Options struct {
 // Fs represents a wrapped fs.Fs
 type Fs struct {
 fs.Fs
+wrapper fs.Fs
 name string
 root string
 opt Options
@@ -244,7 +241,7 @@ func (f *Fs) add(entries *fs.DirEntries, obj fs.Object) {
 }
 
 // Encrypt an directory file name to entries.
-func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
+func (f *Fs) addDir(ctx context.Context, entries *fs.DirEntries, dir fs.Directory) {
 remote := dir.Remote()
 decryptedRemote, err := f.cipher.DecryptDirName(remote)
 if err != nil {
@@ -254,18 +251,18 @@ func (f *Fs) addDir(entries *fs.DirEntries, dir fs.Directory) {
 if f.opt.ShowMapping {
 fs.Logf(decryptedRemote, "Encrypts to %q", remote)
 }
-*entries = append(*entries, f.newDir(dir))
+*entries = append(*entries, f.newDir(ctx, dir))
 }
 
 // Encrypt some directory entries. This alters entries returning it as newEntries.
-func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
+func (f *Fs) encryptEntries(ctx context.Context, entries fs.DirEntries) (newEntries fs.DirEntries, err error) {
 newEntries = entries[:0] // in place filter
 for _, entry := range entries {
 switch x := entry.(type) {
 case fs.Object:
 f.add(&newEntries, x)
 case fs.Directory:
-f.addDir(&newEntries, x)
+f.addDir(ctx, &newEntries, x)
 default:
 return nil, errors.Errorf("Unknown object type %T", entry)
 }
@@ -282,12 +279,12 @@ func (f *Fs) encryptEntries(entries fs.DirEntries) (newEntries fs.DirEntries, er
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
-entries, err = f.Fs.List(f.cipher.EncryptDirName(dir))
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
+entries, err = f.Fs.List(ctx, f.cipher.EncryptDirName(dir))
 if err != nil {
 return nil, err
 }
-return f.encryptEntries(entries)
+return f.encryptEntries(ctx, entries)
 }
 
 // ListR lists the objects and directories of the Fs starting
@@ -306,9 +303,9 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 //
 // Don't implement this unless you have a more efficient way
 // of listing recursively that doing a directory traversal.
-func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
-return f.Fs.Features().ListR(f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
-newEntries, err := f.encryptEntries(entries)
+func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
+return f.Fs.Features().ListR(ctx, f.cipher.EncryptDirName(dir), func(entries fs.DirEntries) error {
+newEntries, err := f.encryptEntries(ctx, entries)
 if err != nil {
 return err
 }
@@ -317,18 +314,18 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
 }
 
 // NewObject finds the Object at remote.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
-o, err := f.Fs.NewObject(f.cipher.EncryptFileName(remote))
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
+o, err := f.Fs.NewObject(ctx, f.cipher.EncryptFileName(remote))
 if err != nil {
 return nil, err
 }
 return f.newObject(o), nil
 }
 
-type putFn func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
+type putFn func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error)
 
 // put implements Put or PutStream
-func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
+func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put putFn) (fs.Object, error) {
 // Encrypt the data into wrappedIn
 wrappedIn, err := f.cipher.EncryptData(in)
 if err != nil {
@@ -354,7 +351,7 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 }
 
 // Transfer the data
-o, err := put(wrappedIn, f.newObjectInfo(src), options...)
+o, err := put(ctx, wrappedIn, f.newObjectInfo(src), options...)
 if err != nil {
 return nil, err
 }
@@ -363,13 +360,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 if ht != hash.None && hasher != nil {
 srcHash := hasher.Sums()[ht]
 var dstHash string
-dstHash, err = o.Hash(ht)
+dstHash, err = o.Hash(ctx, ht)
 if err != nil {
 return nil, errors.Wrap(err, "failed to read destination hash")
 }
 if srcHash != "" && dstHash != "" && srcHash != dstHash {
 // remove object
-err = o.Remove()
+err = o.Remove(ctx)
 if err != nil {
 fs.Errorf(o, "Failed to remove corrupted object: %v", err)
 }
@@ -385,13 +382,13 @@ func (f *Fs) put(in io.Reader, src fs.ObjectInfo, options []fs.OpenOption, put p
 // May create the object even if it returns an error - if so
 // will return the object and the error, otherwise will return
 // nil and the error
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.put(in, src, options, f.Fs.Put)
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return f.put(ctx, in, src, options, f.Fs.Put)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.put(in, src, options, f.Fs.Features().PutStream)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return f.put(ctx, in, src, options, f.Fs.Features().PutStream)
 }
 
 // Hashes returns the supported hash sets.
@@ -402,15 +399,15 @@ func (f *Fs) Hashes() hash.Set {
 // Mkdir makes the directory (container, bucket)
 //
 // Shouldn't return an error if it already exists
-func (f *Fs) Mkdir(dir string) error {
-return f.Fs.Mkdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
+return f.Fs.Mkdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
 // Rmdir removes the directory (container, bucket) if empty
 //
 // Return an error if it doesn't exist or isn't empty
-func (f *Fs) Rmdir(dir string) error {
-return f.Fs.Rmdir(f.cipher.EncryptDirName(dir))
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
+return f.Fs.Rmdir(ctx, f.cipher.EncryptDirName(dir))
 }
 
 // Purge all files in the root and the root directory
@@ -419,12 +416,12 @@ func (f *Fs) Rmdir(dir string) error {
 // quicker than just running Remove() on the result of List()
 //
 // Return an error if it doesn't exist
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 do := f.Fs.Features().Purge
 if do == nil {
 return fs.ErrorCantPurge
 }
-return do()
+return do(ctx)
 }
 
 // Copy src to this remote using server side copy operations.
@@ -436,7 +433,7 @@ func (f *Fs) Purge() error {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 do := f.Fs.Features().Copy
 if do == nil {
 return nil, fs.ErrorCantCopy
@@ -445,7 +442,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 if !ok {
 return nil, fs.ErrorCantCopy
 }
-oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
 if err != nil {
 return nil, err
 }
@@ -461,7 +458,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 do := f.Fs.Features().Move
 if do == nil {
 return nil, fs.ErrorCantMove
@@ -470,7 +467,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 if !ok {
 return nil, fs.ErrorCantMove
 }
-oResult, err := do(o.Object, f.cipher.EncryptFileName(remote))
+oResult, err := do(ctx, o.Object, f.cipher.EncryptFileName(remote))
 if err != nil {
 return nil, err
 }
@@ -485,7 +482,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 do := f.Fs.Features().DirMove
 if do == nil {
 return fs.ErrorCantDirMove
@@ -495,14 +492,14 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
 return fs.ErrorCantDirMove
 }
-return do(srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
+return do(ctx, srcFs.Fs, f.cipher.EncryptDirName(srcRemote), f.cipher.EncryptDirName(dstRemote))
 }
 
 // PutUnchecked uploads the object
 //
 // This will create a duplicate if we upload a new file without
 // checking to see if there is one already - use Put() for that.
-func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 do := f.Fs.Features().PutUnchecked
 if do == nil {
 return nil, errors.New("can't PutUnchecked")
@@ -511,7 +508,7 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 if err != nil {
 return nil, err
 }
-o, err := do(wrappedIn, f.newObjectInfo(src))
+o, err := do(ctx, wrappedIn, f.newObjectInfo(src))
 if err != nil {
 return nil, err
 }
@@ -522,21 +519,21 @@ func (f *Fs) PutUnchecked(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOpt
 //
 // Implement this if you have a way of emptying the trash or
 // otherwise cleaning up old versions of files.
-func (f *Fs) CleanUp() error {
+func (f *Fs) CleanUp(ctx context.Context) error {
 do := f.Fs.Features().CleanUp
 if do == nil {
 return errors.New("can't CleanUp")
 }
-return do()
+return do(ctx)
 }
 
 // About gets quota information from the Fs
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 do := f.Fs.Features().About
 if do == nil {
 return nil, errors.New("About not supported")
 }
-return do()
+return do(ctx)
 }
 
 // UnWrap returns the Fs that this Fs is wrapping
@@ -544,6 +541,16 @@ func (f *Fs) UnWrap() fs.Fs {
 return f.Fs
 }
 
+// WrapFs returns the Fs that is wrapping this Fs
+func (f *Fs) WrapFs() fs.Fs {
+return f.wrapper
+}
+
+// SetWrapper sets the Fs that is wrapping this Fs
+func (f *Fs) SetWrapper(wrapper fs.Fs) {
+f.wrapper = wrapper
+}
+
 // EncryptFileName returns an encrypted file name
 func (f *Fs) EncryptFileName(fileName string) string {
 return f.cipher.EncryptFileName(fileName)
@@ -558,10 +565,10 @@ func (f *Fs) DecryptFileName(encryptedFileName string) (string, error) {
 // src with it, and calculates the hash given by HashType on the fly
 //
 // Note that we break lots of encapsulation in this function.
-func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
+func (f *Fs) ComputeHash(ctx context.Context, o *Object, src fs.Object, hashType hash.Type) (hashStr string, err error) {
 // Read the nonce - opening the file is sufficient to read the nonce in
 // use a limited read so we only read the header
-in, err := o.Object.Open(&fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
+in, err := o.Object.Open(ctx, &fs.RangeOption{Start: 0, End: int64(fileHeaderSize) - 1})
 if err != nil {
 return "", errors.Wrap(err, "failed to open object to read nonce")
 }
@@ -591,7 +598,7 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
 }
 
 // Open the src for input
-in, err = src.Open()
+in, err = src.Open(ctx)
 if err != nil {
 return "", errors.Wrap(err, "failed to open src")
 }
@@ -616,6 +623,75 @@ func (f *Fs) ComputeHash(o *Object, src fs.Object, hashType hash.Type) (hashStr
 return m.Sums()[hashType], nil
 }
 
+// MergeDirs merges the contents of all the directories passed
+// in into the first one and rmdirs the other directories.
+func (f *Fs) MergeDirs(ctx context.Context, dirs []fs.Directory) error {
+do := f.Fs.Features().MergeDirs
+if do == nil {
+return errors.New("MergeDirs not supported")
+}
+out := make([]fs.Directory, len(dirs))
+for i, dir := range dirs {
+out[i] = fs.NewDirCopy(ctx, dir).SetRemote(f.cipher.EncryptDirName(dir.Remote()))
+}
+return do(ctx, out)
+}
+
+// DirCacheFlush resets the directory cache - used in testing
+// as an optional interface
+func (f *Fs) DirCacheFlush() {
+do := f.Fs.Features().DirCacheFlush
+if do != nil {
+do()
+}
+}
+
+// PublicLink generates a public link to the remote path (usually readable by anyone)
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
+do := f.Fs.Features().PublicLink
+if do == nil {
+return "", errors.New("PublicLink not supported")
+}
+o, err := f.NewObject(ctx, remote)
+if err != nil {
+// assume it is a directory
+return do(ctx, f.cipher.EncryptDirName(remote))
+}
+return do(ctx, o.(*Object).Object.Remote())
+}
+
+// ChangeNotify calls the passed function with a path
+// that has had changes. If the implementation
+// uses polling, it should adhere to the given interval.
+func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryType), pollIntervalChan <-chan time.Duration) {
+do := f.Fs.Features().ChangeNotify
+if do == nil {
+return
+}
+wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
+// fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
+var (
+err error
+decrypted string
+)
+switch entryType {
+case fs.EntryDirectory:
+decrypted, err = f.cipher.DecryptDirName(path)
+case fs.EntryObject:
+decrypted, err = f.cipher.DecryptFileName(path)
+default:
+fs.Errorf(path, "crypt ChangeNotify: ignoring unknown EntryType %d", entryType)
+return
+}
+if err != nil {
+fs.Logf(f, "ChangeNotify was unable to decrypt %q: %s", path, err)
+return
+}
+notifyFunc(decrypted, entryType)
+}
+do(ctx, wrappedNotifyFunc, pollIntervalChan)
+}
+
 // Object describes a wrapped for being read from the Fs
 //
 // This decrypts the remote name and decrypts the data
@@ -666,7 +742,7 @@ func (o *Object) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *Object) Hash(ht hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, ht hash.Type) (string, error) {
 return "", hash.ErrUnsupported
 }
 
@@ -676,7 +752,7 @@ func (o *Object) UnWrap() fs.Object {
 }
 
 // Open opens the file for read. Call Close() on the returned io.ReadCloser
-func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 var openOptions []fs.OpenOption
 var offset, limit int64 = 0, -1
 for _, option := range options {
@@ -690,10 +766,10 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 openOptions = append(openOptions, option)
 }
 }
-rc, err = o.f.cipher.DecryptDataSeek(func(underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
+rc, err = o.f.cipher.DecryptDataSeek(ctx, func(ctx context.Context, underlyingOffset, underlyingLimit int64) (io.ReadCloser, error) {
 if underlyingOffset == 0 && underlyingLimit < 0 {
 // Open with no seek
-return o.Object.Open(openOptions...)
+return o.Object.Open(ctx, openOptions...)
 }
 // Open stream with a range of underlyingOffset, underlyingLimit
 end := int64(-1)
@@ -704,7 +780,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 }
 }
 newOpenOptions := append(openOptions, &fs.RangeOption{Start: underlyingOffset, End: end})
-return o.Object.Open(newOpenOptions...)
+return o.Object.Open(ctx, newOpenOptions...)
 }, offset, limit)
 if err != nil {
 return nil, err
@@ -713,17 +789,17 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
 }
 
 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-update := func(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return o.Object, o.Object.Update(in, src, options...)
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+update := func(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return o.Object, o.Object.Update(ctx, in, src, options...)
 }
-_, err := o.f.put(in, src, options, update)
+_, err := o.f.put(ctx, in, src, options, update)
 return err
 }
 
 // newDir returns a dir with the Name decrypted
-func (f *Fs) newDir(dir fs.Directory) fs.Directory {
-newDir := fs.NewDirCopy(dir)
+func (f *Fs) newDir(ctx context.Context, dir fs.Directory) fs.Directory {
+newDir := fs.NewDirCopy(ctx, dir)
 remote := dir.Remote()
 decryptedRemote, err := f.cipher.DecryptDirName(remote)
 if err != nil {
@@ -734,6 +810,24 @@ func (f *Fs) newDir(dir fs.Directory) fs.Directory {
 return newDir
 }
 
+// UserInfo returns info about the connected user
+func (f *Fs) UserInfo(ctx context.Context) (map[string]string, error) {
+do := f.Fs.Features().UserInfo
+if do == nil {
+return nil, fs.ErrorNotImplemented
+}
+return do(ctx)
+}
+
+// Disconnect the current user
+func (f *Fs) Disconnect(ctx context.Context) error {
+do := f.Fs.Features().Disconnect
+if do == nil {
+return fs.ErrorNotImplemented
+}
+return do(ctx)
+}
+
 // ObjectInfo describes a wrapped fs.ObjectInfo for being the source
 //
 // This encrypts the remote name and adjusts the size
@@ -770,10 +864,38 @@ func (o *ObjectInfo) Size() int64 {
 
 // Hash returns the selected checksum of the file
 // If no checksum is available it returns ""
-func (o *ObjectInfo) Hash(hash hash.Type) (string, error) {
+func (o *ObjectInfo) Hash(ctx context.Context, hash hash.Type) (string, error) {
 return "", nil
 }
 
+// ID returns the ID of the Object if known, or "" if not
+func (o *Object) ID() string {
+do, ok := o.Object.(fs.IDer)
+if !ok {
+return ""
+}
+return do.ID()
+}
+
+// SetTier performs changing storage tier of the Object if
+// multiple storage classes supported
+func (o *Object) SetTier(tier string) error {
+do, ok := o.Object.(fs.SetTierer)
+if !ok {
+return errors.New("crypt: underlying remote does not support SetTier")
+}
+return do.SetTier(tier)
+}
+
+// GetTier returns storage tier or class of the Object
+func (o *Object) GetTier() string {
+do, ok := o.Object.(fs.GetTierer)
+if !ok {
+return ""
+}
+return do.GetTier()
+}
+
 // Check the interfaces are satisfied
 var (
 _ fs.Fs = (*Fs)(nil)
@@ -787,7 +909,17 @@ var (
 _ fs.UnWrapper = (*Fs)(nil)
 _ fs.ListRer = (*Fs)(nil)
 _ fs.Abouter = (*Fs)(nil)
+_ fs.Wrapper = (*Fs)(nil)
+_ fs.MergeDirser = (*Fs)(nil)
+_ fs.DirCacheFlusher = (*Fs)(nil)
+_ fs.ChangeNotifier = (*Fs)(nil)
+_ fs.PublicLinker = (*Fs)(nil)
+_ fs.UserInfoer = (*Fs)(nil)
+_ fs.Disconnecter = (*Fs)(nil)
 _ fs.ObjectInfo = (*ObjectInfo)(nil)
 _ fs.Object = (*Object)(nil)
 _ fs.ObjectUnWrapper = (*Object)(nil)
+_ fs.IDer = (*Object)(nil)
+_ fs.SetTierer = (*Object)(nil)
+_ fs.GetTierer = (*Object)(nil)
 )
 
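A pattern worth noting in the crypt.go hunks above: every optional method (Purge, CleanUp, About, MergeDirs, and so on) is looked up as a nil-able function on Features() and only called when the wrapped backend provides it. A compact sketch of that delegation, with hypothetical types standing in for rclone's real feature table:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Features models an optional-interface table: a nil field means the
// wrapped backend does not implement that feature. Illustrative only.
type Features struct {
	CleanUp func(ctx context.Context) error
}

// Wrapper delegates optional features to an inner backend, in the same
// shape the crypt diff above uses.
type Wrapper struct {
	features Features
}

func (w *Wrapper) CleanUp(ctx context.Context) error {
	do := w.features.CleanUp
	if do == nil {
		return errors.New("can't CleanUp")
	}
	return do(ctx)
}

func main() {
	w := &Wrapper{} // inner backend without CleanUp
	fmt.Println(w.CleanUp(context.Background())) // can't CleanUp
	w.features.CleanUp = func(ctx context.Context) error { return nil }
	fmt.Println(w.CleanUp(context.Background())) // <nil>
}

The nil check keeps capability detection explicit: a wrapper only advertises what the backend underneath can actually do.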
@@ -6,13 +6,13 @@ import (
 "path/filepath"
 "testing"
 
-"github.com/ncw/rclone/backend/crypt"
-_ "github.com/ncw/rclone/backend/drive" // for integration tests
-_ "github.com/ncw/rclone/backend/local"
-_ "github.com/ncw/rclone/backend/swift" // for integration tests
-"github.com/ncw/rclone/fs/config/obscure"
-"github.com/ncw/rclone/fstest"
-"github.com/ncw/rclone/fstest/fstests"
+"github.com/rclone/rclone/backend/crypt"
+_ "github.com/rclone/rclone/backend/drive" // for integration tests
+_ "github.com/rclone/rclone/backend/local"
+_ "github.com/rclone/rclone/backend/swift" // for integration tests
+"github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fstest"
+"github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
@@ -21,8 +21,10 @@ func TestIntegration(t *testing.T) {
 t.Skip("Skipping as -remote not set")
 }
 fstests.Run(t, &fstests.Opt{
-RemoteName: *fstest.RemoteName,
-NilObject: (*crypt.Object)(nil),
+RemoteName: *fstest.RemoteName,
+NilObject: (*crypt.Object)(nil),
+UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType"},
 })
 }
 
@@ -42,6 +44,8 @@ func TestStandard(t *testing.T) {
 {Name: name, Key: "password", Value: obscure.MustObscure("potato")},
 {Name: name, Key: "filename_encryption", Value: "standard"},
 },
+UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType"},
 })
 }
 
@@ -61,6 +65,8 @@ func TestOff(t *testing.T) {
 {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 {Name: name, Key: "filename_encryption", Value: "off"},
 },
+UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType"},
 })
 }
 
@@ -80,6 +86,8 @@ func TestObfuscate(t *testing.T) {
 {Name: name, Key: "password", Value: obscure.MustObscure("potato2")},
 {Name: name, Key: "filename_encryption", Value: "obfuscate"},
 },
-SkipBadWindowsCharacters: true,
+SkipBadWindowsCharacters: true,
+UnimplementableFsMethods: []string{"OpenWriterAt"},
+UnimplementableObjectMethods: []string{"MimeType"},
 })
 }
 
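The fstests.Opt changes above declare optional methods the crypt remote cannot pass through (OpenWriterAt, MimeType) so the standard suite skips them. A rough sketch of how such a declaration can drive skipping; Opt and skip here are illustrative stand-ins, not the fstests API:

package main

import "fmt"

// Opt mirrors the shape of the test options above: a remote name plus
// lists of optional methods the backend under test does not implement.
type Opt struct {
	RemoteName                   string
	UnimplementableFsMethods     []string
	UnimplementableObjectMethods []string
}

// skip reports whether a test for the named method should be skipped.
func (o *Opt) skip(method string) bool {
	for _, m := range append(o.UnimplementableFsMethods, o.UnimplementableObjectMethods...) {
		if m == method {
			return true
		}
	}
	return false
}

func main() {
	opt := &Opt{
		RemoteName:                   "TestCrypt:",
		UnimplementableFsMethods:     []string{"OpenWriterAt"},
		UnimplementableObjectMethods: []string{"MimeType"},
	}
	fmt.Println(opt.skip("OpenWriterAt")) // true
	fmt.Println(opt.skip("Copy"))         // false
}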
File diff suppressed because it is too large
@@ -1,9 +1,8 @@
-// +build go1.9
-
 package drive
 
 import (
 "bytes"
+"context"
 "encoding/json"
 "io"
 "io/ioutil"
@@ -12,11 +11,11 @@ import (
 "strings"
 "testing"
 
-_ "github.com/ncw/rclone/backend/local"
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/operations"
-"github.com/ncw/rclone/fstest/fstests"
 "github.com/pkg/errors"
+_ "github.com/rclone/rclone/backend/local"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/operations"
+"github.com/rclone/rclone/fstest/fstests"
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "google.golang.org/api/drive/v3"
@@ -197,7 +196,7 @@ func (f *Fs) InternalTestDocumentImport(t *testing.T) {
 _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
 require.NoError(t, err)
 
-err = operations.CopyFile(f, testFilesFs, "example2.doc", "example2.doc")
+err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.doc", "example2.doc")
 require.NoError(t, err)
 }
 
@@ -211,7 +210,7 @@ func (f *Fs) InternalTestDocumentUpdate(t *testing.T) {
 _, f.importMimeTypes, err = parseExtensions("odt,ods,doc")
 require.NoError(t, err)
 
-err = operations.CopyFile(f, testFilesFs, "example2.xlsx", "example1.ods")
+err = operations.CopyFile(context.Background(), f, testFilesFs, "example2.xlsx", "example1.ods")
 require.NoError(t, err)
 }
 
@@ -222,10 +221,10 @@ func (f *Fs) InternalTestDocumentExport(t *testing.T) {
 f.exportExtensions, _, err = parseExtensions("txt")
 require.NoError(t, err)
 
-obj, err := f.NewObject("example2.txt")
+obj, err := f.NewObject(context.Background(), "example2.txt")
 require.NoError(t, err)
 
-rc, err := obj.Open()
+rc, err := obj.Open(context.Background())
 require.NoError(t, err)
 defer func() { require.NoError(t, rc.Close()) }()
 
@@ -248,10 +247,10 @@ func (f *Fs) InternalTestDocumentLink(t *testing.T) {
 f.exportExtensions, _, err = parseExtensions("link.html")
 require.NoError(t, err)
 
-obj, err := f.NewObject("example2.link.html")
+obj, err := f.NewObject(context.Background(), "example2.link.html")
 require.NoError(t, err)
 
-rc, err := obj.Open()
+rc, err := obj.Open(context.Background())
 require.NoError(t, err)
 defer func() { require.NoError(t, rc.Close()) }()
 
@@ -1,14 +1,12 @@
 // Test Drive filesystem interface
 
-// +build go1.9
-
 package drive
 
 import (
 "testing"
 
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fstest/fstests"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fstest/fstests"
 )
 
 // TestIntegration runs integration tests against the remote
 
@@ -1,6 +0,0 @@
-// Build for unsupported platforms to stop go complaining
-// about "no buildable Go source files "
-
-// +build !go1.9
-
-package drive
@@ -8,23 +8,21 @@
 //
 // This contains code adapted from google.golang.org/api (C) the GO AUTHORS
 
-// +build go1.9
-
 package drive
 
 import (
+"bytes"
+"context"
 "encoding/json"
 "fmt"
 "io"
 "net/http"
 "net/url"
-"regexp"
 "strconv"
 
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/fserrors"
-"github.com/ncw/rclone/lib/readers"
 "github.com/pkg/errors"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/fserrors"
+"github.com/rclone/rclone/lib/readers"
 "google.golang.org/api/drive/v3"
 "google.golang.org/api/googleapi"
 )
@@ -52,15 +50,13 @@ type resumableUpload struct {
 }
 
 // Upload the io.Reader in of size bytes with contentType and info
-func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
+func (f *Fs) Upload(ctx context.Context, in io.Reader, size int64, contentType, fileID, remote string, info *drive.File) (*drive.File, error) {
 params := url.Values{
 "alt": {"json"},
 "uploadType": {"resumable"},
 "fields": {partialFields},
 }
-if f.isTeamDrive {
-params.Set("supportsTeamDrives", "true")
-}
+params.Set("supportsAllDrives", "true")
 if f.opt.KeepRevisionForever {
 params.Set("keepRevisionForever", "true")
 }
@@ -85,18 +81,21 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
 if err != nil {
 return false, err
 }
+req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 googleapi.Expand(req.URL, map[string]string{
 "fileId": fileID,
 })
 req.Header.Set("Content-Type", "application/json; charset=UTF-8")
 req.Header.Set("X-Upload-Content-Type", contentType)
-req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
+if size >= 0 {
+req.Header.Set("X-Upload-Content-Length", fmt.Sprintf("%v", size))
+}
 res, err = f.client.Do(req)
 if err == nil {
 defer googleapi.CloseBody(res)
 err = googleapi.CheckResponse(res)
 }
-return shouldRetry(err)
+return f.shouldRetry(err)
 })
 if err != nil {
 return nil, err
@@ -110,60 +109,31 @@ func (f *Fs) Upload(in io.Reader, size int64, contentType, fileID, remote string
 MediaType: contentType,
 ContentLength: size,
 }
-return rx.Upload()
+return rx.Upload(ctx)
 }
 
 // Make an http.Request for the range passed in
-func (rx *resumableUpload) makeRequest(start int64, body io.ReadSeeker, reqSize int64) *http.Request {
+func (rx *resumableUpload) makeRequest(ctx context.Context, start int64, body io.ReadSeeker, reqSize int64) *http.Request {
 req, _ := http.NewRequest("POST", rx.URI, body)
+req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 req.ContentLength = reqSize
+totalSize := "*"
+if rx.ContentLength >= 0 {
+totalSize = strconv.FormatInt(rx.ContentLength, 10)
+}
 if reqSize != 0 {
-req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
+req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, totalSize))
 } else {
-req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
+req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", totalSize))
 }
 req.Header.Set("Content-Type", rx.MediaType)
 return req
 }
 
-// rangeRE matches the transfer status response from the server. $1 is
-// the last byte index uploaded.
-var rangeRE = regexp.MustCompile(`^0\-(\d+)$`)
-
-// Query drive for the amount transferred so far
-//
-// If error is nil, then start should be valid
-func (rx *resumableUpload) transferStatus() (start int64, err error) {
-req := rx.makeRequest(0, nil, 0)
-res, err := rx.f.client.Do(req)
-if err != nil {
-return 0, err
-}
-defer googleapi.CloseBody(res)
-if res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
-return rx.ContentLength, nil
-}
-if res.StatusCode != statusResumeIncomplete {
-err = googleapi.CheckResponse(res)
-if err != nil {
-return 0, err
-}
-return 0, errors.Errorf("unexpected http return code %v", res.StatusCode)
-}
-Range := res.Header.Get("Range")
-if m := rangeRE.FindStringSubmatch(Range); len(m) == 2 {
-start, err = strconv.ParseInt(m[1], 10, 64)
-if err == nil {
-return start, nil
-}
-}
-return 0, errors.Errorf("unable to parse range %q", Range)
-}
-
 // Transfer a chunk - caller must call googleapi.CloseBody(res) if err == nil || res != nil
-func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
+func (rx *resumableUpload) transferChunk(ctx context.Context, start int64, chunk io.ReadSeeker, chunkSize int64) (int, error) {
 _, _ = chunk.Seek(0, io.SeekStart)
-req := rx.makeRequest(start, chunk, chunkSize)
+req := rx.makeRequest(ctx, start, chunk, chunkSize)
 res, err := rx.f.client.Do(req)
 if err != nil {
 return 599, err
@@ -196,23 +166,45 @@ func (rx *resumableUpload) transferChunk(start int64, chunk io.ReadSeeker, chunk
 
 // Upload uploads the chunks from the input
 // It retries each chunk using the pacer and --low-level-retries
-func (rx *resumableUpload) Upload() (*drive.File, error) {
+func (rx *resumableUpload) Upload(ctx context.Context) (*drive.File, error) {
 start := int64(0)
 var StatusCode int
 var err error
 buf := make([]byte, int(rx.f.opt.ChunkSize))
-for start < rx.ContentLength {
-reqSize := rx.ContentLength - start
-if reqSize >= int64(rx.f.opt.ChunkSize) {
-reqSize = int64(rx.f.opt.ChunkSize)
+for finished := false; !finished; {
+var reqSize int64
+var chunk io.ReadSeeker
+if rx.ContentLength >= 0 {
+// If size known use repeatable reader for smoother bwlimit
+if start >= rx.ContentLength {
+break
+}
+reqSize = rx.ContentLength - start
+if reqSize >= int64(rx.f.opt.ChunkSize) {
+reqSize = int64(rx.f.opt.ChunkSize)
+}
+chunk = readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
+} else {
+// If size unknown read into buffer
+var n int
+n, err = readers.ReadFill(rx.Media, buf)
+if err == io.EOF {
+// Send the last chunk with the correct ContentLength
+// otherwise Google doesn't know we've finished
+rx.ContentLength = start + int64(n)
+finished = true
+} else if err != nil {
+return nil, err
+}
+reqSize = int64(n)
+chunk = bytes.NewReader(buf[:reqSize])
 }
-chunk := readers.NewRepeatableLimitReaderBuffer(rx.Media, buf, reqSize)
 
 // Transfer the chunk
 err = rx.f.pacer.Call(func() (bool, error) {
 fs.Debugf(rx.remote, "Sending chunk %d length %d", start, reqSize)
-StatusCode, err = rx.transferChunk(start, chunk, reqSize)
-again, err := shouldRetry(err)
+StatusCode, err = rx.transferChunk(ctx, start, chunk, reqSize)
+again, err := rx.f.shouldRetry(err)
 if StatusCode == statusResumeIncomplete || StatusCode == http.StatusCreated || StatusCode == http.StatusOK {
 again = false
 err = nil
 
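The reworked Upload loop above supports streams of unknown length: while the size is unknown the Content-Range total is sent as "*", and on io.EOF the final chunk fixes ContentLength so the server knows the upload is complete. A standalone helper mirroring the header format built by makeRequest (the 8 MiB chunk below is just for illustration):

package main

import "fmt"

// contentRange builds the Content-Range header used by the resumable
// upload protocol above. total < 0 means the size is not yet known and
// is sent as "*"; the final chunk supplies the real total.
func contentRange(start, reqSize, total int64) string {
	totalStr := "*"
	if total >= 0 {
		totalStr = fmt.Sprintf("%d", total)
	}
	if reqSize != 0 {
		return fmt.Sprintf("bytes %d-%d/%s", start, start+reqSize-1, totalStr)
	}
	return fmt.Sprintf("bytes */%s", totalStr)
}

func main() {
	const chunk = 8 << 20 // 8 MiB chunk, for illustration
	fmt.Println(contentRange(0, chunk, -1))            // bytes 0-8388607/*
	fmt.Println(contentRange(chunk, 1024, chunk+1024)) // bytes 8388608-8389631/8389632
}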
@@ -5,7 +5,7 @@ import (
 "fmt"
 "testing"
 
-"github.com/ncw/rclone/backend/dropbox/dbhash"
+"github.com/rclone/rclone/backend/dropbox/dbhash"
 "github.com/stretchr/testify/assert"
 )
 
@@ -22,6 +22,7 @@ of path_display and all will be well.
 */
 
 import (
+"context"
 "fmt"
 "io"
 "log"
@@ -37,17 +38,19 @@ import (
 "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/sharing"
 "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/team"
 "github.com/dropbox/dropbox-sdk-go-unofficial/dropbox/users"
-"github.com/ncw/rclone/fs"
-"github.com/ncw/rclone/fs/config"
-"github.com/ncw/rclone/fs/config/configmap"
-"github.com/ncw/rclone/fs/config/configstruct"
-"github.com/ncw/rclone/fs/config/obscure"
-"github.com/ncw/rclone/fs/fserrors"
-"github.com/ncw/rclone/fs/hash"
-"github.com/ncw/rclone/lib/oauthutil"
-"github.com/ncw/rclone/lib/pacer"
-"github.com/ncw/rclone/lib/readers"
 "github.com/pkg/errors"
+"github.com/rclone/rclone/backend/dropbox/dbhash"
+"github.com/rclone/rclone/fs"
+"github.com/rclone/rclone/fs/config"
+"github.com/rclone/rclone/fs/config/configmap"
+"github.com/rclone/rclone/fs/config/configstruct"
+"github.com/rclone/rclone/fs/config/obscure"
+"github.com/rclone/rclone/fs/fserrors"
+"github.com/rclone/rclone/fs/hash"
+"github.com/rclone/rclone/lib/encoder"
+"github.com/rclone/rclone/lib/oauthutil"
+"github.com/rclone/rclone/lib/pacer"
+"github.com/rclone/rclone/lib/readers"
 "golang.org/x/oauth2"
 )
 
@@ -101,10 +104,14 @@ var (
 // A regexp matching path names for files Dropbox ignores
 // See https://www.dropbox.com/en/help/145 - Ignored files
 ignoredFiles = regexp.MustCompile(`(?i)(^|/)(desktop\.ini|thumbs\.db|\.ds_store|icon\r|\.dropbox|\.dropbox.attr)$`)
+
+// DbHashType is the hash.Type for Dropbox
+DbHashType hash.Type
 )
 
 // Register with Fs
 func init() {
+DbHashType = hash.RegisterHash("DropboxHash", 64, dbhash.New)
 fs.Register(&fs.RegInfo{
 Name: "dropbox",
 Description: "Dropbox",
@@ -138,14 +145,28 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
 Help: "Impersonate this user when using a business account.",
 Default: "",
 Advanced: true,
+}, {
+Name: config.ConfigEncoding,
+Help: config.ConfigEncodingHelp,
+Advanced: true,
+// https://www.dropbox.com/help/syncing-uploads/files-not-syncing lists / and \
+// as invalid characters.
+// Testing revealed names with trailing spaces and the DEL character don't work.
+// Also encode invalid UTF-8 bytes as json doesn't handle them properly.
+Default: (encoder.Base |
+encoder.EncodeBackSlash |
+encoder.EncodeDel |
+encoder.EncodeRightSpace |
+encoder.EncodeInvalidUtf8),
 }},
 })
 }
 
 // Options defines the configuration for this backend
 type Options struct {
-ChunkSize fs.SizeSuffix `config:"chunk_size"`
-Impersonate string `config:"impersonate"`
+ChunkSize fs.SizeSuffix `config:"chunk_size"`
+Impersonate string `config:"impersonate"`
+Enc encoder.MultiEncoder `config:"encoding"`
 }
 
 // Fs represents a remote dropbox server
@@ -371,14 +392,15 @@ func (f *Fs) setRoot(root string) {
 // getMetadata gets the metadata for a file or directory
 func (f *Fs) getMetadata(objPath string) (entry files.IsMetadata, notFound bool, err error) {
 err = f.pacer.Call(func() (bool, error) {
-entry, err = f.srv.GetMetadata(&files.GetMetadataArg{Path: objPath})
+entry, err = f.srv.GetMetadata(&files.GetMetadataArg{
+Path: f.opt.Enc.FromStandardPath(objPath),
+})
 return shouldRetry(err)
 })
 if err != nil {
 switch e := err.(type) {
 case files.GetMetadataAPIError:
-switch e.EndpointError.Path.Tag {
-case files.LookupErrorNotFound:
+if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
 notFound = true
 err = nil
 }
@@ -441,7 +463,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
 
 // NewObject finds the Object at remote. If it can't be found
 // it returns the error fs.ErrorObjectNotFound.
-func (f *Fs) NewObject(remote string) (fs.Object, error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
 return f.newObjectWithInfo(remote, nil)
 }
 
@@ -454,7 +476,7 @@ func (f *Fs) NewObject(remote string) (fs.Object, error) {
 //
 // This should return ErrDirNotFound if the directory isn't
 // found.
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 root := f.slashRoot
 if dir != "" {
 root += "/" + dir
@@ -465,7 +487,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 for {
 if !started {
 arg := files.ListFolderArg{
-Path: root,
+Path: f.opt.Enc.FromStandardPath(root),
 Recursive: false,
 }
 if root == "/" {
@@ -478,8 +500,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 if err != nil {
 switch e := err.(type) {
 case files.ListFolderAPIError:
-switch e.EndpointError.Path.Tag {
-case files.LookupErrorNotFound:
+if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorNotFound {
 err = fs.ErrorDirNotFound
 }
 }
@@ -516,7 +537,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 
 // Only the last element is reliably cased in PathDisplay
 entryPath := metadata.PathDisplay
-leaf := path.Base(entryPath)
+leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
 remote := path.Join(dir, leaf)
 if folderInfo != nil {
 d := fs.NewDir(remote, time.Now())
@@ -541,22 +562,22 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 // Copy the reader in to the new object which is returned
 //
 // The new object may have been created if an error is returned
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
 // Temporary Object under construction
 o := &Object{
 fs: f,
 remote: src.Remote(),
 }
-return o, o.Update(in, src, options...)
+return o, o.Update(ctx, in, src, options...)
 }
 
 // PutStream uploads to the remote path with the modTime given of indeterminate size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+return f.Put(ctx, in, src, options...)
 }
 
 // Mkdir creates the container if it doesn't exist
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 root := path.Join(f.slashRoot, dir)
 
 // can't create or run metadata on root
@@ -574,7 +595,7 @@ func (f *Fs) Mkdir(dir string) error {
 
 // create it
 arg2 := files.CreateFolderArg{
-Path: root,
+Path: f.opt.Enc.FromStandardPath(root),
 }
 err = f.pacer.Call(func() (bool, error) {
 _, err = f.srv.CreateFolderV2(&arg2)
@@ -586,7 +607,7 @@ func (f *Fs) Mkdir(dir string) error {
 // Rmdir deletes the container
 //
 // Returns an error if it isn't empty
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 root := path.Join(f.slashRoot, dir)
 
 // can't remove root
@@ -600,6 +621,7 @@ func (f *Fs) Rmdir(dir string) error {
 return errors.Wrap(err, "Rmdir")
 }
 
+root = f.opt.Enc.FromStandardPath(root)
 // check directory empty
 arg := files.ListFolderArg{
 Path: root,
@@ -642,7 +664,7 @@ func (f *Fs) Precision() time.Duration {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantCopy
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't copy - not same remote type")
@@ -656,9 +678,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Copy
-arg := files.RelocationArg{}
-arg.FromPath = srcObj.remotePath()
-arg.ToPath = dstObj.remotePath()
+arg := files.RelocationArg{
+RelocationPath: files.RelocationPath{
+FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
+ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
+},
+}
 var err error
 var result *files.RelocationResult
 err = f.pacer.Call(func() (bool, error) {
@@ -687,10 +712,12 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
 // Optional interface: Only implement this if you have a way of
 // deleting all the files quicker than just running Remove() on the
 // result of List()
-func (f *Fs) Purge() (err error) {
+func (f *Fs) Purge(ctx context.Context) (err error) {
 // Let dropbox delete the filesystem tree
 err = f.pacer.Call(func() (bool, error) {
-_, err = f.srv.DeleteV2(&files.DeleteArg{Path: f.slashRoot})
+_, err = f.srv.DeleteV2(&files.DeleteArg{
+Path: f.opt.Enc.FromStandardPath(f.slashRoot),
+})
 return shouldRetry(err)
 })
 return err
@@ -705,7 +732,7 @@ func (f *Fs) Purge() (err error) {
 // Will only be called if src.Fs().Name() == f.Name()
 //
 // If it isn't possible then return fs.ErrorCantMove
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 srcObj, ok := src.(*Object)
 if !ok {
 fs.Debugf(src, "Can't move - not same remote type")
@@ -719,9 +746,12 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // Do the move
-arg := files.RelocationArg{}
-arg.FromPath = srcObj.remotePath()
-arg.ToPath = dstObj.remotePath()
+arg := files.RelocationArg{
+RelocationPath: files.RelocationPath{
+FromPath: f.opt.Enc.FromStandardPath(srcObj.remotePath()),
+ToPath: f.opt.Enc.FromStandardPath(dstObj.remotePath()),
+},
+}
 var err error
 var result *files.RelocationResult
 err = f.pacer.Call(func() (bool, error) {
@@ -745,8 +775,8 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 }
 
 // PublicLink adds a "readable by anyone with link" permission on the given file or folder.
-func (f *Fs) PublicLink(remote string) (link string, err error) {
-absPath := "/" + path.Join(f.Root(), remote)
+func (f *Fs) PublicLink(ctx context.Context, remote string) (link string, err error) {
+absPath := f.opt.Enc.FromStandardPath(path.Join(f.slashRoot, remote))
 fs.Debugf(f, "attempting to share '%s' (absolute path: %s)", remote, absPath)
 createArg := sharing.CreateSharedLinkWithSettingsArg{
 Path: absPath,
@@ -757,7 +787,8 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 return shouldRetry(err)
 })
 
-if err != nil && strings.Contains(err.Error(), sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
+if err != nil && strings.Contains(err.Error(),
+sharing.CreateSharedLinkWithSettingsErrorSharedLinkAlreadyExists) {
 fs.Debugf(absPath, "has a public link already, attempting to retrieve it")
 listArg := sharing.ListSharedLinksArg{
 Path: absPath,
@@ -798,7 +829,7 @@ func (f *Fs) PublicLink(remote string) (link string, err error) {
 // If it isn't possible then return fs.ErrorCantDirMove
 //
 // If destination exists then return fs.ErrorDirExists
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 srcFs, ok := src.(*Fs)
 if !ok {
 fs.Debugf(srcFs, "Can't move directory - not same remote type")
@@ -819,9 +850,12 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 // ...apparently not necessary
 
 // Do the move
-arg := files.RelocationArg{}
-arg.FromPath = srcPath
-arg.ToPath = dstPath
+arg := files.RelocationArg{
+RelocationPath: files.RelocationPath{
+FromPath: f.opt.Enc.FromStandardPath(srcPath),
+ToPath: f.opt.Enc.FromStandardPath(dstPath),
+},
+}
 err = f.pacer.Call(func() (bool, error) {
 _, err = f.srv.MoveV2(&arg)
 return shouldRetry(err)
@@ -834,7 +868,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }
 
 // About gets quota information
-func (f *Fs) About() (usage *fs.Usage, err error) {
+func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 var q *users.SpaceUsage
 err = f.pacer.Call(func() (bool, error) {
 q, err = f.users.GetSpaceUsage()
@@ -862,7 +896,7 @@ func (f *Fs) About() (usage *fs.Usage, err error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-return hash.Set(hash.Dropbox)
+return hash.Set(DbHashType)
 }
 
 // ------------------------------------------------------------
@@ -886,8 +920,8 @@ func (o *Object) Remote() string {
 }
 
 // Hash returns the dropbox special hash
-func (o *Object) Hash(t hash.Type) (string, error) {
-if t != hash.Dropbox {
+func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
+if t != DbHashType {
 return "", hash.ErrUnsupported
 }
 err := o.readMetaData()
@@ -948,7 +982,7 @@ func (o *Object) readMetaData() (err error) {
 //
 // It attempts to read the objects mtime and if that isn't present the
 // LastModified returned in the http headers
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 err := o.readMetaData()
 if err != nil {
 fs.Debugf(o, "Failed to read metadata: %v", err)
@@ -960,7 +994,7 @@ func (o *Object) ModTime() time.Time {
 // SetModTime sets the modification time of the local fs object
 //
 // Commits the datastore
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 // Dropbox doesn't have a way of doing this so returning this
 // error will cause the file to be deleted first then
 // re-uploaded to set the time.
@@ -973,9 +1007,13 @@ func (o *Object) Storable() bool {
 }
 
 // Open an object for read
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 fs.FixRangeOption(options, o.bytes)
 headers := fs.OpenOptionHeaders(options)
-arg := files.DownloadArg{Path: o.remotePath(), ExtraHeaders: headers}
+arg := files.DownloadArg{
+Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
+ExtraHeaders: headers,
+}
 err = o.fs.pacer.Call(func() (bool, error) {
 _, in, err = o.fs.srv.Download(&arg)
 return shouldRetry(err)
@@ -984,7 +1022,7 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
 switch e := err.(type) {
 case files.DownloadAPIError:
 // Don't attempt to retry copyright violation errors
-if e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
+if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.LookupErrorRestrictedContent {
|
||||
return nil, fserrors.NoRetryError(err)
|
||||
}
|
||||
}
|
||||
@@ -1085,6 +1123,13 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
||||
return false, nil
|
||||
}
|
||||
entry, err = o.fs.srv.UploadSessionFinish(args, chunk)
|
||||
// If error is insufficient space then don't retry
|
||||
if e, ok := err.(files.UploadSessionFinishAPIError); ok {
|
||||
if e.EndpointError != nil && e.EndpointError.Path != nil && e.EndpointError.Path.Tag == files.WriteErrorInsufficientSpace {
|
||||
err = fserrors.NoRetryError(err)
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
// after the first chunk is uploaded, we retry everything
|
||||
return err != nil, err
|
||||
})
|
||||
@@ -1099,16 +1144,15 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
|
||||
// Copy the reader into the object updating modTime and size
|
||||
//
|
||||
// The new object may have been created if an error is returned
|
||||
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
|
||||
remote := o.remotePath()
|
||||
if ignoredFiles.MatchString(remote) {
|
||||
fs.Logf(o, "File name disallowed - not uploading")
|
||||
return nil
|
||||
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
|
||||
}
|
||||
commitInfo := files.NewCommitInfo(o.remotePath())
|
||||
commitInfo := files.NewCommitInfo(o.fs.opt.Enc.FromStandardPath(o.remotePath()))
|
||||
commitInfo.Mode.Tag = "overwrite"
|
||||
// The Dropbox API only accepts timestamps in UTC with second precision.
|
||||
commitInfo.ClientModified = src.ModTime().UTC().Round(time.Second)
|
||||
commitInfo.ClientModified = src.ModTime(ctx).UTC().Round(time.Second)
|
||||
|
||||
size := src.Size()
|
||||
var err error
|
||||
@@ -1128,9 +1172,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
|
||||
}
|
||||
|
||||
// Remove an object
|
||||
func (o *Object) Remove() (err error) {
|
||||
func (o *Object) Remove(ctx context.Context) (err error) {
|
||||
err = o.fs.pacer.Call(func() (bool, error) {
|
||||
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{Path: o.remotePath()})
|
||||
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
|
||||
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
|
||||
})
|
||||
return shouldRetry(err)
|
||||
})
|
||||
return err
|
||||
|
||||
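[Editor's note] Nearly every API call in this backend goes through f.pacer.Call with a closure returning (retry bool, err error). A minimal, self-contained sketch of that idiom follows; retryCall and maxTries are illustrative names, not rclone's actual fs.Pacer, which additionally does adaptive sleeps and rate limiting:

package main

import (
    "errors"
    "fmt"
    "time"
)

// retryCall keeps invoking fn until it reports no retry is needed,
// sleeping between attempts, and returns the last error seen.
func retryCall(maxTries int, fn func() (bool, error)) error {
    var err error
    for try := 0; try < maxTries; try++ {
        var again bool
        again, err = fn()
        if !again {
            return err
        }
        time.Sleep(100 * time.Millisecond) // fixed sleep; the real pacer backs off
    }
    return err
}

func main() {
    tries := 0
    err := retryCall(3, func() (bool, error) {
        tries++
        if tries < 3 {
            return true, errors.New("transient error") // ask for a retry
        }
        return false, nil // success, stop retrying
    })
    fmt.Println(tries, err) // 3 <nil>
}

The closure owns the decision to retry, which is why the diff can mark specific failures (copyright violations, insufficient space) as fserrors.NoRetryError and bail out early.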
@@ -4,8 +4,8 @@ package dropbox
import (
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/fstests"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
backend/fichier/api.go · 396 lines · new file
@@ -0,0 +1,396 @@
package fichier

import (
    "context"
    "io"
    "net/http"
    "regexp"
    "strconv"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/fserrors"
    "github.com/rclone/rclone/lib/rest"
)

// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
    429, // Too Many Requests.
    500, // Internal Server Error
    502, // Bad Gateway
    503, // Service Unavailable
    504, // Gateway Timeout
    509, // Bandwidth Limit Exceeded
}

// shouldRetry returns a boolean as to whether this resp and err
// deserve to be retried. It returns the err as a convenience
func shouldRetry(resp *http.Response, err error) (bool, error) {
    return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}

var isAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString

func (f *Fs) getDownloadToken(ctx context.Context, url string) (*GetTokenResponse, error) {
    request := DownloadRequest{
        URL:    url,
        Single: 1,
    }
    opts := rest.Opts{
        Method: "POST",
        Path:   "/download/get_token.cgi",
    }

    var token GetTokenResponse
    err := f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, &request, &token)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't list files")
    }

    return &token, nil
}

func fileFromSharedFile(file *SharedFile) File {
    return File{
        URL:      file.Link,
        Filename: file.Filename,
        Size:     file.Size,
    }
}

func (f *Fs) listSharedFiles(ctx context.Context, id string) (entries fs.DirEntries, err error) {
    opts := rest.Opts{
        Method:     "GET",
        RootURL:    "https://1fichier.com/dir/",
        Path:       id,
        Parameters: map[string][]string{"json": {"1"}},
    }

    var sharedFiles SharedFolderResponse
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, nil, &sharedFiles)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't list files")
    }

    entries = make([]fs.DirEntry, len(sharedFiles))

    for i, sharedFile := range sharedFiles {
        entries[i] = f.newObjectFromFile(ctx, "", fileFromSharedFile(&sharedFile))
    }

    return entries, nil
}

func (f *Fs) listFiles(ctx context.Context, directoryID int) (filesList *FilesList, err error) {
    // fs.Debugf(f, "Requesting files for dir `%s`", directoryID)
    request := ListFilesRequest{
        FolderID: directoryID,
    }

    opts := rest.Opts{
        Method: "POST",
        Path:   "/file/ls.cgi",
    }

    filesList = &FilesList{}
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, &request, filesList)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't list files")
    }
    for i := range filesList.Items {
        item := &filesList.Items[i]
        item.Filename = f.opt.Enc.ToStandardName(item.Filename)
    }

    return filesList, nil
}

func (f *Fs) listFolders(ctx context.Context, directoryID int) (foldersList *FoldersList, err error) {
    // fs.Debugf(f, "Requesting folders for id `%s`", directoryID)

    request := ListFolderRequest{
        FolderID: directoryID,
    }

    opts := rest.Opts{
        Method: "POST",
        Path:   "/folder/ls.cgi",
    }

    foldersList = &FoldersList{}
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, &request, foldersList)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't list folders")
    }
    foldersList.Name = f.opt.Enc.ToStandardName(foldersList.Name)
    for i := range foldersList.SubFolders {
        folder := &foldersList.SubFolders[i]
        folder.Name = f.opt.Enc.ToStandardName(folder.Name)
    }

    // fs.Debugf(f, "Got FoldersList for id `%s`", directoryID)

    return foldersList, err
}

func (f *Fs) listDir(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        return nil, err
    }

    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return nil, err
    }

    folderID, err := strconv.Atoi(directoryID)
    if err != nil {
        return nil, err
    }

    files, err := f.listFiles(ctx, folderID)
    if err != nil {
        return nil, err
    }

    folders, err := f.listFolders(ctx, folderID)
    if err != nil {
        return nil, err
    }

    entries = make([]fs.DirEntry, len(files.Items)+len(folders.SubFolders))

    for i, item := range files.Items {
        entries[i] = f.newObjectFromFile(ctx, dir, item)
    }

    for i, folder := range folders.SubFolders {
        createDate, err := time.Parse("2006-01-02 15:04:05", folder.CreateDate)
        if err != nil {
            return nil, err
        }

        fullPath := getRemote(dir, folder.Name)
        folderID := strconv.Itoa(folder.ID)

        entries[len(files.Items)+i] = fs.NewDir(fullPath, createDate).SetID(folderID)

        // fs.Debugf(f, "Put Path `%s` for id `%d` into dircache", fullPath, folder.ID)
        f.dirCache.Put(fullPath, folderID)
    }

    return entries, nil
}

func (f *Fs) newObjectFromFile(ctx context.Context, dir string, item File) *Object {
    return &Object{
        fs:     f,
        remote: getRemote(dir, item.Filename),
        file:   item,
    }
}

func getRemote(dir, fileName string) string {
    if dir == "" {
        return fileName
    }

    return dir + "/" + fileName
}

func (f *Fs) makeFolder(ctx context.Context, leaf string, folderID int) (response *MakeFolderResponse, err error) {
    name := f.opt.Enc.FromStandardName(leaf)
    // fs.Debugf(f, "Creating folder `%s` in id `%s`", name, directoryID)

    request := MakeFolderRequest{
        FolderID: folderID,
        Name:     name,
    }

    opts := rest.Opts{
        Method: "POST",
        Path:   "/folder/mkdir.cgi",
    }

    response = &MakeFolderResponse{}
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, &request, response)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't create folder")
    }

    // fs.Debugf(f, "Created Folder `%s` in id `%s`", name, directoryID)

    return response, err
}

func (f *Fs) removeFolder(ctx context.Context, name string, folderID int) (response *GenericOKResponse, err error) {
    // fs.Debugf(f, "Removing folder with id `%s`", directoryID)

    request := &RemoveFolderRequest{
        FolderID: folderID,
    }

    opts := rest.Opts{
        Method: "POST",
        Path:   "/folder/rm.cgi",
    }

    response = &GenericOKResponse{}
    var resp *http.Response
    err = f.pacer.Call(func() (bool, error) {
        resp, err = f.rest.CallJSON(ctx, &opts, request, response)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't remove folder")
    }
    if response.Status != "OK" {
        return nil, errors.New("Can't remove non-empty dir")
    }

    // fs.Debugf(f, "Removed Folder with id `%s`", directoryID)

    return response, nil
}

func (f *Fs) deleteFile(ctx context.Context, url string) (response *GenericOKResponse, err error) {
    request := &RemoveFileRequest{
        Files: []RmFile{
            {url},
        },
    }

    opts := rest.Opts{
        Method: "POST",
        Path:   "/file/rm.cgi",
    }

    response = &GenericOKResponse{}
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, request, response)
        return shouldRetry(resp, err)
    })

    if err != nil {
        return nil, errors.Wrap(err, "couldn't remove file")
    }

    // fs.Debugf(f, "Removed file with url `%s`", url)

    return response, nil
}

func (f *Fs) getUploadNode(ctx context.Context) (response *GetUploadNodeResponse, err error) {
    // fs.Debugf(f, "Requesting Upload node")

    opts := rest.Opts{
        Method:      "GET",
        ContentType: "application/json", // 1Fichier API is bad
        Path:        "/upload/get_upload_server.cgi",
    }

    response = &GetUploadNodeResponse{}
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
        return shouldRetry(resp, err)
    })
    if err != nil {
        return nil, errors.Wrap(err, "didn't get an upload node")
    }

    // fs.Debugf(f, "Got Upload node")

    return response, err
}

func (f *Fs) uploadFile(ctx context.Context, in io.Reader, size int64, fileName, folderID, uploadID, node string) (response *http.Response, err error) {
    // fs.Debugf(f, "Uploading File `%s`", fileName)

    fileName = f.opt.Enc.FromStandardName(fileName)

    if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
        return nil, errors.New("Invalid UploadID")
    }

    opts := rest.Opts{
        Method: "POST",
        Path:   "/upload.cgi",
        Parameters: map[string][]string{
            "id": {uploadID},
        },
        NoResponse:           true,
        Body:                 in,
        ContentLength:        &size,
        MultipartContentName: "file[]",
        MultipartFileName:    fileName,
        MultipartParams: map[string][]string{
            "did": {folderID},
        },
    }

    if node != "" {
        opts.RootURL = "https://" + node
    }

    err = f.pacer.CallNoRetry(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, nil, nil)
        return shouldRetry(resp, err)
    })

    if err != nil {
        return nil, errors.Wrap(err, "couldn't upload file")
    }

    // fs.Debugf(f, "Uploaded File `%s`", fileName)

    return response, err
}

func (f *Fs) endUpload(ctx context.Context, uploadID string, nodeurl string) (response *EndFileUploadResponse, err error) {
    // fs.Debugf(f, "Ending File Upload `%s`", uploadID)

    if len(uploadID) > 10 || !isAlphaNumeric(uploadID) {
        return nil, errors.New("Invalid UploadID")
    }

    opts := rest.Opts{
        Method:  "GET",
        Path:    "/end.pl",
        RootURL: "https://" + nodeurl,
        Parameters: map[string][]string{
            "xid": {uploadID},
        },
        ExtraHeaders: map[string]string{
            "JSON": "1",
        },
    }

    response = &EndFileUploadResponse{}
    err = f.pacer.Call(func() (bool, error) {
        resp, err := f.rest.CallJSON(ctx, &opts, nil, response)
        return shouldRetry(resp, err)
    })

    if err != nil {
        return nil, errors.Wrap(err, "couldn't finish file upload")
    }

    return response, err
}
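[Editor's note] shouldRetry above classifies failures by HTTP status before handing the decision to the pacer. A standalone sketch of just that classification, assuming nothing beyond the standard library; the status list mirrors retryErrorCodes, while fserrors.ShouldRetry adds timeout and temporary-network-error checks that this sketch omits:

package main

import (
    "fmt"
    "net/http"
)

var retryStatus = map[int]bool{
    429: true, // Too Many Requests
    500: true, // Internal Server Error
    502: true, // Bad Gateway
    503: true, // Service Unavailable
    504: true, // Gateway Timeout
    509: true, // Bandwidth Limit Exceeded
}

// shouldRetryHTTP reports whether a response status is worth retrying.
func shouldRetryHTTP(resp *http.Response) bool {
    return resp != nil && retryStatus[resp.StatusCode]
}

func main() {
    fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 503})) // true
    fmt.Println(shouldRetryHTTP(&http.Response{StatusCode: 404})) // false
}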
backend/fichier/fichier.go · 435 lines · new file
@@ -0,0 +1,435 @@
package fichier

import (
    "context"
    "fmt"
    "io"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/fshttp"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/dircache"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/rest"
)

const (
    rootID        = "0"
    apiBaseURL    = "https://api.1fichier.com/v1"
    minSleep      = 334 * time.Millisecond // 3 API calls per second is recommended
    maxSleep      = 5 * time.Second
    decayConstant = 2 // bigger for slower decay, exponential
)

func init() {
    fs.Register(&fs.RegInfo{
        Name:        "fichier",
        Description: "1Fichier",
        Config: func(name string, config configmap.Mapper) {
        },
        NewFs: NewFs,
        Options: []fs.Option{{
            Help: "Your API Key, get it from https://1fichier.com/console/params.pl",
            Name: "api_key",
        }, {
            Help:     "If you want to download a shared folder, add this parameter",
            Name:     "shared_folder",
            Required: false,
            Advanced: true,
        }, {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            // Characters that need escaping
            //
            // '\\': '\', // FULLWIDTH REVERSE SOLIDUS
            // '<':  '<', // FULLWIDTH LESS-THAN SIGN
            // '>':  '>', // FULLWIDTH GREATER-THAN SIGN
            // '"':  '"', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
            // '\'': ''', // FULLWIDTH APOSTROPHE
            // '$':  '$', // FULLWIDTH DOLLAR SIGN
            // '`':  '`', // FULLWIDTH GRAVE ACCENT
            //
            // Leading space and trailing space
            Default: (encoder.Display |
                encoder.EncodeBackSlash |
                encoder.EncodeSingleQuote |
                encoder.EncodeBackQuote |
                encoder.EncodeDoubleQuote |
                encoder.EncodeLtGt |
                encoder.EncodeDollar |
                encoder.EncodeLeftSpace |
                encoder.EncodeRightSpace |
                encoder.EncodeInvalidUtf8),
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    APIKey       string               `config:"api_key"`
    SharedFolder string               `config:"shared_folder"`
    Enc          encoder.MultiEncoder `config:"encoding"`
}

// Fs is the interface a cloud storage system must provide
type Fs struct {
    root       string
    name       string
    features   *fs.Features
    opt        Options
    dirCache   *dircache.DirCache
    baseClient *http.Client
    pacer      *fs.Pacer
    rest       *rest.Client
}

// FindLeaf finds a directory of name leaf in the folder with ID pathID
func (f *Fs) FindLeaf(ctx context.Context, pathID, leaf string) (pathIDOut string, found bool, err error) {
    folderID, err := strconv.Atoi(pathID)
    if err != nil {
        return "", false, err
    }
    folders, err := f.listFolders(ctx, folderID)
    if err != nil {
        return "", false, err
    }

    for _, folder := range folders.SubFolders {
        if folder.Name == leaf {
            pathIDOut := strconv.Itoa(folder.ID)
            return pathIDOut, true, nil
        }
    }

    return "", false, nil
}

// CreateDir makes a directory with pathID as parent and name leaf
func (f *Fs) CreateDir(ctx context.Context, pathID, leaf string) (newID string, err error) {
    folderID, err := strconv.Atoi(pathID)
    if err != nil {
        return "", err
    }
    resp, err := f.makeFolder(ctx, leaf, folderID)
    if err != nil {
        return "", err
    }
    return strconv.Itoa(resp.FolderID), err
}

// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
    return f.name
}

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
    return f.root
}

// String returns a description of the FS
func (f *Fs) String() string {
    return fmt.Sprintf("1Fichier root '%s'", f.root)
}

// Precision of the ModTimes in this Fs
func (f *Fs) Precision() time.Duration {
    return fs.ModTimeNotSupported
}

// Hashes returns the supported hash types of the filesystem
func (f *Fs) Hashes() hash.Set {
    return hash.Set(hash.Whirlpool)
}

// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
    return f.features
}

// NewFs makes a new Fs object from the path
//
// The path is of the form remote:path
//
// Remotes are looked up in the config file. If the remote isn't
// found then NotFoundInConfigFile will be returned.
//
// On Windows avoid single character remote names as they can be mixed
// up with drive letters.
func NewFs(name string, root string, config configmap.Mapper) (fs.Fs, error) {
    opt := new(Options)
    err := configstruct.Set(config, opt)
    if err != nil {
        return nil, err
    }

    // If using a Shared Folder override root
    if opt.SharedFolder != "" {
        root = ""
    }

    // workaround for wonky parser
    root = strings.Trim(root, "/")

    f := &Fs{
        name:       name,
        root:       root,
        opt:        *opt,
        pacer:      fs.NewPacer(pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
        baseClient: &http.Client{},
    }

    f.features = (&fs.Features{
        DuplicateFiles:          true,
        CanHaveEmptyDirectories: true,
    }).Fill(f)

    client := fshttp.NewClient(fs.Config)

    f.rest = rest.NewClient(client).SetRoot(apiBaseURL)

    f.rest.SetHeader("Authorization", "Bearer "+f.opt.APIKey)

    f.dirCache = dircache.New(root, rootID, f)

    ctx := context.Background()

    // Find the current root
    err = f.dirCache.FindRoot(ctx, false)
    if err != nil {
        // Assume it is a file
        newRoot, remote := dircache.SplitPath(root)
        tempF := *f
        tempF.dirCache = dircache.New(newRoot, rootID, &tempF)
        tempF.root = newRoot
        // Make new Fs which is the parent
        err = tempF.dirCache.FindRoot(ctx, false)
        if err != nil {
            // No root so return old f
            return f, nil
        }
        _, err := tempF.NewObject(ctx, remote)
        if err != nil {
            if err == fs.ErrorObjectNotFound {
                // File doesn't exist so return old f
                return f, nil
            }
            return nil, err
        }
        f.features.Fill(&tempF)
        // XXX: update the old f here instead of returning tempF, since
        // `features` were already filled with functions having *f as a receiver.
        // See https://github.com/rclone/rclone/issues/2182
        f.dirCache = tempF.dirCache
        f.root = tempF.root
        // return an error with an fs which points to the parent
        return f, fs.ErrorIsFile
    }
    return f, nil
}

// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    if f.opt.SharedFolder != "" {
        return f.listSharedFiles(ctx, f.opt.SharedFolder)
    }

    dirContent, err := f.listDir(ctx, dir)
    if err != nil {
        return nil, err
    }

    return dirContent, nil
}

// NewObject finds the Object at remote. If it can't be found
// it returns the error ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
    leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, false)
    if err != nil {
        if err == fs.ErrorDirNotFound {
            return nil, fs.ErrorObjectNotFound
        }
        return nil, err
    }

    folderID, err := strconv.Atoi(directoryID)
    if err != nil {
        return nil, err
    }
    files, err := f.listFiles(ctx, folderID)
    if err != nil {
        return nil, err
    }

    for _, file := range files.Items {
        if file.Filename == leaf {
            path, ok := f.dirCache.GetInv(directoryID)

            if !ok {
                return nil, errors.New("Cannot find dir in dircache")
            }

            return f.newObjectFromFile(ctx, path, file), nil
        }
    }

    return nil, fs.ErrorObjectNotFound
}

// Put in to the remote path with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Put should either
// return an error or upload it properly (rather than e.g. calling panic).
//
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    existingObj, err := f.NewObject(ctx, src.Remote())
    switch err {
    case nil:
        return existingObj, existingObj.Update(ctx, in, src, options...)
    case fs.ErrorObjectNotFound:
        // Not found so create it
        return f.PutUnchecked(ctx, in, src, options...)
    default:
        return nil, err
    }
}

// putUnchecked uploads the object with the given name and size
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) putUnchecked(ctx context.Context, in io.Reader, remote string, size int64, options ...fs.OpenOption) (fs.Object, error) {
    if size > int64(100e9) {
        return nil, errors.New("File too big, can't upload")
    } else if size == 0 {
        return nil, fs.ErrorCantUploadEmptyFiles
    }

    nodeResponse, err := f.getUploadNode(ctx)
    if err != nil {
        return nil, err
    }

    leaf, directoryID, err := f.dirCache.FindRootAndPath(ctx, remote, true)
    if err != nil {
        return nil, err
    }

    _, err = f.uploadFile(ctx, in, size, leaf, directoryID, nodeResponse.ID, nodeResponse.URL)
    if err != nil {
        return nil, err
    }

    fileUploadResponse, err := f.endUpload(ctx, nodeResponse.ID, nodeResponse.URL)
    if err != nil {
        return nil, err
    }

    if len(fileUploadResponse.Links) != 1 {
        return nil, errors.New("unexpected number of files")
    }

    link := fileUploadResponse.Links[0]
    fileSize, err := strconv.ParseInt(link.Size, 10, 64)

    if err != nil {
        return nil, err
    }

    return &Object{
        fs:     f,
        remote: remote,
        file: File{
            ACL:         0,
            CDN:         0,
            Checksum:    link.Whirlpool,
            ContentType: "",
            Date:        time.Now().Format("2006-01-02 15:04:05"),
            Filename:    link.Filename,
            Pass:        0,
            Size:        fileSize,
            URL:         link.Download,
        },
    }, nil
}

// PutUnchecked uploads the object
//
// This will create a duplicate if we upload a new file without
// checking to see if there is one already - use Put() for that.
func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.putUnchecked(ctx, in, src.Remote(), src.Size(), options...)
}

// Mkdir makes the directory (container, bucket)
//
// Shouldn't return an error if it already exists
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
    err := f.dirCache.FindRoot(ctx, true)
    if err != nil {
        return err
    }
    if dir != "" {
        _, err = f.dirCache.FindDir(ctx, dir, true)
    }
    return err
}

// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    err := f.dirCache.FindRoot(ctx, false)
    if err != nil {
        return err
    }

    directoryID, err := f.dirCache.FindDir(ctx, dir, false)
    if err != nil {
        return err
    }

    folderID, err := strconv.Atoi(directoryID)
    if err != nil {
        return err
    }

    _, err = f.removeFolder(ctx, dir, folderID)
    if err != nil {
        return err
    }

    f.dirCache.FlushDir(dir)

    return nil
}

// Check the interfaces are satisfied
var (
    _ fs.Fs              = (*Fs)(nil)
    _ fs.PutUncheckeder  = (*Fs)(nil)
    _ dircache.DirCacher = (*Fs)(nil)
)
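[Editor's note] NewFs above has a second job: if the supplied root turns out to be a file rather than a directory, it rebuilds itself on the parent directory and returns fs.ErrorIsFile so the caller knows. A minimal sketch of that split-and-retry shape; isDir, newRoot and errIsFile are hypothetical stand-ins for the dircache lookup and rclone's sentinel error:

package main

import (
    "errors"
    "fmt"
    "path"
)

var errIsFile = errors.New("is a file not a directory")

// newRoot decides whether root is usable as a directory; if not, it
// falls back to the parent and signals that root named a file.
func newRoot(root string, isDir func(string) bool) (string, error) {
    if isDir(root) {
        return root, nil
    }
    parent := path.Dir(root)
    if isDir(parent) {
        return parent, errIsFile // caller keeps the parent Fs plus the sentinel
    }
    return root, nil // nothing found; keep the original root
}

func main() {
    isDir := func(p string) bool { return p == "docs" }
    r, err := newRoot("docs/readme.txt", isDir)
    fmt.Println(r, err) // docs is a file not a directory
}

Note the XXX comment in the real code: the fallback mutates the old f in place rather than returning the temporary Fs, because the features table already holds methods bound to *f.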
backend/fichier/fichier_test.go · 17 lines · new file
@@ -0,0 +1,17 @@
// Test 1Fichier filesystem interface
package fichier

import (
    "testing"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
    fs.Config.LogLevel = fs.LogLevelDebug
    fstests.Run(t, &fstests.Opt{
        RemoteName: "TestFichier:",
    })
}
backend/fichier/object.go · 158 lines · new file
@@ -0,0 +1,158 @@
package fichier

import (
    "context"
    "io"
    "net/http"
    "time"

    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/rest"
)

// Object is a filesystem like object provided by an Fs
type Object struct {
    fs     *Fs
    remote string
    file   File
}

// String returns a description of the Object
func (o *Object) String() string {
    return o.file.Filename
}

// Remote returns the remote path
func (o *Object) Remote() string {
    return o.remote
}

// ModTime returns the modification date of the file
// It should return a best guess if one isn't available
func (o *Object) ModTime(ctx context.Context) time.Time {
    modTime, err := time.Parse("2006-01-02 15:04:05", o.file.Date)

    if err != nil {
        return time.Now()
    }

    return modTime
}

// Size returns the size of the file
func (o *Object) Size() int64 {
    return o.file.Size
}

// Fs returns read only access to the Fs that this object is part of
func (o *Object) Fs() fs.Info {
    return o.fs
}

// Hash returns the selected checksum of the file
// If no checksum is available it returns ""
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    if t != hash.Whirlpool {
        return "", hash.ErrUnsupported
    }

    return o.file.Checksum, nil
}

// Storable says whether this object can be stored
func (o *Object) Storable() bool {
    return true
}

// SetModTime sets the metadata on the object to set the modification date
func (o *Object) SetModTime(context.Context, time.Time) error {
    return fs.ErrorCantSetModTime
    //return errors.New("setting modtime is not supported for 1fichier remotes")
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
    fs.FixRangeOption(options, o.file.Size)
    downloadToken, err := o.fs.getDownloadToken(ctx, o.file.URL)

    if err != nil {
        return nil, err
    }

    var resp *http.Response
    opts := rest.Opts{
        Method:  "GET",
        RootURL: downloadToken.URL,
        Options: options,
    }

    err = o.fs.pacer.Call(func() (bool, error) {
        resp, err = o.fs.rest.Call(ctx, &opts)
        return shouldRetry(resp, err)
    })

    if err != nil {
        return nil, err
    }
    return resp.Body, err
}

// Update in to the object with the modTime given of the given size
//
// When called from outside a Fs by rclone, src.Size() will always be >= 0.
// But for unknown-sized objects (indicated by src.Size() == -1), Upload should either
// return an error or update the object properly (rather than e.g. calling panic).
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
    if src.Size() < 0 {
        return errors.New("refusing to update with unknown size")
    }

    // upload with new size but old name
    info, err := o.fs.putUnchecked(ctx, in, o.Remote(), src.Size(), options...)
    if err != nil {
        return err
    }

    // Delete duplicate after successful upload
    err = o.Remove(ctx)
    if err != nil {
        return errors.Wrap(err, "failed to remove old version")
    }

    // Replace guts of old object with new one
    *o = *info.(*Object)

    return nil
}

// Remove removes this object
func (o *Object) Remove(ctx context.Context) error {
    // fs.Debugf(f, "Removing file `%s` with url `%s`", o.file.Filename, o.file.URL)

    _, err := o.fs.deleteFile(ctx, o.file.URL)

    if err != nil {
        return err
    }

    return nil
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType(ctx context.Context) string {
    return o.file.ContentType
}

// ID returns the ID of the Object if known, or "" if not
func (o *Object) ID() string {
    return o.file.URL
}

// Check the interfaces are satisfied
var (
    _ fs.Object    = (*Object)(nil)
    _ fs.MimeTyper = (*Object)(nil)
    _ fs.IDer      = (*Object)(nil)
)
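[Editor's note] The var block at the end of object.go is the standard Go trick for checking interface satisfaction at compile time: assigning a typed nil pointer to a blank variable of the interface type costs nothing at runtime but fails the build the moment a required method is missing or changes signature. A tiny self-contained illustration:

package main

import "fmt"

type Sizer interface {
    Size() int64
}

type file struct{ n int64 }

func (f *file) Size() int64 { return f.n }

// Compile-time proof that *file implements Sizer; if Size were renamed
// or its signature changed, this line alone would break the build.
var _ Sizer = (*file)(nil)

func main() {
    fmt.Println((&file{n: 42}).Size()) // 42
}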
backend/fichier/structs.go · 120 lines · new file
@@ -0,0 +1,120 @@
package fichier

// ListFolderRequest is the request structure of the corresponding request
type ListFolderRequest struct {
    FolderID int `json:"folder_id"`
}

// ListFilesRequest is the request structure of the corresponding request
type ListFilesRequest struct {
    FolderID int `json:"folder_id"`
}

// DownloadRequest is the request structure of the corresponding request
type DownloadRequest struct {
    URL    string `json:"url"`
    Single int    `json:"single"`
}

// RemoveFolderRequest is the request structure of the corresponding request
type RemoveFolderRequest struct {
    FolderID int `json:"folder_id"`
}

// RemoveFileRequest is the request structure of the corresponding request
type RemoveFileRequest struct {
    Files []RmFile `json:"files"`
}

// RmFile is the request structure of the corresponding request
type RmFile struct {
    URL string `json:"url"`
}

// GenericOKResponse is the response structure of the corresponding request
type GenericOKResponse struct {
    Status  string `json:"status"`
    Message string `json:"message"`
}

// MakeFolderRequest is the request structure of the corresponding request
type MakeFolderRequest struct {
    Name     string `json:"name"`
    FolderID int    `json:"folder_id"`
}

// MakeFolderResponse is the response structure of the corresponding request
type MakeFolderResponse struct {
    Name     string `json:"name"`
    FolderID int    `json:"folder_id"`
}

// GetUploadNodeResponse is the response structure of the corresponding request
type GetUploadNodeResponse struct {
    ID  string `json:"id"`
    URL string `json:"url"`
}

// GetTokenResponse is the response structure of the corresponding request
type GetTokenResponse struct {
    URL     string `json:"url"`
    Status  string `json:"Status"`
    Message string `json:"Message"`
}

// SharedFolderResponse is the response structure of the corresponding request
type SharedFolderResponse []SharedFile

// SharedFile is the structure in which 1Fichier returns a shared File
type SharedFile struct {
    Filename string `json:"filename"`
    Link     string `json:"link"`
    Size     int64  `json:"size"`
}

// EndFileUploadResponse is the response structure of the corresponding request
type EndFileUploadResponse struct {
    Incoming int `json:"incoming"`
    Links    []struct {
        Download  string `json:"download"`
        Filename  string `json:"filename"`
        Remove    string `json:"remove"`
        Size      string `json:"size"`
        Whirlpool string `json:"whirlpool"`
    } `json:"links"`
}

// File is the structure in which 1Fichier returns a File
type File struct {
    ACL         int    `json:"acl"`
    CDN         int    `json:"cdn"`
    Checksum    string `json:"checksum"`
    ContentType string `json:"content-type"`
    Date        string `json:"date"`
    Filename    string `json:"filename"`
    Pass        int    `json:"pass"`
    Size        int64  `json:"size"`
    URL         string `json:"url"`
}

// FilesList is the structure in which 1Fichier returns a list of files
type FilesList struct {
    Items  []File `json:"items"`
    Status string `json:"Status"`
}

// Folder is the structure in which 1Fichier returns a Folder
type Folder struct {
    CreateDate string `json:"create_date"`
    ID         int    `json:"id"`
    Name       string `json:"name"`
    Pass       int    `json:"pass"`
}

// FoldersList is the structure in which 1Fichier returns a list of Folders
type FoldersList struct {
    FolderID   int      `json:"folder_id"`
    Name       string   `json:"name"`
    Status     string   `json:"Status"`
    SubFolders []Folder `json:"sub_folders"`
}
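[Editor's note] The struct tags above are what map Go field names onto 1Fichier's snake_case JSON. A quick round trip with encoding/json shows the shape on the wire, reusing the same tag as ListFolderRequest (the local type name here is illustrative):

package main

import (
    "encoding/json"
    "fmt"
)

type listFolderRequest struct {
    FolderID int `json:"folder_id"`
}

func main() {
    // Marshal: the Go field FolderID appears as "folder_id" on the wire.
    out, _ := json.Marshal(listFolderRequest{FolderID: 7})
    fmt.Println(string(out)) // {"folder_id":7}

    // Unmarshal: the tag drives the reverse mapping too.
    var in listFolderRequest
    _ = json.Unmarshal([]byte(`{"folder_id":42}`), &in)
    fmt.Println(in.FolderID) // 42
}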
@@ -2,6 +2,8 @@
package ftp

import (
    "context"
    "crypto/tls"
    "io"
    "net/textproto"
    "os"

@@ -10,14 +12,16 @@ import (
    "time"

    "github.com/jlaffaye/ftp"
    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fs/config/configmap"
    "github.com/ncw/rclone/fs/config/configstruct"
    "github.com/ncw/rclone/fs/config/obscure"
    "github.com/ncw/rclone/fs/hash"
    "github.com/ncw/rclone/lib/pacer"
    "github.com/ncw/rclone/lib/readers"
    "github.com/pkg/errors"
    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/fs/config"
    "github.com/rclone/rclone/fs/config/configmap"
    "github.com/rclone/rclone/fs/config/configstruct"
    "github.com/rclone/rclone/fs/config/obscure"
    "github.com/rclone/rclone/fs/hash"
    "github.com/rclone/rclone/lib/encoder"
    "github.com/rclone/rclone/lib/pacer"
    "github.com/rclone/rclone/lib/readers"
)

// Register with Fs

@@ -26,43 +30,70 @@ func init() {
        Name:        "ftp",
        Description: "FTP Connection",
        NewFs:       NewFs,
        Options: []fs.Option{
            {
                Name:     "host",
                Help:     "FTP host to connect to",
                Required: true,
                Examples: []fs.OptionExample{{
                    Value: "ftp.example.com",
                    Help:  "Connect to ftp.example.com",
                }},
            }, {
                Name: "user",
                Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
            }, {
                Name: "port",
                Help: "FTP port, leave blank to use default (21)",
            }, {
                Name:       "pass",
                Help:       "FTP password",
                IsPassword: true,
                Required:   true,
            }, {
                Name:     "concurrency",
                Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
                Default:  0,
                Advanced: true,
            },
        },
        Options: []fs.Option{{
            Name:     "host",
            Help:     "FTP host to connect to",
            Required: true,
            Examples: []fs.OptionExample{{
                Value: "ftp.example.com",
                Help:  "Connect to ftp.example.com",
            }},
        }, {
            Name: "user",
            Help: "FTP username, leave blank for current username, " + os.Getenv("USER"),
        }, {
            Name: "port",
            Help: "FTP port, leave blank to use default (21)",
        }, {
            Name:       "pass",
            Help:       "FTP password",
            IsPassword: true,
            Required:   true,
        }, {
            Name:    "tls",
            Help:    "Use FTP over TLS (Implicit)",
            Default: false,
        }, {
            Name:     "concurrency",
            Help:     "Maximum number of FTP simultaneous connections, 0 for unlimited",
            Default:  0,
            Advanced: true,
        }, {
            Name:     "no_check_certificate",
            Help:     "Do not verify the TLS certificate of the server",
            Default:  false,
            Advanced: true,
        }, {
            Name:     "disable_epsv",
            Help:     "Disable using EPSV even if server advertises support",
            Default:  false,
            Advanced: true,
        }, {
            Name:     config.ConfigEncoding,
            Help:     config.ConfigEncodingHelp,
            Advanced: true,
            // The FTP protocol can't handle trailing spaces (for instance
            // pureftpd turns them into _)
            //
            // proftpd can't handle '*' in file names
            // pureftpd can't handle '[', ']' or '*'
            Default: (encoder.Display |
                encoder.EncodeRightSpace),
        }},
    })
}

// Options defines the configuration for this backend
type Options struct {
    Host        string `config:"host"`
    User        string `config:"user"`
    Pass        string `config:"pass"`
    Port        string `config:"port"`
    Concurrency int    `config:"concurrency"`
    Host              string               `config:"host"`
    User              string               `config:"user"`
    Pass              string               `config:"pass"`
    Port              string               `config:"port"`
    TLS               bool                 `config:"tls"`
    Concurrency       int                  `config:"concurrency"`
    SkipVerifyTLSCert bool                 `config:"no_check_certificate"`
    DisableEPSV       bool                 `config:"disable_epsv"`
    Enc               encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote FTP server
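[Editor's note] The new encoding option routes every file and directory name through f.opt.Enc on the way to the server and back again on the way out. Only the conversion methods visible in this diff (FromStandardName/Path, ToStandardName/Path) are assumed; the sketch below imitates the shape with a trivial encoder that maps the single '*' character proftpd rejects to its fullwidth lookalike, not rclone's actual MultiEncoder tables:

package main

import (
    "fmt"
    "strings"
)

// toWire converts a standard rclone name into what the server accepts,
// mirroring the FromStandardName direction of the real encoder.
func toWire(name string) string { return strings.ReplaceAll(name, "*", "＊") }

// fromWire undoes the mapping, mirroring the ToStandardName direction.
func fromWire(name string) string { return strings.ReplaceAll(name, "＊", "*") }

func main() {
    wire := toWire("back*up.txt")
    fmt.Println(wire)           // back＊up.txt — safe to send to the server
    fmt.Println(fromWire(wire)) // back*up.txt — restored on the way back
}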
@@ -120,7 +151,18 @@ func (f *Fs) Features() *fs.Features {
// Open a new connection to the FTP server.
func (f *Fs) ftpConnection() (*ftp.ServerConn, error) {
    fs.Debugf(f, "Connecting to FTP server")
    c, err := ftp.DialTimeout(f.dialAddr, fs.Config.ConnectTimeout)
    ftpConfig := []ftp.DialOption{ftp.DialWithTimeout(fs.Config.ConnectTimeout)}
    if f.opt.TLS {
        tlsConfig := &tls.Config{
            ServerName:         f.opt.Host,
            InsecureSkipVerify: f.opt.SkipVerifyTLSCert,
        }
        ftpConfig = append(ftpConfig, ftp.DialWithTLS(tlsConfig))
    }
    if f.opt.DisableEPSV {
        ftpConfig = append(ftpConfig, ftp.DialWithDisabledEPSV(true))
    }
    c, err := ftp.Dial(f.dialAddr, ftpConfig...)
    if err != nil {
        fs.Errorf(f, "Error while Dialing %s: %s", f.dialAddr, err)
        return nil, errors.Wrap(err, "ftpConnection Dial")
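[Editor's note] ftpConnection now assembles a slice of ftp.DialOption values before dialing, which is how the jlaffaye/ftp library used here takes optional TLS and EPSV settings. A condensed sketch of just that option-building step, assuming that module is available; the host and timeout values are placeholders:

package main

import (
    "crypto/tls"
    "time"

    "github.com/jlaffaye/ftp"
)

// dial builds the option slice conditionally, exactly as the diff does.
func dial(addr, host string, useTLS, skipVerify, disableEPSV bool) (*ftp.ServerConn, error) {
    opts := []ftp.DialOption{ftp.DialWithTimeout(30 * time.Second)}
    if useTLS {
        opts = append(opts, ftp.DialWithTLS(&tls.Config{
            ServerName:         host,       // verify the cert against the configured host
            InsecureSkipVerify: skipVerify, // only when no_check_certificate is set
        }))
    }
    if disableEPSV {
        opts = append(opts, ftp.DialWithDisabledEPSV(true))
    }
    return ftp.Dial(addr, opts...)
}

func main() {
    if c, err := dial("ftp.example.com:21", "ftp.example.com", false, false, false); err == nil {
        _ = c.Quit()
    }
}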
@@ -148,7 +190,11 @@ func (f *Fs) getFtpConnection() (c *ftp.ServerConn, err error) {
    if c != nil {
        return c, nil
    }
    return f.ftpConnection()
    c, err = f.ftpConnection()
    if err != nil && f.opt.Concurrency > 0 {
        f.tokens.Put()
    }
    return c, err
}

// Return an FTP connection to the pool

@@ -161,7 +207,13 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {
    if f.opt.Concurrency > 0 {
        defer f.tokens.Put()
    }
    if pc == nil {
        return
    }
    c := *pc
    if c == nil {
        return
    }
    *pc = nil
    if err != nil {
        // If not a regular FTP error code then check the connection

@@ -182,6 +234,7 @@ func (f *Fs) putFtpConnection(pc **ftp.ServerConn, err error) {

// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    ctx := context.Background()
    // defer fs.Trace(nil, "name=%q, root=%q", name, root)("fs=%v, err=%v", &ff, &err)
    // Parse config into Options struct
    opt := new(Options)

@@ -203,7 +256,11 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
    }

    dialAddr := opt.Host + ":" + port
    u := "ftp://" + path.Join(dialAddr+"/", root)
    protocol := "ftp://"
    if opt.TLS {
        protocol = "ftps://"
    }
    u := protocol + path.Join(dialAddr+"/", root)
    f := &Fs{
        name: name,
        root: root,

@@ -230,7 +287,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
        if f.root == "." {
            f.root = ""
        }
        _, err := f.NewObject(remote)
        _, err := f.NewObject(ctx, remote)
        if err != nil {
            if err == fs.ErrorObjectNotFound || errors.Cause(err) == fs.ErrorNotAFile {
                // File doesn't exist so return old f

@@ -269,10 +326,37 @@ func translateErrorDir(err error) error {
    return err
}

// entryToStandard converts an incoming ftp.Entry to Standard encoding
func (f *Fs) entryToStandard(entry *ftp.Entry) {
    // Skip . and .. as we don't want these encoded
    if entry.Name == "." || entry.Name == ".." {
        return
    }
    entry.Name = f.opt.Enc.ToStandardName(entry.Name)
    entry.Target = f.opt.Enc.ToStandardPath(entry.Target)
}

// dirFromStandardPath returns dir in encoded form.
func (f *Fs) dirFromStandardPath(dir string) string {
    // Skip . and .. as we don't want these encoded
    if dir == "." || dir == ".." {
        return dir
    }
    return f.opt.Enc.FromStandardPath(dir)
}

// findItem finds a directory entry for the name in its parent directory
func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    fullPath := path.Join(f.root, remote)
    if fullPath == "" || fullPath == "." || fullPath == "/" {
        // if root, assume exists and synthesize an entry
        return &ftp.Entry{
            Name: "",
            Type: ftp.EntryTypeFolder,
            Time: time.Now(),
        }, nil
    }
    dir := path.Dir(fullPath)
    base := path.Base(fullPath)

@@ -280,12 +364,13 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {
    if err != nil {
        return nil, errors.Wrap(err, "findItem")
    }
    files, err := c.List(dir)
    files, err := c.List(f.dirFromStandardPath(dir))
    f.putFtpConnection(&c, err)
    if err != nil {
        return nil, translateErrorFile(err)
    }
    for _, file := range files {
        f.entryToStandard(file)
        if file.Name == base {
            return file, nil
        }

@@ -295,7 +380,7 @@ func (f *Fs) findItem(remote string) (entry *ftp.Entry, err error) {

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (o fs.Object, err error) {
func (f *Fs) NewObject(ctx context.Context, remote string) (o fs.Object, err error) {
    // defer fs.Trace(remote, "")("o=%v, err=%v", &o, &err)
    entry, err := f.findItem(remote)
    if err != nil {

@@ -339,17 +424,42 @@ func (f *Fs) dirExists(remote string) (exists bool, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    // defer fs.Trace(dir, "curlevel=%d", curlevel)("")
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
    // defer log.Trace(dir, "dir=%q", dir)("entries=%v, err=%v", &entries, &err)
    c, err := f.getFtpConnection()
    if err != nil {
        return nil, errors.Wrap(err, "list")
    }
    files, err := c.List(path.Join(f.root, dir))
    f.putFtpConnection(&c, err)
    if err != nil {
        return nil, translateErrorDir(err)

    var listErr error
    var files []*ftp.Entry

    resultchan := make(chan []*ftp.Entry, 1)
    errchan := make(chan error, 1)
    go func() {
        result, err := c.List(f.dirFromStandardPath(path.Join(f.root, dir)))
        f.putFtpConnection(&c, err)
        if err != nil {
            errchan <- err
            return
        }
        resultchan <- result
    }()

    // Wait for List for up to Timeout seconds
    timer := time.NewTimer(fs.Config.Timeout)
    select {
    case listErr = <-errchan:
        timer.Stop()
        return nil, translateErrorDir(listErr)
    case files = <-resultchan:
        timer.Stop()
    case <-timer.C:
        // if timer fired assume no error but connection dead
        fs.Errorf(f, "Timeout when waiting for List")
        return nil, errors.New("Timeout when waiting for List")
    }

    // Annoyingly FTP returns success for a directory which
    // doesn't exist, so check it really doesn't exist if no
    // entries found.
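[Editor's note] The rewritten List runs the blocking c.List call in a goroutine and races it against a timer, because a dead FTP connection would otherwise hang forever. The same pattern in isolation, with a stub function standing in for the FTP listing:

package main

import (
    "errors"
    "fmt"
    "time"
)

// withTimeout runs fn in a goroutine and gives up after d, leaving the
// goroutine to finish (and have its result discarded) in the background.
func withTimeout(d time.Duration, fn func() ([]string, error)) ([]string, error) {
    resultc := make(chan []string, 1) // buffered so the goroutine never blocks
    errc := make(chan error, 1)
    go func() {
        r, err := fn()
        if err != nil {
            errc <- err
            return
        }
        resultc <- r
    }()
    timer := time.NewTimer(d)
    defer timer.Stop()
    select {
    case err := <-errc:
        return nil, err
    case r := <-resultc:
        return r, nil
    case <-timer.C:
        return nil, errors.New("timeout waiting for listing")
    }
}

func main() {
    out, err := withTimeout(time.Second, func() ([]string, error) {
        return []string{"a.txt", "b.txt"}, nil
    })
    fmt.Println(out, err) // [a.txt b.txt] <nil>
}

The buffered channels matter: if the timer fires first, the abandoned goroutine can still send its result without leaking a blocked sender.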
@@ -364,6 +474,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
    }
    for i := range files {
        object := files[i]
        f.entryToStandard(object)
        newremote := path.Join(dir, object.Name)
        switch object.Type {
        case ftp.EntryTypeFolder:

@@ -404,7 +515,7 @@ func (f *Fs) Precision() time.Duration {
// May create the object even if it returns an error - if so
// will return the object and the error, otherwise will return
// nil and the error
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    // fs.Debugf(f, "Trying to put file %s", src.Remote())
    err := f.mkParentDir(src.Remote())
    if err != nil {

@@ -414,13 +525,13 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.
        fs:     f,
        remote: src.Remote(),
    }
    err = o.Update(in, src, options...)
    err = o.Update(ctx, in, src, options...)
    return o, err
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.Put(in, src, options...)
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
    return f.Put(ctx, in, src, options...)
}

// getInfo reads the FileInfo for a path

@@ -433,19 +544,21 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {
    if err != nil {
        return nil, errors.Wrap(err, "getInfo")
    }
    files, err := c.List(dir)
    files, err := c.List(f.dirFromStandardPath(dir))
    f.putFtpConnection(&c, err)
    if err != nil {
        return nil, translateErrorFile(err)
    }

    for i := range files {
        if files[i].Name == base {
        file := files[i]
        f.entryToStandard(file)
        if file.Name == base {
            info := &FileInfo{
                Name:    remote,
                Size:    files[i].Size,
                ModTime: files[i].Time,
                IsDir:   files[i].Type == ftp.EntryTypeFolder,
                Size:    file.Size,
                ModTime: file.Time,
                IsDir:   file.Type == ftp.EntryTypeFolder,
            }
            return info, nil
        }

@@ -455,6 +568,7 @@ func (f *Fs) getInfo(remote string) (fi *FileInfo, err error) {

// mkdir makes the directory and parents using unrooted paths
func (f *Fs) mkdir(abspath string) error {
    abspath = path.Clean(abspath)
    if abspath == "." || abspath == "/" {
        return nil
    }

@@ -476,7 +590,7 @@ func (f *Fs) mkdir(abspath string) error {
    if connErr != nil {
        return errors.Wrap(connErr, "mkdir")
    }
    err = c.MakeDir(abspath)
    err = c.MakeDir(f.dirFromStandardPath(abspath))
    f.putFtpConnection(&c, err)
    switch errX := err.(type) {
    case *textproto.Error:

@@ -498,7 +612,7 @@ func (f *Fs) mkParentDir(remote string) error {
}

// Mkdir creates the directory if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
    // defer fs.Trace(dir, "")("err=%v", &err)
    root := path.Join(f.root, dir)
    return f.mkdir(root)

@@ -507,18 +621,18 @@ func (f *Fs) Mkdir(dir string) (err error) {
// Rmdir removes the directory (container, bucket) if empty
//
// Return an error if it doesn't exist or isn't empty
func (f *Fs) Rmdir(dir string) error {
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    c, err := f.getFtpConnection()
    if err != nil {
        return errors.Wrap(translateErrorFile(err), "Rmdir")
    }
    err = c.RemoveDir(path.Join(f.root, dir))
    err = c.RemoveDir(f.dirFromStandardPath(path.Join(f.root, dir)))
    f.putFtpConnection(&c, err)
    return translateErrorDir(err)
}

// Move renames a remote file object
func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
    srcObj, ok := src.(*Object)
    if !ok {
        fs.Debugf(src, "Can't move - not same remote type")

@@ -533,14 +647,14 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
        return nil, errors.Wrap(err, "Move")
    }
    err = c.Rename(
        path.Join(srcObj.fs.root, srcObj.remote),
        path.Join(f.root, remote),
        f.opt.Enc.FromStandardPath(path.Join(srcObj.fs.root, srcObj.remote)),
        f.opt.Enc.FromStandardPath(path.Join(f.root, remote)),
    )
    f.putFtpConnection(&c, err)
    if err != nil {
        return nil, errors.Wrap(err, "Move Rename failed")
    }
    dstObj, err := f.NewObject(remote)
    dstObj, err := f.NewObject(ctx, remote)
    if err != nil {
        return nil, errors.Wrap(err, "Move NewObject failed")
    }

@@ -555,7 +669,7 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
    srcFs, ok := src.(*Fs)
    if !ok {
        fs.Debugf(srcFs, "Can't move directory - not same remote type")

@@ -587,8 +701,8 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
        return errors.Wrap(err, "DirMove")
    }
    err = c.Rename(
        srcPath,
        dstPath,
        f.dirFromStandardPath(srcPath),
        f.dirFromStandardPath(dstPath),
    )
    f.putFtpConnection(&c, err)
    if err != nil {

@@ -618,7 +732,7 @@ func (o *Object) Remote() string {
}

// Hash returns the hash of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
    return "", hash.ErrUnsupported
}

@@ -628,12 +742,12 @@ func (o *Object) Size() int64 {
}

// ModTime returns the modification time of the object
func (o *Object) ModTime() time.Time {
func (o *Object) ModTime(ctx context.Context) time.Time {
    return o.info.ModTime
}

// SetModTime sets the modification time of the object
func (o *Object) SetModTime(modTime time.Time) error {
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
    return nil
}

@@ -674,11 +788,13 @@ func (f *ftpReadCloser) Close() error {
    case <-timer.C:
        // if timer fired assume no error but connection dead
        fs.Errorf(f.f, "Timeout when waiting for connection Close")
        f.f.putFtpConnection(nil, nil)
|
||||
return nil
|
||||
}
|
||||
// if errors while reading or closing, dump the connection
|
||||
if err != nil || f.err != nil {
|
||||
_ = f.c.Quit()
|
||||
f.f.putFtpConnection(nil, nil)
|
||||
} else {
|
||||
f.f.putFtpConnection(&f.c, nil)
|
||||
}
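
The hunk above guards Close with a timer: if the timer fires before the close completes, the connection is treated as dead rather than returned to the pool. A minimal, self-contained sketch of that timer-guarded close pattern (all names here are illustrative, not the backend's real API):

package main

import (
	"errors"
	"fmt"
	"time"
)

// closeWithTimeout runs closeFn in a goroutine and gives up after d,
// treating a fired timer as "no error, but the connection is dead".
func closeWithTimeout(closeFn func() error, d time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- closeFn() }()
	select {
	case err := <-done:
		return err
	case <-time.After(d):
		return errors.New("timeout waiting for Close")
	}
}

func main() {
	err := closeWithTimeout(func() error {
		time.Sleep(10 * time.Millisecond) // pretend to drain the connection
		return nil
	}, time.Second)
	fmt.Println(err) // <nil>
}
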
@@ -694,7 +810,7 @@ func (f *ftpReadCloser) Close() error {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// defer fs.Trace(o, "")("rc=%v, err=%v", &rc, &err)
path := path.Join(o.fs.root, o.remote)
var offset, limit int64 = 0, -1
@@ -714,7 +830,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
if err != nil {
return nil, errors.Wrap(err, "open")
}
fd, err := c.RetrFrom(path, uint64(offset))
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, errors.Wrap(err, "open")
@@ -728,7 +844,7 @@ func (o *Object) Open(options ...fs.OpenOption) (rc io.ReadCloser, err error) {
// Copy the reader into the object updating modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
// defer fs.Trace(o, "src=%v", src)("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// remove the file if upload failed
@@ -738,7 +854,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
// may still be dealing with it for a moment. A sleep isn't ideal but I haven't been
// able to think of a better method to find out if the server has finished - ncw
time.Sleep(1 * time.Second)
removeErr := o.Remove()
removeErr := o.Remove(ctx)
if removeErr != nil {
fs.Debugf(o, "Failed to remove: %v", removeErr)
} else {
@@ -749,10 +865,11 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
if err != nil {
return errors.Wrap(err, "Update")
}
err = c.Stor(path, in)
err = c.Stor(o.fs.opt.Enc.FromStandardPath(path), in)
if err != nil {
_ = c.Quit() // toss this connection to avoid sync errors
remove()
o.fs.putFtpConnection(nil, err)
return errors.Wrap(err, "update stor")
}
o.fs.putFtpConnection(&c, nil)
@@ -764,7 +881,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() (err error) {
func (o *Object) Remove(ctx context.Context) (err error) {
// defer fs.Trace(o, "")("err=%v", &err)
path := path.Join(o.fs.root, o.remote)
// Check if it's a directory or a file
@@ -773,13 +890,13 @@ func (o *Object) Remove() (err error) {
return err
}
if info.IsDir {
err = o.fs.Rmdir(o.remote)
err = o.fs.Rmdir(ctx, o.remote)
} else {
c, err := o.fs.getFtpConnection()
if err != nil {
return errors.Wrap(err, "Remove")
}
err = c.Delete(path)
err = c.Delete(o.fs.opt.Enc.FromStandardPath(path))
o.fs.putFtpConnection(&c, err)
}
return err
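
Taken together, the FTP changes above follow one mechanical pattern: every method of the Fs and Object interfaces gains a leading context.Context parameter, which is then passed down to any helper that eventually performs I/O. A minimal sketch of that plumbing (the interface here is a toy stand-in, not rclone's real fs.Fs):

package main

import (
	"context"
	"fmt"
)

// putter is a toy stand-in for the object-writing part of a backend.
type putter interface {
	Put(ctx context.Context, name string) error
}

type backend struct{}

// Put threads ctx through to the lower-level helper, mirroring how
// Put/PutStream/Update gained a ctx parameter in the diff above.
func (b backend) Put(ctx context.Context, name string) error {
	return b.store(ctx, name)
}

func (b backend) store(ctx context.Context, name string) error {
	if err := ctx.Err(); err != nil { // honour cancellation
		return err
	}
	fmt.Println("stored", name)
	return nil
}

func main() {
	var p putter = backend{}
	_ = p.Put(context.Background(), "file.txt") // stored file.txt
}
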
@@ -4,14 +4,45 @@ package ftp_test
import (
"testing"

"github.com/ncw/rclone/backend/ftp"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/backend/ftp"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote
func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTP:",
RemoteName: "TestFTPProftpd:",
NilObject: (*ftp.Object)(nil),
})
}

func TestIntegration2(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPRclone:",
NilObject: (*ftp.Object)(nil),
})
}

func TestIntegration3(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("skipping as -remote is set")
}
fstests.Run(t, &fstests.Opt{
RemoteName: "TestFTPPureftpd:",
NilObject: (*ftp.Object)(nil),
})
}

// func TestIntegration4(t *testing.T) {
// if *fstest.RemoteName != "" {
// t.Skip("skipping as -remote is set")
// }
// fstests.Run(t, &fstests.Opt{
// RemoteName: "TestFTPVsftpd:",
// NilObject: (*ftp.Object)(nil),
// })
// }

@@ -1,7 +1,4 @@
// Package googlecloudstorage provides an interface to Google Cloud Storage

// +build go1.9

package googlecloudstorage

/*
@@ -26,23 +23,23 @@ import (
"net/http"
"os"
"path"
"regexp"
"strings"
"sync"
"time"

"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/config/obscure"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/oauthutil"
"github.com/ncw/rclone/lib/pacer"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/bucket"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/oauthutil"
"github.com/rclone/rclone/lib/pacer"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
@@ -64,7 +61,7 @@ const (
var (
// Description of how to auth for this app
storageConfig = &oauth2.Config{
Scopes: []string{storage.DevstorageFullControlScope},
Scopes: []string{storage.DevstorageReadWriteScope},
Endpoint: google.Endpoint,
ClientID: rcloneClientID,
ClientSecret: obscure.MustReveal(rcloneEncryptedClientSecret),
@@ -249,34 +246,42 @@ Docs: https://cloud.google.com/storage/docs/bucket-policy-only
Value: "DURABLE_REDUCED_AVAILABILITY",
Help: "Durable reduced availability storage class",
}},
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
Advanced: true,
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}},
})
}

// Options defines the configuration for this backend
type Options struct {
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
ProjectNumber string `config:"project_number"`
ServiceAccountFile string `config:"service_account_file"`
ServiceAccountCredentials string `config:"service_account_credentials"`
ObjectACL string `config:"object_acl"`
BucketACL string `config:"bucket_acl"`
BucketPolicyOnly bool `config:"bucket_policy_only"`
Location string `config:"location"`
StorageClass string `config:"storage_class"`
Enc encoder.MultiEncoder `config:"encoding"`
}

// Fs represents a remote storage server
type Fs struct {
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
pacer *fs.Pacer // To pace the API calls
name string // name of this remote
root string // the path we are working on if any
opt Options // parsed options
features *fs.Features // optional features
svc *storage.Service // the connection to the storage server
client *http.Client // authorized client
rootBucket string // bucket part of root (if any)
rootDirectory string // directory part of root (if any)
cache *bucket.Cache // cache of bucket status
pacer *fs.Pacer // To pace the API calls
}

// Object describes a storage object
@@ -301,18 +306,18 @@ func (f *Fs) Name() string {

// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
return f.root
}

// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("Storage bucket %s", f.bucket)
if f.rootBucket == "" {
return fmt.Sprintf("GCS root")
}
return fmt.Sprintf("Storage bucket %s path %s", f.bucket, f.root)
if f.rootDirectory == "" {
return fmt.Sprintf("GCS bucket %s", f.rootBucket)
}
return fmt.Sprintf("GCS bucket %s path %s", f.rootBucket, f.rootDirectory)
}

// Features returns the optional features of this Fs
@@ -344,21 +349,24 @@ func shouldRetry(err error) (again bool, errOut error) {
return again, err
}

// Pattern to match a storage path
var matcher = regexp.MustCompile(`^([^/]*)(.*)$`)

// parseParse parses a storage 'url'
func parsePath(path string) (bucket, directory string, err error) {
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't find bucket in storage path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
// parsePath parses a remote 'url'
func parsePath(path string) (root string) {
root = strings.Trim(path, "/")
return
}

// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}

// split returns bucket and bucketPath from the object
func (o *Object) split() (bucket, bucketPath string) {
return o.fs.split(o.remote)
}
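
The two split helpers above are the heart of the new rooted-path handling: the Fs root and the relative path are joined, then divided into a bucket name and a path within the bucket. A rough sketch of that splitting, assuming lib/bucket.Split takes the first path segment as the bucket (a simplified reimplementation for illustration, not the real library):

package main

import (
	"fmt"
	"path"
	"strings"
)

// split mimics the assumed behaviour of lib/bucket.Split: everything up
// to the first "/" is the bucket, the remainder is the path inside it.
func split(absPath string) (bucket, bucketPath string) {
	slash := strings.IndexRune(absPath, '/')
	if slash < 0 {
		return absPath, ""
	}
	return absPath[:slash], absPath[slash+1:]
}

func main() {
	root := "mybucket/photos" // what f.root might hold
	fmt.Println(split(path.Join(root, "2019/img.jpg")))
	// Output: mybucket photos/2019/img.jpg
}
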

func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
conf, err := google.JWTConfigFromJSON(credentialsData, storageConfig.Scopes...)
if err != nil {
@@ -368,8 +376,15 @@ func getServiceAccountClient(credentialsData []byte) (*http.Client, error) {
return oauth2.NewClient(ctxWithSpecialClient, conf.TokenSource(ctxWithSpecialClient)), nil
}

// setRoot changes the root of the Fs
func (f *Fs) setRoot(root string) {
f.root = parsePath(root)
f.rootBucket, f.rootDirectory = bucket.Split(f.root)
}

// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
var oAuthClient *http.Client

// Parse config into Options struct
@@ -409,22 +424,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
}

bucket, directory, err := parsePath(root)
if err != nil {
return nil, err
}

f := &Fs{
name: name,
bucket: bucket,
root: directory,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
name: name,
root: root,
opt: *opt,
pacer: fs.NewPacer(pacer.NewGoogleDrive(pacer.MinSleep(minSleep))),
cache: bucket.NewCache(),
}
f.setRoot(root)
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
BucketBasedRootOK: true,
}).Fill(f)

// Create a new authorized Drive client.
@@ -434,20 +446,19 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, errors.Wrap(err, "couldn't create Google Cloud Storage client")
}

if f.root != "" {
f.root += "/"
if f.rootBucket != "" && f.rootDirectory != "" {
// Check to see if the object exists
encodedDirectory := f.opt.Enc.FromStandardPath(f.rootDirectory)
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.Get(bucket, directory).Do()
_, err = f.svc.Objects.Get(f.rootBucket, encodedDirectory).Context(ctx).Do()
return shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
newRoot := path.Dir(f.root)
if newRoot == "." {
newRoot = ""
}
f.setRoot(newRoot)
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
@@ -458,7 +469,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object, error) {
func (f *Fs) newObjectWithInfo(ctx context.Context, remote string, info *storage.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
@@ -466,7 +477,7 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,
if info != nil {
o.setMetaData(info)
} else {
err := o.readMetaData() // reads info and meta, returning an error
err := o.readMetaData(ctx) // reads info and meta, returning an error
if err != nil {
return nil, err
}
@@ -476,8 +487,8 @@ func (f *Fs) newObjectWithInfo(remote string, info *storage.Object) (fs.Object,

// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
return f.newObjectWithInfo(ctx, remote, nil)
}

// listFn is called from list to handle an object.
@@ -488,20 +499,24 @@ type listFn func(remote string, object *storage.Object, isDirectory bool) error
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
root := f.root
rootLength := len(root)
if dir != "" {
root += dir + "/"
//
// The remote has prefix removed from it and if addBucket is set
// then it adds the bucket to the start.
func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBucket bool, recurse bool, fn listFn) (err error) {
if prefix != "" {
prefix += "/"
}
list := f.svc.Objects.List(f.bucket).Prefix(root).MaxResults(listChunks)
if directory != "" {
directory += "/"
}
list := f.svc.Objects.List(bucket).Prefix(directory).MaxResults(listChunks)
if !recurse {
list = list.Delimiter("/")
}
for {
var objects *storage.Objects
err = f.pacer.Call(func() (bool, error) {
objects, err = list.Do()
objects, err = list.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
@@ -514,31 +529,38 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
}
if !recurse {
var object storage.Object
for _, prefix := range objects.Prefixes {
if !strings.HasSuffix(prefix, "/") {
for _, remote := range objects.Prefixes {
if !strings.HasSuffix(remote, "/") {
continue
}
err = fn(prefix[rootLength:len(prefix)-1], &object, true)
remote = f.opt.Enc.ToStandardPath(remote)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[len(prefix) : len(remote)-1]
if addBucket {
remote = path.Join(bucket, remote)
}
err = fn(remote, &object, true)
if err != nil {
return err
}
}
}
for _, object := range objects.Items {
if !strings.HasPrefix(object.Name, root) {
remote := f.opt.Enc.ToStandardPath(object.Name)
if !strings.HasPrefix(remote, prefix) {
fs.Logf(f, "Odd name received %q", object.Name)
continue
}
remote := object.Name[rootLength:]
remote = remote[len(prefix):]
isDirectory := strings.HasSuffix(remote, "/")
if addBucket {
remote = path.Join(bucket, remote)
}
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since will have no prefixes
err = fn(remote[:len(remote)-1], object, true)
if err != nil {
return err
}
}
if isDirectory && object.Size == 0 {
continue // skip directory marker
}
err = fn(remote, object, false)
@@ -555,32 +577,23 @@ func (f *Fs) list(dir string, recurse bool, fn listFn) (err error) {
}

// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
d := fs.NewDir(remote, time.Time{}).SetSize(int64(object.Size))
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
o, err := f.newObjectWithInfo(ctx, remote, object)
if err != nil {
return nil, err
}
return o, nil
}

// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketOKMu.Unlock()
}
}

// listDir lists a single directory
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
// List the objects
err = f.list(dir, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
@@ -593,15 +606,12 @@ func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
f.cache.MarkOK(bucket)
return entries, err
}

// listBuckets lists the buckets
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error) {
if f.opt.ProjectNumber == "" {
return nil, errors.New("can't list buckets without project number")
}
@@ -609,14 +619,14 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
for {
var buckets *storage.Buckets
err = f.pacer.Call(func() (bool, error) {
buckets, err = listBuckets.Do()
buckets, err = listBuckets.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range buckets.Items {
d := fs.NewDir(bucket.Name, time.Time{})
d := fs.NewDir(f.opt.Enc.ToStandardName(bucket.Name), time.Time{})
entries = append(entries, d)
}
if buckets.NextPageToken == "" {
@@ -636,11 +646,15 @@ func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
}
return f.listBuckets(ctx)
}
return f.listDir(dir)
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
}
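
The rewritten List above encodes the dispatch rule for bucket-based remotes: an empty bucket name means the remote root, where only bucket listing is allowed. A toy version of that rule (illustrative names only, not the real rclone API):

package main

import "fmt"

// list sketches the dispatch in List above: no bucket means list the
// buckets themselves, and a directory without a bucket is an error.
func list(bucket, directory string) (string, error) {
	if bucket == "" {
		if directory != "" {
			return "", fmt.Errorf("cannot list %q without a bucket", directory)
		}
		return "all buckets", nil
	}
	return fmt.Sprintf("bucket %q, directory %q", bucket, directory), nil
}

func main() {
	fmt.Println(list("", ""))
	fmt.Println(list("b", "photos"))
}
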

// ListR lists the objects and directories of the Fs starting
@@ -659,23 +673,44 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (err error) {
bucket, directory := f.split(dir)
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
listR := func(bucket, directory, prefix string, addBucket bool) error {
return f.list(ctx, bucket, directory, prefix, addBucket, true, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
}
if bucket == "" {
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
bucket := entry.Remote()
err = listR(bucket, "", f.rootDirectory, true)
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
} else {
err = listR(bucket, directory, f.rootDirectory, f.rootBucket == "")
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}

@@ -684,94 +719,88 @@ func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
// Copy the reader in to the new object which is returned
//
// The new object may have been created if an error is returned
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
o := &Object{
fs: f,
remote: src.Remote(),
}
return o, o.Update(in, src, options...)
return o, o.Update(ctx, in, src, options...)
}

// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(ctx, in, src, options...)
}

// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
func (f *Fs) Mkdir(ctx context.Context, dir string) (err error) {
bucket, _ := f.split(dir)
return f.makeBucket(ctx, bucket)
}

err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(f.bucket).MaxResults(1).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
f.bucketOK = true
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
// makeBucket creates the bucket if it doesn't exist
func (f *Fs) makeBucket(ctx context.Context, bucket string) (err error) {
return f.cache.Create(bucket, func() error {
// List something from the bucket to see if it exists. Doing it like this enables the use of a
// service account that only has the "Storage Object Admin" role. See #2193 for details.
err = f.pacer.Call(func() (bool, error) {
_, err = f.svc.Objects.List(bucket).MaxResults(1).Context(ctx).Do()
return shouldRetry(err)
})
if err == nil {
// Bucket already exists
return nil
} else if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code != http.StatusNotFound {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}
} else {
return errors.Wrap(err, "failed to get bucket")
}

if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}
if f.opt.ProjectNumber == "" {
return errors.New("can't make bucket without project number")
}

bucket := storage.Bucket{
Name: f.bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
bucket := storage.Bucket{
Name: bucket,
Location: f.opt.Location,
StorageClass: f.opt.StorageClass,
}
}
err = f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
if f.opt.BucketPolicyOnly {
bucket.IamConfiguration = &storage.BucketIamConfiguration{
BucketPolicyOnly: &storage.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
_, err = insertBucket.Do()
return shouldRetry(err)
})
if err == nil {
f.bucketOK = true
}
return err
return f.pacer.Call(func() (bool, error) {
insertBucket := f.svc.Buckets.Insert(f.opt.ProjectNumber, &bucket)
if !f.opt.BucketPolicyOnly {
insertBucket.PredefinedAcl(f.opt.BucketACL)
}
_, err = insertBucket.Context(ctx).Do()
return shouldRetry(err)
})
}, nil)
}
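
makeBucket now delegates the exists-check and creation to f.cache.Create, so each bucket is only probed once per run. A minimal sketch of such a create-once cache (assumed semantics of lib/bucket.Cache, greatly simplified; the real cache has a richer API):

package main

import (
	"fmt"
	"sync"
)

// onceCache remembers which buckets are known to exist so the create
// function runs at most once per bucket.
type onceCache struct {
	mu sync.Mutex
	ok map[string]bool
}

func (c *onceCache) Create(bucket string, create func() error) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.ok[bucket] {
		return nil // already known to exist
	}
	if err := create(); err != nil {
		return err
	}
	c.ok[bucket] = true
	return nil
}

func main() {
	cache := &onceCache{ok: map[string]bool{}}
	mk := func() error { fmt.Println("probing/creating bucket"); return nil }
	_ = cache.Create("b", mk) // prints once
	_ = cache.Create("b", mk) // cached, prints nothing
}
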

// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty: Error 409: The bucket you tried
// to delete was not empty.
func (f *Fs) Rmdir(dir string) (err error) {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
func (f *Fs) Rmdir(ctx context.Context, dir string) (err error) {
bucket, directory := f.split(dir)
if bucket == "" || directory != "" {
return nil
}
err = f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(f.bucket).Do()
return shouldRetry(err)
return f.cache.Remove(bucket, func() error {
return f.pacer.Call(func() (bool, error) {
err = f.svc.Buckets.Delete(bucket).Context(ctx).Do()
return shouldRetry(err)
})
})
if err == nil {
f.bucketOK = false
}
return err
}

// Precision returns the precision
@@ -788,8 +817,9 @@ func (f *Fs) Precision() time.Duration {
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
dstBucket, dstPath := f.split(remote)
err := f.makeBucket(ctx, dstBucket)
if err != nil {
return nil, err
}
@@ -798,6 +828,7 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcBucket, srcPath := srcObj.split()

// Temporary Object under construction
dstObj := &Object{
@@ -805,13 +836,13 @@ func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
remote: remote,
}

srcBucket := srcObj.fs.bucket
srcObject := srcObj.fs.root + srcObj.remote
dstBucket := f.bucket
dstObject := f.root + remote
var newObject *storage.Object
err = f.pacer.Call(func() (bool, error) {
newObject, err = f.svc.Objects.Copy(srcBucket, srcObject, dstBucket, dstObject, nil).Do()
copyObject := f.svc.Objects.Copy(srcBucket, srcPath, dstBucket, dstPath, nil)
if !f.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(f.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
@@ -848,7 +879,7 @@ func (o *Object) Remote() string {
}

// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
@@ -894,24 +925,33 @@ func (o *Object) setMetaData(info *storage.Object) {
}
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if !o.modTime.IsZero() {
return nil
}
var object *storage.Object
// readObjectInfo reads the definition for an object
func (o *Object) readObjectInfo(ctx context.Context) (object *storage.Object, err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
object, err = o.fs.svc.Objects.Get(o.fs.bucket, o.fs.root+o.remote).Do()
object, err = o.fs.svc.Objects.Get(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
if gErr, ok := err.(*googleapi.Error); ok {
if gErr.Code == http.StatusNotFound {
return fs.ErrorObjectNotFound
return nil, fs.ErrorObjectNotFound
}
}
return nil, err
}
return object, nil
}

// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData(ctx context.Context) (err error) {
if !o.modTime.IsZero() {
return nil
}
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
o.setMetaData(object)
@@ -922,8 +962,8 @@ func (o *Object) readMetaData() (err error) {
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
err := o.readMetaData()
func (o *Object) ModTime(ctx context.Context) time.Time {
err := o.readMetaData(ctx)
if err != nil {
// fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
@@ -939,16 +979,28 @@ func metadataFromModTime(modTime time.Time) map[string]string {
}

// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) (err error) {
// This only adds metadata so will preserve other metadata
object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
Metadata: metadataFromModTime(modTime),
func (o *Object) SetModTime(ctx context.Context, modTime time.Time) (err error) {
// read the complete existing object first
object, err := o.readObjectInfo(ctx)
if err != nil {
return err
}
// Add the mtime to the existing metadata
mtime := modTime.Format(timeFormatOut)
if object.Metadata == nil {
object.Metadata = make(map[string]string, 1)
}
object.Metadata[metaMtime] = mtime
// Copy the object to itself to update the metadata
// Using PATCH requires too many permissions
bucket, bucketPath := o.split()
var newObject *storage.Object
err = o.fs.pacer.Call(func() (bool, error) {
newObject, err = o.fs.svc.Objects.Patch(o.fs.bucket, o.fs.root+o.remote, &object).Do()
copyObject := o.fs.svc.Objects.Copy(bucket, bucketPath, bucket, bucketPath, object)
if !o.fs.opt.BucketPolicyOnly {
copyObject.DestinationPredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = copyObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
@@ -964,11 +1016,13 @@ func (o *Object) Storable() bool {
}

// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
req, err := http.NewRequest("GET", o.url, nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
fs.FixRangeOption(options, o.bytes)
fs.OpenOptionAddHTTPHeaders(req.Header, options)
var res *http.Response
err = o.fs.pacer.Call(func() (bool, error) {
@@ -995,27 +1049,27 @@ func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
// Update the object with the contents of the io.Reader, modTime and size
//
// The new object may have been created if an error is returned
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir("")
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
bucket, bucketPath := o.split()
err := o.fs.makeBucket(ctx, bucket)
if err != nil {
return err
}
modTime := src.ModTime()
modTime := src.ModTime(ctx)

object := storage.Object{
Bucket: o.fs.bucket,
Name: o.fs.root + o.remote,
ContentType: fs.MimeType(src),
Updated: modTime.Format(timeFormatOut), // Doesn't get set
Bucket: bucket,
Name: bucketPath,
ContentType: fs.MimeType(ctx, src),
Metadata: metadataFromModTime(modTime),
}
var newObject *storage.Object
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
insertObject := o.fs.svc.Objects.Insert(o.fs.bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
insertObject := o.fs.svc.Objects.Insert(bucket, &object).Media(in, googleapi.ContentType("")).Name(object.Name)
if !o.fs.opt.BucketPolicyOnly {
insertObject.PredefinedAcl(o.fs.opt.ObjectACL)
}
newObject, err = insertObject.Do()
newObject, err = insertObject.Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
@@ -1027,16 +1081,17 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
}

// Remove an object
func (o *Object) Remove() (err error) {
func (o *Object) Remove(ctx context.Context) (err error) {
bucket, bucketPath := o.split()
err = o.fs.pacer.Call(func() (bool, error) {
err = o.fs.svc.Objects.Delete(o.fs.bucket, o.fs.root+o.remote).Do()
err = o.fs.svc.Objects.Delete(bucket, bucketPath).Context(ctx).Do()
return shouldRetry(err)
})
return err
}

// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
func (o *Object) MimeType(ctx context.Context) string {
return o.mimeType
}

@@ -1,14 +1,12 @@
// Test GoogleCloudStorage filesystem interface

// +build go1.9

package googlecloudstorage_test

import (
"testing"

"github.com/ncw/rclone/backend/googlecloudstorage"
"github.com/ncw/rclone/fstest/fstests"
"github.com/rclone/rclone/backend/googlecloudstorage"
"github.com/rclone/rclone/fstest/fstests"
)

// TestIntegration runs integration tests against the remote

@@ -1,6 +0,0 @@
// Build for unsupported platforms to stop go complaining
// about "no buildable Go source files "

// +build !go1.9

package googlecloudstorage

backend/googlephotos/albums.go (new file, 148 lines)
@@ -0,0 +1,148 @@
// This file contains the albums abstraction

package googlephotos

import (
"path"
"strings"
"sync"

"github.com/rclone/rclone/backend/googlephotos/api"
)

// All the albums
type albums struct {
mu sync.Mutex
dupes map[string][]*api.Album // duplicated names
byID map[string]*api.Album //..indexed by ID
byTitle map[string]*api.Album //..indexed by Title
path map[string][]string // partial album names to directory
}

// Create a new album
func newAlbums() *albums {
return &albums{
dupes: map[string][]*api.Album{},
byID: map[string]*api.Album{},
byTitle: map[string]*api.Album{},
path: map[string][]string{},
}
}

// add an album
func (as *albums) add(album *api.Album) {
// Munge the name of the album into a sensible path name
album.Title = path.Clean(album.Title)
if album.Title == "." || album.Title == "/" {
album.Title = addID("", album.ID)
}

as.mu.Lock()
as._add(album)
as.mu.Unlock()
}

// _add an album - call with lock held
func (as *albums) _add(album *api.Album) {
// update dupes by title
dupes := as.dupes[album.Title]
dupes = append(dupes, album)
as.dupes[album.Title] = dupes

// Dedupe the album name if necessary
if len(dupes) >= 2 {
// If this is the first dupe, then need to adjust the first one
if len(dupes) == 2 {
firstAlbum := dupes[0]
as._del(firstAlbum)
as._add(firstAlbum)
// undo add of firstAlbum to dupes
as.dupes[album.Title] = dupes
}
album.Title = addID(album.Title, album.ID)
}

// Store the new album
as.byID[album.ID] = album
as.byTitle[album.Title] = album

// Store the partial paths
dir, leaf := album.Title, ""
for dir != "" {
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
found := false
for _, dir := range dirs {
if dir == leaf {
found = true
}
}
if !found {
as.path[dir] = append(as.path[dir], leaf)
}
}
}

// del an album
func (as *albums) del(album *api.Album) {
as.mu.Lock()
as._del(album)
as.mu.Unlock()
}

// _del an album - call with lock held
func (as *albums) _del(album *api.Album) {
// We leave in dupes so it doesn't cause albums to get renamed

// Remove from byID and byTitle
delete(as.byID, album.ID)
delete(as.byTitle, album.Title)

// Remove from paths
dir, leaf := album.Title, ""
for dir != "" {
// Can't delete if this dir exists anywhere in the path structure
if _, found := as.path[dir]; found {
break
}
i := strings.LastIndex(dir, "/")
if i >= 0 {
dir, leaf = dir[:i], dir[i+1:]
} else {
dir, leaf = "", dir
}
dirs := as.path[dir]
for i, dir := range dirs {
if dir == leaf {
dirs = append(dirs[:i], dirs[i+1:]...)
break
}
}
if len(dirs) == 0 {
delete(as.path, dir)
} else {
as.path[dir] = dirs
}
}
}

// get an album by title
func (as *albums) get(title string) (album *api.Album, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
album, ok = as.byTitle[title]
return album, ok
}

// getDirs gets directories below an album path
func (as *albums) getDirs(albumPath string) (dirs []string, ok bool) {
as.mu.Lock()
defer as.mu.Unlock()
dirs, ok = as.path[albumPath]
return dirs, ok
}
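
The path index built by _add means album titles behave like directory paths. A sketch of the behaviour the tests below verify (it uses this package's unexported identifiers, so it is illustrative rather than standalone):

package googlephotos

import (
	"fmt"

	"github.com/rclone/rclone/backend/googlephotos/api"
)

// Example_albums is an editor's sketch, not part of the diff: adding an
// album titled "one/sub" makes "one" browsable as a directory.
func Example_albums() {
	as := newAlbums()
	as.add(&api.Album{Title: "one/sub", ID: "1sub"})

	dirs, _ := as.getDirs("")    // top-level directories
	subs, _ := as.getDirs("one") // children of "one"
	album, ok := as.get("one/sub")
	fmt.Println(dirs, subs, album.ID, ok)
	// Output: [one] [sub] 1sub true
}
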

backend/googlephotos/albums_test.go (new file, 311 lines)
@@ -0,0 +1,311 @@
package googlephotos

import (
"testing"

"github.com/rclone/rclone/backend/googlephotos/api"
"github.com/stretchr/testify/assert"
)

func TestNewAlbums(t *testing.T) {
albums := newAlbums()
assert.NotNil(t, albums.dupes)
assert.NotNil(t, albums.byID)
assert.NotNil(t, albums.byTitle)
assert.NotNil(t, albums.path)
}

func TestAlbumsAdd(t *testing.T) {
albums := newAlbums()

assert.Equal(t, map[string][]*api.Album{}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)

a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
}, albums.path)

a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two": a2,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two"},
}, albums.path)

// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
}, albums.path)

// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)

// Add a weird path
a0 := &api.Album{
Title: "/../././..////.",
ID: "0",
}
albums.add(a0)

assert.Equal(t, map[string][]*api.Album{
"{0}": []*api.Album{a0},
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"0": a0,
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"{0}": a0,
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}", "{0}"},
"one": []string{"sub"},
}, albums.path)
}

func TestAlbumsDel(t *testing.T) {
albums := newAlbums()

a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)

a2 := &api.Album{
Title: "two",
ID: "2",
}
albums.add(a2)

// Add a duplicate
a2a := &api.Album{
Title: "two",
ID: "2a",
}
albums.add(a2a)

// Add a sub directory
a1sub := &api.Album{
Title: "one/sub",
ID: "1sub",
}
albums.add(a1sub)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1": a1,
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one": a1,
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)

albums.del(a1)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2": a2,
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2}": a2,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2}", "two {2a}"},
"one": []string{"sub"},
}, albums.path)

albums.del(a2)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"2a": a2a,
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
"two {2a}": a2a,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one", "two {2a}"},
"one": []string{"sub"},
}, albums.path)

albums.del(a2a)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{
"1sub": a1sub,
}, albums.byID)
assert.Equal(t, map[string]*api.Album{
"one/sub": a1sub,
}, albums.byTitle)
assert.Equal(t, map[string][]string{
"": []string{"one"},
"one": []string{"sub"},
}, albums.path)

albums.del(a1sub)

assert.Equal(t, map[string][]*api.Album{
"one": []*api.Album{a1},
"two": []*api.Album{a2, a2a},
"one/sub": []*api.Album{a1sub},
}, albums.dupes)
assert.Equal(t, map[string]*api.Album{}, albums.byID)
assert.Equal(t, map[string]*api.Album{}, albums.byTitle)
assert.Equal(t, map[string][]string{}, albums.path)
}

func TestAlbumsGet(t *testing.T) {
albums := newAlbums()

a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)

album, ok := albums.get("one")
assert.Equal(t, true, ok)
assert.Equal(t, a1, album)

album, ok = albums.get("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, album)
}

func TestAlbumsGetDirs(t *testing.T) {
albums := newAlbums()

a1 := &api.Album{
Title: "one",
ID: "1",
}
albums.add(a1)

dirs, ok := albums.getDirs("")
assert.Equal(t, true, ok)
assert.Equal(t, []string{"one"}, dirs)

dirs, ok = albums.getDirs("notfound")
assert.Equal(t, false, ok)
assert.Nil(t, dirs)
}

backend/googlephotos/api/types.go (new file, 190 lines)
@@ -0,0 +1,190 @@
package api

import (
"fmt"
"time"
)

// ErrorDetails in the internals of the Error type
type ErrorDetails struct {
Code int `json:"code"`
Message string `json:"message"`
Status string `json:"status"`
}

// Error is returned on errors
type Error struct {
Details ErrorDetails `json:"error"`
}

// Error satisfies the error interface
func (e *Error) Error() string {
return fmt.Sprintf("%s (%d %s)", e.Details.Message, e.Details.Code, e.Details.Status)
}

// Album of photos
type Album struct {
ID string `json:"id,omitempty"`
Title string `json:"title"`
ProductURL string `json:"productUrl,omitempty"`
MediaItemsCount string `json:"mediaItemsCount,omitempty"`
CoverPhotoBaseURL string `json:"coverPhotoBaseUrl,omitempty"`
CoverPhotoMediaItemID string `json:"coverPhotoMediaItemId,omitempty"`
IsWriteable bool `json:"isWriteable,omitempty"`
}

// ListAlbums is returned from albums.list and sharedAlbums.list
type ListAlbums struct {
Albums []Album `json:"albums"`
SharedAlbums []Album `json:"sharedAlbums"`
NextPageToken string `json:"nextPageToken"`
}

// CreateAlbum creates an Album
type CreateAlbum struct {
Album *Album `json:"album"`
}

// MediaItem is a photo or video
type MediaItem struct {
ID string `json:"id"`
ProductURL string `json:"productUrl"`
BaseURL string `json:"baseUrl"`
MimeType string `json:"mimeType"`
MediaMetadata struct {
CreationTime time.Time `json:"creationTime"`
Width string `json:"width"`
Height string `json:"height"`
Photo struct {
} `json:"photo"`
} `json:"mediaMetadata"`
Filename string `json:"filename"`
}

// MediaItems is returned from mediaitems.list, mediaitems.search
type MediaItems struct {
MediaItems []MediaItem `json:"mediaItems"`
NextPageToken string `json:"nextPageToken"`
}
|
||||
|
||||
//Content categories
|
||||
// NONE Default content category. This category is ignored when any other category is used in the filter.
|
||||
// LANDSCAPES Media items containing landscapes.
|
||||
// RECEIPTS Media items containing receipts.
|
||||
// CITYSCAPES Media items containing cityscapes.
|
||||
// LANDMARKS Media items containing landmarks.
|
||||
// SELFIES Media items that are selfies.
|
||||
// PEOPLE Media items containing people.
|
||||
// PETS Media items containing pets.
|
||||
// WEDDINGS Media items from weddings.
|
||||
// BIRTHDAYS Media items from birthdays.
|
||||
// DOCUMENTS Media items containing documents.
|
||||
// TRAVEL Media items taken during travel.
|
||||
// ANIMALS Media items containing animals.
|
||||
// FOOD Media items containing food.
|
||||
// SPORT Media items from sporting events.
|
||||
// NIGHT Media items taken at night.
|
||||
// PERFORMANCES Media items from performances.
|
||||
// WHITEBOARDS Media items containing whiteboards.
|
||||
// SCREENSHOTS Media items that are screenshots.
|
||||
// UTILITY Media items that are considered to be utility. These include, but aren't limited to documents, screenshots, whiteboards etc.
|
||||
// ARTS Media items containing art.
|
||||
// CRAFTS Media items containing crafts.
|
||||
// FASHION Media items related to fashion.
|
||||
// HOUSES Media items containing houses.
|
||||
// GARDENS Media items containing gardens.
|
||||
// FLOWERS Media items containing flowers.
|
||||
// HOLIDAYS Media items taken of holidays.
|
||||
|
||||
// MediaTypes
|
||||
// ALL_MEDIA Treated as if no filters are applied. All media types are included.
|
||||
// VIDEO All media items that are considered videos. This also includes movies the user has created using the Google Photos app.
|
||||
// PHOTO All media items that are considered photos. This includes .bmp, .gif, .ico, .jpg (and other spellings), .tiff, .webp and special photo types such as iOS live photos, Android motion photos, panoramas, photospheres.
|
||||
|
||||
// Features
|
||||
// NONE Treated as if no filters are applied. All features are included.
|
||||
// FAVORITES Media items that the user has marked as favorites in the Google Photos app.
|
||||
|
||||
// Date is used as part of SearchFilter
|
||||
type Date struct {
|
||||
Year int `json:"year,omitempty"`
|
||||
Month int `json:"month,omitempty"`
|
||||
Day int `json:"day,omitempty"`
|
||||
}
|
||||
|
||||
// DateFilter is uses to add date ranges to media item queries
|
||||
type DateFilter struct {
|
||||
Dates []Date `json:"dates,omitempty"`
|
||||
Ranges []struct {
|
||||
StartDate Date `json:"startDate,omitempty"`
|
||||
EndDate Date `json:"endDate,omitempty"`
|
||||
} `json:"ranges,omitempty"`
|
||||
}
|
||||
|
||||
// ContentFilter is uses to add content categories to media item queries
|
||||
type ContentFilter struct {
|
||||
IncludedContentCategories []string `json:"includedContentCategories,omitempty"`
|
||||
ExcludedContentCategories []string `json:"excludedContentCategories,omitempty"`
|
||||
}
|
||||
|
||||
// MediaTypeFilter is uses to add media types to media item queries
|
||||
type MediaTypeFilter struct {
|
||||
MediaTypes []string `json:"mediaTypes,omitempty"`
|
||||
}
|
||||
|
||||
// FeatureFilter is uses to add features to media item queries
|
||||
type FeatureFilter struct {
|
||||
IncludedFeatures []string `json:"includedFeatures,omitempty"`
|
||||
}
|
||||
|
||||
// Filters combines all the filter types for media item queries
|
||||
type Filters struct {
|
||||
DateFilter *DateFilter `json:"dateFilter,omitempty"`
|
||||
ContentFilter *ContentFilter `json:"contentFilter,omitempty"`
|
||||
MediaTypeFilter *MediaTypeFilter `json:"mediaTypeFilter,omitempty"`
|
||||
FeatureFilter *FeatureFilter `json:"featureFilter,omitempty"`
|
||||
IncludeArchivedMedia *bool `json:"includeArchivedMedia,omitempty"`
|
||||
ExcludeNonAppCreatedData *bool `json:"excludeNonAppCreatedData,omitempty"`
|
||||
}
|
||||
|
||||
// SearchFilter is uses with mediaItems.search
|
||||
type SearchFilter struct {
|
||||
AlbumID string `json:"albumId,omitempty"`
|
||||
PageSize int `json:"pageSize"`
|
||||
PageToken string `json:"pageToken,omitempty"`
|
||||
Filters *Filters `json:"filters,omitempty"`
|
||||
}
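To make the relationships between these types concrete, here is a hedged sketch (not part of the diff) composing a mediaItems.search body from them; the string values come from the category/type/feature lists above:

	// Illustrative only: a search for favourite photos from 2019.
	includeArchived := false
	filter := api.SearchFilter{
		PageSize: 100,
		Filters: &api.Filters{
			DateFilter:           &api.DateFilter{Dates: []api.Date{{Year: 2019}}},
			MediaTypeFilter:      &api.MediaTypeFilter{MediaTypes: []string{"PHOTO"}},
			FeatureFilter:        &api.FeatureFilter{IncludedFeatures: []string{"FAVORITES"}},
			IncludeArchivedMedia: &includeArchived,
		},
	}
	_ = filter // would be sent as the JSON body of mediaItems.search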
// SimpleMediaItem is part of NewMediaItem
type SimpleMediaItem struct {
	UploadToken string `json:"uploadToken"`
}

// NewMediaItem is a single media item for upload
type NewMediaItem struct {
	Description     string          `json:"description"`
	SimpleMediaItem SimpleMediaItem `json:"simpleMediaItem"`
}

// BatchCreateRequest creates media items from upload tokens
type BatchCreateRequest struct {
	AlbumID       string         `json:"albumId,omitempty"`
	NewMediaItems []NewMediaItem `json:"newMediaItems"`
}

// BatchCreateResponse is returned from BatchCreateRequest
type BatchCreateResponse struct {
	NewMediaItemResults []struct {
		UploadToken string `json:"uploadToken"`
		Status      struct {
			Message string `json:"message"`
			Code    int    `json:"code"`
		} `json:"status"`
		MediaItem MediaItem `json:"mediaItem"`
	} `json:"newMediaItemResults"`
}

// BatchRemoveItems is for removing items from an album
type BatchRemoveItems struct {
	MediaItemIds []string `json:"mediaItemIds"`
}
1059 backend/googlephotos/googlephotos.go Normal file
(File diff suppressed because it is too large)

297 backend/googlephotos/googlephotos_test.go Normal file
@@ -0,0 +1,297 @@
package googlephotos

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"path"
	"testing"
	"time"

	_ "github.com/rclone/rclone/backend/local"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/lib/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

const (
	// We have two different files here as Google Photos will
	// deduplicate them otherwise, which confuses the tests as the
	// filename is unexpected.
	fileNameAlbum  = "rclone-test-image1.jpg"
	fileNameUpload = "rclone-test-image2.jpg"
)

func TestIntegration(t *testing.T) {
	ctx := context.Background()
	fstest.Initialise()

	// Create Fs
	if *fstest.RemoteName == "" {
		*fstest.RemoteName = "TestGooglePhotos:"
	}
	f, err := fs.NewFs(*fstest.RemoteName)
	if err == fs.ErrorNotFoundInConfigFile {
		t.Skip(fmt.Sprintf("Couldn't create google photos backend - skipping tests: %v", err))
	}
	require.NoError(t, err)

	// Create local Fs pointing at testfiles
	localFs, err := fs.NewFs("testfiles")
	require.NoError(t, err)

	t.Run("CreateAlbum", func(t *testing.T) {
		albumName := "album/rclone-test-" + random.String(24)
		err = f.Mkdir(ctx, albumName)
		require.NoError(t, err)
		remote := albumName + "/" + fileNameAlbum

		t.Run("PutFile", func(t *testing.T) {
			srcObj, err := localFs.NewObject(ctx, fileNameAlbum)
			require.NoError(t, err)
			in, err := srcObj.Open(ctx)
			require.NoError(t, err)
			dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
			require.NoError(t, err)
			assert.Equal(t, remote, dstObj.Remote())
			_ = in.Close()
			remoteWithID := addFileID(remote, dstObj.(*Object).id)

			t.Run("ObjectFs", func(t *testing.T) {
				assert.Equal(t, f, dstObj.Fs())
			})

			t.Run("ObjectString", func(t *testing.T) {
				assert.Equal(t, remote, dstObj.String())
				assert.Equal(t, "<nil>", (*Object)(nil).String())
			})

			t.Run("ObjectHash", func(t *testing.T) {
				h, err := dstObj.Hash(ctx, hash.MD5)
				assert.Equal(t, "", h)
				assert.Equal(t, hash.ErrUnsupported, err)
			})

			t.Run("ObjectSize", func(t *testing.T) {
				assert.Equal(t, int64(-1), dstObj.Size())
				f.(*Fs).opt.ReadSize = true
				defer func() {
					f.(*Fs).opt.ReadSize = false
				}()
				size := dstObj.Size()
				assert.True(t, size > 1000, fmt.Sprintf("Size too small %d", size))
			})

			t.Run("ObjectSetModTime", func(t *testing.T) {
				err := dstObj.SetModTime(ctx, time.Now())
				assert.Equal(t, fs.ErrorCantSetModTime, err)
			})

			t.Run("ObjectStorable", func(t *testing.T) {
				assert.True(t, dstObj.Storable())
			})

			t.Run("ObjectOpen", func(t *testing.T) {
				in, err := dstObj.Open(ctx)
				require.NoError(t, err)
				buf, err := ioutil.ReadAll(in)
				require.NoError(t, err)
				require.NoError(t, in.Close())
				assert.True(t, len(buf) > 1000)
				contentType := http.DetectContentType(buf[:512])
				assert.Equal(t, "image/jpeg", contentType)
			})

			t.Run("CheckFileInAlbum", func(t *testing.T) {
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 1, len(entries))
				assert.Equal(t, remote, entries[0].Remote())
				assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
			})

			// Check it is there in the day/month/year hierarchy
			// 2013-07-13 is the creation date of the folder
			checkPresent := func(t *testing.T, objPath string) {
				entries, err := f.List(ctx, objPath)
				require.NoError(t, err)
				found := false
				for _, entry := range entries {
					leaf := path.Base(entry.Remote())
					if leaf == fileNameAlbum || leaf == remoteWithID {
						found = true
					}
				}
				assert.True(t, found, fmt.Sprintf("didn't find %q in %q", fileNameAlbum, objPath))
			}

			t.Run("CheckInByYear", func(t *testing.T) {
				checkPresent(t, "media/by-year/2013")
			})

			t.Run("CheckInByMonth", func(t *testing.T) {
				checkPresent(t, "media/by-month/2013/2013-07")
			})

			t.Run("CheckInByDay", func(t *testing.T) {
				checkPresent(t, "media/by-day/2013/2013-07-26")
			})

			t.Run("NewObject", func(t *testing.T) {
				o, err := f.NewObject(ctx, remote)
				require.NoError(t, err)
				require.Equal(t, remote, o.Remote())
			})

			t.Run("NewObjectWithID", func(t *testing.T) {
				o, err := f.NewObject(ctx, remoteWithID)
				require.NoError(t, err)
				require.Equal(t, remoteWithID, o.Remote())
			})

			t.Run("NewFsIsFile", func(t *testing.T) {
				fNew, err := fs.NewFs(*fstest.RemoteName + remote)
				assert.Equal(t, fs.ErrorIsFile, err)
				leaf := path.Base(remote)
				o, err := fNew.NewObject(ctx, leaf)
				require.NoError(t, err)
				require.Equal(t, leaf, o.Remote())
			})

			t.Run("RemoveFileFromAlbum", func(t *testing.T) {
				err = dstObj.Remove(ctx)
				require.NoError(t, err)

				time.Sleep(time.Second)

				// Check album empty
				entries, err := f.List(ctx, albumName)
				require.NoError(t, err)
				assert.Equal(t, 0, len(entries))
			})
		})

		// remove the album
		err = f.Rmdir(ctx, albumName)
		require.Error(t, err) // FIXME doesn't work yet
	})

	t.Run("UploadMkdir", func(t *testing.T) {
		assert.NoError(t, f.Mkdir(ctx, "upload/dir"))
		assert.NoError(t, f.Mkdir(ctx, "upload/dir/subdir"))

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir", entries[0].Remote())

			entries, err = f.List(ctx, "upload/dir")
			require.NoError(t, err)
			assert.Equal(t, 1, len(entries))
			assert.Equal(t, "upload/dir/subdir", entries[0].Remote())
		})

		t.Run("Rmdir", func(t *testing.T) {
			assert.NoError(t, f.Rmdir(ctx, "upload/dir/subdir"))
			assert.NoError(t, f.Rmdir(ctx, "upload/dir"))
		})

		t.Run("ListEmpty", func(t *testing.T) {
			entries, err := f.List(ctx, "upload")
			require.NoError(t, err)
			assert.Equal(t, 0, len(entries))

			_, err = f.List(ctx, "upload/dir")
			assert.Equal(t, fs.ErrorDirNotFound, err)
		})
	})

	t.Run("Upload", func(t *testing.T) {
		uploadDir := "upload/dir/subdir"
		remote := path.Join(uploadDir, fileNameUpload)

		srcObj, err := localFs.NewObject(ctx, fileNameUpload)
		require.NoError(t, err)
		in, err := srcObj.Open(ctx)
		require.NoError(t, err)
		dstObj, err := f.Put(ctx, in, operations.NewOverrideRemote(srcObj, remote))
		require.NoError(t, err)
		assert.Equal(t, remote, dstObj.Remote())
		_ = in.Close()
		remoteWithID := addFileID(remote, dstObj.(*Object).id)

		t.Run("List", func(t *testing.T) {
			entries, err := f.List(ctx, uploadDir)
			require.NoError(t, err)
			require.Equal(t, 1, len(entries))
			assert.Equal(t, remote, entries[0].Remote())
			assert.Equal(t, "2013-07-26 08:57:21 +0000 UTC", entries[0].ModTime(ctx).String())
		})

		t.Run("NewObject", func(t *testing.T) {
			o, err := f.NewObject(ctx, remote)
			require.NoError(t, err)
			require.Equal(t, remote, o.Remote())
		})

		t.Run("NewObjectWithID", func(t *testing.T) {
			o, err := f.NewObject(ctx, remoteWithID)
			require.NoError(t, err)
			require.Equal(t, remoteWithID, o.Remote())
		})
	})

	t.Run("Name", func(t *testing.T) {
		assert.Equal(t, (*fstest.RemoteName)[:len(*fstest.RemoteName)-1], f.Name())
	})

	t.Run("Root", func(t *testing.T) {
		assert.Equal(t, "", f.Root())
	})

	t.Run("String", func(t *testing.T) {
		assert.Equal(t, `Google Photos path ""`, f.String())
	})

	t.Run("Features", func(t *testing.T) {
		features := f.Features()
		assert.False(t, features.CaseInsensitive)
		assert.True(t, features.ReadMimeType)
	})

	t.Run("Precision", func(t *testing.T) {
		assert.Equal(t, fs.ModTimeNotSupported, f.Precision())
	})

	t.Run("Hashes", func(t *testing.T) {
		assert.Equal(t, hash.Set(hash.None), f.Hashes())
	})
}

func TestAddID(t *testing.T) {
	assert.Equal(t, "potato {123}", addID("potato", "123"))
	assert.Equal(t, "{123}", addID("", "123"))
}

func TestFileAddID(t *testing.T) {
	assert.Equal(t, "potato {123}.txt", addFileID("potato.txt", "123"))
	assert.Equal(t, "potato {123}", addFileID("potato", "123"))
	assert.Equal(t, "{123}", addFileID("", "123"))
}

func TestFindID(t *testing.T) {
	assert.Equal(t, "", findID("potato"))
	ID := "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
	assert.Equal(t, ID, findID("potato {"+ID+"}.txt"))
	ID = ID[1:]
	assert.Equal(t, "", findID("potato {"+ID+"}.txt"))
}
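The three tests above fix the `name {ID}` naming convention used throughout the backend. The real implementations live in googlephotos.go (whose diff is suppressed above), so the following is only a hypothetical sketch consistent with the tests:

package sketch // hypothetical reimplementation, for illustration only

import (
	"path"
	"regexp"
	"strings"
)

func addID(name string, ID string) string {
	if name == "" {
		return "{" + ID + "}"
	}
	return name + " {" + ID + "}"
}

func addFileID(name string, ID string) string {
	ext := path.Ext(name)                 // ".txt" for "potato.txt", "" if none
	base := strings.TrimSuffix(name, ext) // "potato"
	return addID(base, ID) + ext          // "potato {123}.txt"
}

// TestFindID only accepts the full-length ID (dropping one character must
// fail), so the sketch assumes a fixed ID length taken from the test string.
var idRe = regexp.MustCompile(`\{([A-Za-z0-9_-]{55})\}`)

func findID(name string) string {
	if m := idRe.FindStringSubmatch(name); m != nil {
		return m[1]
	}
	return ""
}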
335 backend/googlephotos/pattern.go Normal file
@@ -0,0 +1,335 @@
// Store the parsing of file patterns

package googlephotos

import (
	"context"
	"fmt"
	"path"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
)

// lister describes the subset of the interfaces on Fs needed for the
// file pattern parsing
type lister interface {
	listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error)
	listAlbums(ctx context.Context, shared bool) (all *albums, err error)
	listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error)
	dirTime() time.Time
}

// dirPattern describes a single directory pattern
type dirPattern struct {
	re        string         // match for the path
	match     *regexp.Regexp // compiled match
	canUpload bool           // true if can upload here
	canMkdir  bool           // true if can make a directory here
	isFile    bool           // true if this is a file
	isUpload  bool           // true if this is the upload directory
	// function to turn a match into DirEntries
	toEntries func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error)
}

// dirPatterns is a slice of all the directory patterns
type dirPatterns []dirPattern

// patterns describes the layout of the google photos backend file system.
//
// NB no trailing / on paths
var patterns = dirPatterns{
	{
		re: `^$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"media", f.dirTime()),
				fs.NewDir(prefix+"album", f.dirTime()),
				fs.NewDir(prefix+"shared-album", f.dirTime()),
				fs.NewDir(prefix+"upload", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^upload(?:/(.*))?$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listUploads(ctx, match[0])
		},
		canUpload: true,
		canMkdir:  true,
		isUpload:  true,
	},
	{
		re:        `^upload/(.*)$`,
		isFile:    true,
		canUpload: true,
		isUpload:  true,
	},
	{
		re: `^media$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return fs.DirEntries{
				fs.NewDir(prefix+"all", f.dirTime()),
				fs.NewDir(prefix+"by-year", f.dirTime()),
				fs.NewDir(prefix+"by-month", f.dirTime()),
				fs.NewDir(prefix+"by-day", f.dirTime()),
			}, nil
		},
	},
	{
		re: `^media/all$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			return f.listDir(ctx, prefix, api.SearchFilter{})
		},
	},
	{
		re:     `^media/all/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-year$`,
		toEntries: years,
	},
	{
		re: `^media/by-year/(\d{4})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-year/(\d{4})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-month$`,
		toEntries: years,
	},
	{
		re:        `^media/by-month/(\d{4})$`,
		toEntries: months,
	},
	{
		re: `^media/by-month/\d{4}/(\d{4})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-month/\d{4}/(\d{4})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re:        `^media/by-day$`,
		toEntries: years,
	},
	{
		re:        `^media/by-day/(\d{4})$`,
		toEntries: days,
	},
	{
		re: `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (fs.DirEntries, error) {
			filter, err := yearMonthDayFilter(ctx, f, match)
			if err != nil {
				return nil, err
			}
			return f.listDir(ctx, prefix, filter)
		},
	},
	{
		re:     `^media/by-day/\d{4}/(\d{4})-(\d{2})-(\d{2})/([^/]+)$`,
		isFile: true,
	},
	{
		re: `^album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, "")
		},
	},
	{
		re:       `^album/(.+)$`,
		canMkdir: true,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, false, prefix, match[1])
		},
	},
	{
		re:        `^album/(.+?)/([^/]+)$`,
		canUpload: true,
		isFile:    true,
	},
	{
		re: `^shared-album$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, "")
		},
	},
	{
		re: `^shared-album/(.+)$`,
		toEntries: func(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
			return albumsToEntries(ctx, f, true, prefix, match[1])
		},
	},
	{
		re:     `^shared-album/(.+?)/([^/]+)$`,
		isFile: true,
	},
}.mustCompile()

// mustCompile compiles the regexps in the dirPatterns
func (ds dirPatterns) mustCompile() dirPatterns {
	for i := range ds {
		pattern := &ds[i]
		pattern.match = regexp.MustCompile(pattern.re)
	}
	return ds
}

// match finds the path passed in within the matching structure and
// returns the parameters and a pointer to the match, or nil.
func (ds dirPatterns) match(root string, itemPath string, isFile bool) (match []string, prefix string, pattern *dirPattern) {
	itemPath = strings.Trim(itemPath, "/")
	absPath := path.Join(root, itemPath)
	prefix = strings.Trim(absPath[len(root):], "/")
	if prefix != "" {
		prefix += "/"
	}
	for i := range ds {
		pattern = &ds[i]
		if pattern.isFile != isFile {
			continue
		}
		match = pattern.match.FindStringSubmatch(absPath)
		if match != nil {
			return
		}
	}
	return nil, "", nil
}

// years returns the years from 2000 to today
// FIXME make configurable?
func years(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	currentYear := f.dirTime().Year()
	for year := 2000; year <= currentYear; year++ {
		entries = append(entries, fs.NewDir(prefix+fmt.Sprint(year), f.dirTime()))
	}
	return entries, nil
}

// months returns the months in a given year
func months(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	for month := 1; month <= 12; month++ {
		entries = append(entries, fs.NewDir(fmt.Sprintf("%s%s-%02d", prefix, year, month), f.dirTime()))
	}
	return entries, nil
}

// days returns the days in a given year
func days(ctx context.Context, f lister, prefix string, match []string) (entries fs.DirEntries, err error) {
	year := match[1]
	current, err := time.Parse("2006", year)
	if err != nil {
		return nil, errors.Errorf("bad year %q", match[1])
	}
	currentYear := current.Year()
	for current.Year() == currentYear {
		entries = append(entries, fs.NewDir(prefix+current.Format("2006-01-02"), f.dirTime()))
		current = current.AddDate(0, 0, 1)
	}
	return entries, nil
}

// yearMonthDayFilter creates a search filter on year/month/day as provided
func yearMonthDayFilter(ctx context.Context, f lister, match []string) (sf api.SearchFilter, err error) {
	year, err := strconv.Atoi(match[1])
	if err != nil || year < 1000 || year > 3000 {
		return sf, errors.Errorf("bad year %q", match[1])
	}
	sf = api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: year,
					},
				},
			},
		},
	}
	if len(match) >= 3 {
		month, err := strconv.Atoi(match[2])
		if err != nil || month < 1 || month > 12 {
			return sf, errors.Errorf("bad month %q", match[2])
		}
		sf.Filters.DateFilter.Dates[0].Month = month
	}
	if len(match) >= 4 {
		day, err := strconv.Atoi(match[3])
		if err != nil || day < 1 || day > 31 {
			return sf, errors.Errorf("bad day %q", match[3])
		}
		sf.Filters.DateFilter.Dates[0].Day = day
	}
	return sf, nil
}

// albumsToEntries turns an albumPath into entries
//
// These can either be synthetic directory entries if the album path
// is a prefix of another album, or actual files, or a combination of
// the two.
func albumsToEntries(ctx context.Context, f lister, shared bool, prefix string, albumPath string) (entries fs.DirEntries, err error) {
	albums, err := f.listAlbums(ctx, shared)
	if err != nil {
		return nil, err
	}
	// Put in the directories
	dirs, foundAlbumPath := albums.getDirs(albumPath)
	if foundAlbumPath {
		for _, dir := range dirs {
			d := fs.NewDir(prefix+dir, f.dirTime())
			dirPath := path.Join(albumPath, dir)
			// if this dir is an album add more special stuff
			album, ok := albums.get(dirPath)
			if ok {
				count, err := strconv.ParseInt(album.MediaItemsCount, 10, 64)
				if err != nil {
					fs.Debugf(f, "Error reading media count: %v", err)
				}
				d.SetID(album.ID).SetItems(count)
			}
			entries = append(entries, d)
		}
	}
	// if this is an album then return a filter to list it
	album, foundAlbum := albums.get(albumPath)
	if foundAlbum {
		filter := api.SearchFilter{AlbumID: album.ID}
		newEntries, err := f.listDir(ctx, prefix, filter)
		if err != nil {
			return nil, err
		}
		entries = append(entries, newEntries...)
	}
	if !foundAlbumPath && !foundAlbum && albumPath != "" {
		return nil, fs.ErrorDirNotFound
	}
	return entries, nil
}
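As a quick illustration of how the table resolves a path (mirroring the cases in pattern_test.go below; not part of the diff itself):

	// Illustrative only: resolving "media/by-day/2013/2013-07-26" against the
	// pattern table above. match[1..3] carry the captured year/month/day,
	// which yearMonthDayFilter turns into an api.SearchFilter.
	match, prefix, pattern := patterns.match("", "media/by-day/2013/2013-07-26", false)
	// match  == []string{"media/by-day/2013/2013-07-26", "2013", "07", "26"}
	// prefix == "media/by-day/2013/2013-07-26/"
	// pattern.toEntries then lists that day via a date-filtered search
	_, _, _ = match, prefix, pattern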
495 backend/googlephotos/pattern_test.go Normal file
@@ -0,0 +1,495 @@
package googlephotos

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/rclone/rclone/backend/googlephotos/api"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/dirtree"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/mockobject"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

// time for directories
var startTime = fstest.Time("2019-06-24T15:53:05.999999999Z")

// mock Fs for testing patterns
type testLister struct {
	t        *testing.T
	albums   *albums
	names    []string
	uploaded dirtree.DirTree
}

// newTestLister makes a mock for testing
func newTestLister(t *testing.T) *testLister {
	return &testLister{
		t:        t,
		albums:   newAlbums(),
		uploaded: dirtree.New(),
	}
}

// mock listDir for testing
func (f *testLister) listDir(ctx context.Context, prefix string, filter api.SearchFilter) (entries fs.DirEntries, err error) {
	for _, name := range f.names {
		entries = append(entries, mockobject.New(prefix+name))
	}
	return entries, nil
}

// mock listAlbums for testing
func (f *testLister) listAlbums(ctx context.Context, shared bool) (all *albums, err error) {
	return f.albums, nil
}

// mock listUploads for testing
func (f *testLister) listUploads(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
	entries, _ = f.uploaded[dir]
	return entries, nil
}

// mock dirTime for testing
func (f *testLister) dirTime() time.Time {
	return startTime
}

func TestPatternMatch(t *testing.T) {
	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		isFile   bool
		// expected output
		wantMatch   []string
		wantPrefix  string
		wantPattern *dirPattern
	}{
		{
			root:        "",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{""},
			wantPrefix:  "",
			wantPattern: &patterns[0],
		},
		{
			root:        "",
			itemPath:    "",
			isFile:      true,
			wantMatch:   nil,
			wantPrefix:  "",
			wantPattern: nil,
		},
		{
			root:        "upload",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload", ""},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/dir",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"upload/dir", "dir"},
			wantPrefix:  "",
			wantPattern: &patterns[1],
		},
		{
			root:        "upload/file.jpg",
			itemPath:    "",
			isFile:      true,
			wantMatch:   []string{"upload/file.jpg", "file.jpg"},
			wantPrefix:  "",
			wantPattern: &patterns[2],
		},
		{
			root:        "media",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "",
			wantPattern: &patterns[3],
		},
		{
			root:        "",
			itemPath:    "media",
			isFile:      false,
			wantMatch:   []string{"media"},
			wantPrefix:  "media/",
			wantPattern: &patterns[3],
		},
		{
			root:        "media/all",
			itemPath:    "",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "",
			wantPattern: &patterns[4],
		},
		{
			root:        "media",
			itemPath:    "all",
			isFile:      false,
			wantMatch:   []string{"media/all"},
			wantPrefix:  "all/",
			wantPattern: &patterns[4],
		},
		{
			root:        "media/all",
			itemPath:    "file.jpg",
			isFile:      true,
			wantMatch:   []string{"media/all/file.jpg", "file.jpg"},
			wantPrefix:  "file.jpg/",
			wantPattern: &patterns[5],
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q,isFile=%v", testNumber, test.root, test.itemPath, test.isFile), func(t *testing.T) {
			gotMatch, gotPrefix, gotPattern := patterns.match(test.root, test.itemPath, test.isFile)
			assert.Equal(t, test.wantMatch, gotMatch)
			assert.Equal(t, test.wantPrefix, gotPrefix)
			assert.Equal(t, test.wantPattern, gotPattern)
		})
	}
}

func TestPatternMatchToEntries(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)
	f.names = []string{"file.jpg"}
	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})
	f.albums.add(&api.Album{
		ID:    "2",
		Title: "sub",
	})
	f.uploaded.AddEntry(mockobject.New("upload/file1.jpg"))
	f.uploaded.AddEntry(mockobject.New("upload/dir/file2.jpg"))

	for testNumber, test := range []struct {
		// input
		root     string
		itemPath string
		// expected output
		wantMatch  []string
		wantPrefix string
		remotes    []string
	}{
		{
			root:       "",
			itemPath:   "",
			wantMatch:  []string{""},
			wantPrefix: "",
			remotes:    []string{"media/", "album/", "shared-album/", "upload/"},
		},
		{
			root:       "upload",
			itemPath:   "",
			wantMatch:  []string{"upload", ""},
			wantPrefix: "",
			remotes:    []string{"upload/file1.jpg", "upload/dir/"},
		},
		{
			root:       "upload",
			itemPath:   "dir",
			wantMatch:  []string{"upload/dir", "dir"},
			wantPrefix: "dir/",
			remotes:    []string{"upload/dir/file2.jpg"},
		},
		{
			root:       "media",
			itemPath:   "",
			wantMatch:  []string{"media"},
			wantPrefix: "",
			remotes:    []string{"all/", "by-year/", "by-month/", "by-day/"},
		},
		{
			root:       "media/all",
			itemPath:   "",
			wantMatch:  []string{"media/all"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media",
			itemPath:   "all",
			wantMatch:  []string{"media/all"},
			wantPrefix: "all/",
			remotes:    []string{"all/file.jpg"},
		},
		{
			root:       "media/by-year",
			itemPath:   "",
			wantMatch:  []string{"media/by-year"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-year/2000",
			itemPath:   "",
			wantMatch:  []string{"media/by-year/2000", "2000"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-month",
			itemPath:   "",
			wantMatch:  []string{"media/by-month"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-month/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01/", "2001-02/", "2001-03/", "2001-04/"},
		},
		{
			root:       "media/by-month/2001/2001-01",
			itemPath:   "",
			wantMatch:  []string{"media/by-month/2001/2001-01", "2001", "01"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "media/by-day",
			itemPath:   "",
			wantMatch:  []string{"media/by-day"},
			wantPrefix: "",
			remotes:    []string{"2000/", "2001/", "2002/", "2003/"},
		},
		{
			root:       "media/by-day/2001",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001", "2001"},
			wantPrefix: "",
			remotes:    []string{"2001-01-01/", "2001-01-02/", "2001-01-03/", "2001-01-04/"},
		},
		{
			root:       "media/by-day/2001/2001-01-02",
			itemPath:   "",
			wantMatch:  []string{"media/by-day/2001/2001-01-02", "2001", "01", "02"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "album",
			itemPath:   "",
			wantMatch:  []string{"album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "album/sub",
			itemPath:   "",
			wantMatch:  []string{"album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
		{
			root:       "shared-album",
			itemPath:   "",
			wantMatch:  []string{"shared-album"},
			wantPrefix: "",
			remotes:    []string{"sub/"},
		},
		{
			root:       "shared-album/sub",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub", "sub"},
			wantPrefix: "",
			remotes:    []string{"one/", "file.jpg"},
		},
		{
			root:       "shared-album/sub/one",
			itemPath:   "",
			wantMatch:  []string{"shared-album/sub/one", "sub/one"},
			wantPrefix: "",
			remotes:    []string{"file.jpg"},
		},
	} {
		t.Run(fmt.Sprintf("#%d,root=%q,itemPath=%q", testNumber, test.root, test.itemPath), func(t *testing.T) {
			match, prefix, pattern := patterns.match(test.root, test.itemPath, false)
			assert.Equal(t, test.wantMatch, match)
			assert.Equal(t, test.wantPrefix, prefix)
			assert.NotNil(t, pattern)
			assert.NotNil(t, pattern.toEntries)

			entries, err := pattern.toEntries(ctx, f, prefix, match)
			assert.NoError(t, err)
			var remotes = []string{}
			for _, entry := range entries {
				remote := entry.Remote()
				if _, isDir := entry.(fs.Directory); isDir {
					remote += "/"
				}
				remotes = append(remotes, remote)
				if len(remotes) >= 4 {
					break // only test first 4 entries
				}
			}
			assert.Equal(t, test.remotes, remotes)
		})
	}
}

func TestPatternYears(t *testing.T) {
	f := newTestLister(t)
	entries, err := years(context.Background(), f, "potato/", nil)
	require.NoError(t, err)

	year := 2000
	for _, entry := range entries {
		assert.Equal(t, "potato/"+fmt.Sprint(year), entry.Remote())
		year++
	}
}

func TestPatternMonths(t *testing.T) {
	f := newTestLister(t)
	entries, err := months(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 12, len(entries))
	for i, entry := range entries {
		assert.Equal(t, fmt.Sprintf("potato/2020-%02d", i+1), entry.Remote())
	}
}

func TestPatternDays(t *testing.T) {
	f := newTestLister(t)
	entries, err := days(context.Background(), f, "potato/", []string{"", "2020"})
	require.NoError(t, err)

	assert.Equal(t, 366, len(entries))
	assert.Equal(t, "potato/2020-01-01", entries[0].Remote())
	assert.Equal(t, "potato/2020-12-31", entries[len(entries)-1].Remote())
}

func TestPatternYearMonthDayFilter(t *testing.T) {
	ctx := context.Background()
	f := newTestLister(t)

	// Years
	sf, err := yearMonthDayFilter(ctx, f, []string{"", "2000"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Year: 2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "999"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "4000"})
	require.Error(t, err)

	// Months
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "13"})
	require.Error(t, err)

	// Days
	sf, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "02"})
	require.NoError(t, err)
	assert.Equal(t, api.SearchFilter{
		Filters: &api.Filters{
			DateFilter: &api.DateFilter{
				Dates: []api.Date{
					{
						Day:   2,
						Month: 1,
						Year:  2000,
					},
				},
			},
		},
	}, sf)

	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "potato"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "0"})
	require.Error(t, err)
	_, err = yearMonthDayFilter(ctx, f, []string{"", "2000", "01", "32"})
	require.Error(t, err)
}

func TestPatternAlbumsToEntries(t *testing.T) {
	f := newTestLister(t)
	ctx := context.Background()

	_, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.Equal(t, fs.ErrorDirNotFound, err)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub/one",
	})

	entries, err := albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 1, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok := entries[0].(fs.Directory)
	assert.Equal(t, true, ok)

	f.albums.add(&api.Album{
		ID:    "1",
		Title: "sub",
	})
	f.names = []string{"file.jpg"}

	entries, err = albumsToEntries(ctx, f, false, "potato/", "sub")
	assert.NoError(t, err)
	assert.Equal(t, 2, len(entries))
	assert.Equal(t, "potato/one", entries[0].Remote())
	_, ok = entries[0].(fs.Directory)
	assert.Equal(t, true, ok)
	assert.Equal(t, "potato/file.jpg", entries[1].Remote())
	_, ok = entries[1].(fs.Object)
	assert.Equal(t, true, ok)
}
BIN backend/googlephotos/testfiles/rclone-test-image1.jpg Normal file (binary, 16 KiB, not shown)
BIN backend/googlephotos/testfiles/rclone-test-image2.jpg Normal file (binary, 16 KiB, not shown)
@@ -5,6 +5,7 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"mime"
|
||||
"net/http"
|
||||
@@ -12,15 +13,16 @@ import (
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/ncw/rclone/fs"
|
||||
"github.com/ncw/rclone/fs/config/configmap"
|
||||
"github.com/ncw/rclone/fs/config/configstruct"
|
||||
"github.com/ncw/rclone/fs/fshttp"
|
||||
"github.com/ncw/rclone/fs/hash"
|
||||
"github.com/ncw/rclone/lib/rest"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rclone/rclone/fs"
|
||||
"github.com/rclone/rclone/fs/config/configmap"
|
||||
"github.com/rclone/rclone/fs/config/configstruct"
|
||||
"github.com/rclone/rclone/fs/fshttp"
|
||||
"github.com/rclone/rclone/fs/hash"
|
||||
"github.com/rclone/rclone/lib/rest"
|
||||
"golang.org/x/net/html"
|
||||
)
|
||||
|
||||
@@ -45,6 +47,21 @@ func init() {
|
||||
Value: "https://user:pass@example.com",
|
||||
Help: "Connect to example.com using a username and password",
|
||||
}},
|
||||
}, {
|
||||
Name: "headers",
|
||||
Help: `Set HTTP headers for all transactions
|
||||
|
||||
Use this to set additional HTTP headers for all transactions
|
||||
|
||||
The input format is comma separated list of key,value pairs. Standard
|
||||
[CSV encoding](https://godoc.org/encoding/csv) may be used.
|
||||
|
||||
For example to set a Cookie use 'Cookie,name=value', or '"Cookie","name=value"'.
|
||||
|
||||
You can set multiple headers, eg '"Cookie","name=value","Authorization","xxx"'.
|
||||
`,
|
||||
Default: fs.CommaSepList{},
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_slash",
|
||||
Help: `Set this if the site doesn't end directories with /
|
||||
@@ -61,6 +78,26 @@ Note that this may cause rclone to confuse genuine HTML files with
|
||||
directories.`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}, {
|
||||
Name: "no_head",
|
||||
Help: `Don't use HEAD requests to find file sizes in dir listing
|
||||
|
||||
If your site is being very slow to load then you can try this option.
|
||||
Normally rclone does a HEAD request for each potential file in a
|
||||
directory listing to:
|
||||
|
||||
- find its size
|
||||
- check it really exists
|
||||
- check to see if it is a directory
|
||||
|
||||
If you set this option, rclone will not do the HEAD request. This will mean
|
||||
|
||||
- directory listings are much quicker
|
||||
- rclone won't have the times or sizes of any files
|
||||
- some files that don't exist may be in the listing
|
||||
`,
|
||||
Default: false,
|
||||
Advanced: true,
|
||||
}},
|
||||
}
|
||||
fs.Register(fsi)
|
||||
@@ -68,8 +105,10 @@ directories.`,
|
||||
|
||||
// Options defines the configuration for this backend
|
||||
type Options struct {
|
||||
Endpoint string `config:"url"`
|
||||
NoSlash bool `config:"no_slash"`
|
||||
Endpoint string `config:"url"`
|
||||
NoSlash bool `config:"no_slash"`
|
||||
NoHead bool `config:"no_head"`
|
||||
Headers fs.CommaSepList `config:"headers"`
|
||||
}
|
||||
|
||||
// Fs stores the interface to the remote HTTP files
|
||||
@@ -107,6 +146,7 @@ func statusError(res *http.Response, err error) error {
|
||||
// NewFs creates a new Fs object from the name and root. It connects to
|
||||
// the host specified in the config file.
|
||||
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
ctx := context.TODO()
|
||||
// Parse config into Options struct
|
||||
opt := new(Options)
|
||||
err := configstruct.Set(m, opt)
|
||||
@@ -114,6 +154,10 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(opt.Headers)%2 != 0 {
|
||||
return nil, errors.New("odd number of headers supplied")
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(opt.Endpoint, "/") {
|
||||
opt.Endpoint += "/"
|
||||
}
|
||||
@@ -139,10 +183,15 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
|
||||
return http.ErrUseLastResponse
|
||||
}
|
||||
// check to see if points to a file
|
||||
res, err := noRedir.Head(u.String())
|
||||
err = statusError(res, err)
|
||||
req, err := http.NewRequest("HEAD", u.String(), nil)
|
||||
if err == nil {
|
||||
isFile = true
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
addHeaders(req, opt)
|
||||
res, err := noRedir.Do(req)
|
||||
err = statusError(res, err)
|
||||
if err == nil {
|
||||
isFile = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -207,12 +256,12 @@ func (f *Fs) Precision() time.Duration {
|
||||
}
|
||||
|
||||
// NewObject creates a new remote http file object
|
||||
func (f *Fs) NewObject(remote string) (fs.Object, error) {
|
||||
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
|
||||
o := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
err := o.stat()
|
||||
err := o.stat(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -315,8 +364,22 @@ func parse(base *url.URL, in io.Reader) (names []string, err error) {
|
||||
return names, nil
|
||||
}
|
||||
|
||||
// Adds the configured headers to the request if any
|
||||
func addHeaders(req *http.Request, opt *Options) {
|
||||
for i := 0; i < len(opt.Headers); i += 2 {
|
||||
key := opt.Headers[i]
|
||||
value := opt.Headers[i+1]
|
||||
req.Header.Add(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
// Adds the configured headers to the request if any
|
||||
func (f *Fs) addHeaders(req *http.Request) {
|
||||
addHeaders(req, &f.opt)
|
||||
}
|
||||
|
||||
// Read the directory passed in
|
||||
func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||
func (f *Fs) readDir(ctx context.Context, dir string) (names []string, err error) {
|
||||
URL := f.url(dir)
|
||||
u, err := url.Parse(URL)
|
||||
if err != nil {
|
||||
@@ -325,7 +388,14 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||
if !strings.HasSuffix(URL, "/") {
|
||||
return nil, errors.Errorf("internal error: readDir URL %q didn't end in /", URL)
|
||||
}
|
||||
res, err := f.httpClient.Get(URL)
|
||||
// Do the request
|
||||
req, err := http.NewRequest("GET", URL, nil)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "readDir failed")
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
f.addHeaders(req)
|
||||
res, err := f.httpClient.Do(req)
|
||||
if err == nil {
|
||||
defer fs.CheckClose(res.Body, &err)
|
||||
if res.StatusCode == http.StatusNotFound {
|
||||
@@ -359,38 +429,57 @@ func (f *Fs) readDir(dir string) (names []string, err error) {
|
||||
//
|
||||
// This should return ErrDirNotFound if the directory isn't
|
||||
// found.
|
||||
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
|
||||
if !strings.HasSuffix(dir, "/") && dir != "" {
|
||||
dir += "/"
|
||||
}
|
||||
names, err := f.readDir(dir)
|
||||
names, err := f.readDir(ctx, dir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "error listing %q", dir)
|
||||
}
|
||||
var (
|
||||
entriesMu sync.Mutex // to protect entries
|
||||
wg sync.WaitGroup
|
||||
in = make(chan string, fs.Config.Checkers)
|
||||
)
|
||||
add := func(entry fs.DirEntry) {
|
||||
entriesMu.Lock()
|
||||
entries = append(entries, entry)
|
||||
entriesMu.Unlock()
|
||||
}
|
||||
for i := 0; i < fs.Config.Checkers; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for remote := range in {
|
||||
file := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
switch err := file.stat(ctx); err {
|
||||
case nil:
|
||||
add(file)
|
||||
case fs.ErrorNotAFile:
|
||||
// ...found a directory not a file
|
||||
add(fs.NewDir(remote, timeUnset))
|
||||
default:
|
||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
for _, name := range names {
|
||||
isDir := name[len(name)-1] == '/'
|
||||
name = strings.TrimRight(name, "/")
|
||||
remote := path.Join(dir, name)
|
||||
if isDir {
|
||||
dir := fs.NewDir(remote, timeUnset)
|
||||
entries = append(entries, dir)
|
||||
add(fs.NewDir(remote, timeUnset))
|
||||
} else {
|
||||
file := &Object{
|
||||
fs: f,
|
||||
remote: remote,
|
||||
}
|
||||
switch err = file.stat(); err {
|
||||
case nil:
|
||||
entries = append(entries, file)
|
||||
case fs.ErrorNotAFile:
|
||||
// ...found a directory not a file
|
||||
dir := fs.NewDir(remote, timeUnset)
|
||||
entries = append(entries, dir)
|
||||
default:
|
||||
fs.Debugf(remote, "skipping because of error: %v", err)
|
||||
}
|
||||
in <- remote
|
||||
}
|
||||
}
|
||||
close(in)
|
||||
wg.Wait()
|
||||
return entries, nil
|
||||
}
|
||||
|
||||
@@ -399,12 +488,12 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
|
||||
// May create the object even if it returns an error - if so
|
||||
// will return the object and the error, otherwise will return
|
||||
// nil and the error
|
||||
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
// PutStream uploads to the remote path with the modTime given of indeterminate size
|
||||
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
|
||||
return nil, errorReadOnly
|
||||
}
|
||||
|
||||
@@ -427,7 +516,7 @@ func (o *Object) Remote() string {
|
||||
}
|
||||
|
||||
// Hash returns "" since HTTP (in Go or OpenSSH) doesn't support remote calculation of hashes
|
||||
func (o *Object) Hash(r hash.Type) (string, error) {
|
||||
func (o *Object) Hash(ctx context.Context, r hash.Type) (string, error) {
|
||||
return "", hash.ErrUnsupported
|
||||
}
|
||||
|
||||
@@ -437,7 +526,7 @@ func (o *Object) Size() int64 {
|
||||
}
|
||||
|
||||
// ModTime returns the modification time of the remote http file
|
||||
func (o *Object) ModTime() time.Time {
|
||||
func (o *Object) ModTime(ctx context.Context) time.Time {
|
||||
return o.modTime
|
||||
}
|
||||
|
||||
@@ -447,9 +536,21 @@ func (o *Object) url() string {
|
||||
}
|
||||
|
||||
// stat updates the info field in the Object
|
||||
func (o *Object) stat() error {
|
||||
func (o *Object) stat(ctx context.Context) error {
|
||||
if o.fs.opt.NoHead {
|
||||
o.size = -1
|
||||
o.modTime = timeUnset
|
||||
o.contentType = fs.MimeType(ctx, o)
|
||||
return nil
|
||||
}
|
||||
url := o.url()
|
||||
res, err := o.fs.httpClient.Head(url)
|
||||
req, err := http.NewRequest("HEAD", url, nil)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "stat failed")
|
||||
}
|
||||
req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
|
||||
o.fs.addHeaders(req)
|
||||
res, err := o.fs.httpClient.Do(req)
|
||||
if err == nil && res.StatusCode == http.StatusNotFound {
|
||||
return fs.ErrorObjectNotFound
|
||||
}
|
||||
@@ -480,7 +581,7 @@ func (o *Object) stat() error {
 // SetModTime sets the modification and access time to the specified time
 //
 // it also updates the info field
-func (o *Object) SetModTime(modTime time.Time) error {
+func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
 	return errorReadOnly
 }

@@ -490,17 +591,19 @@ func (o *Object) Storable() bool {
 }

 // Open a remote http file object for reading. Seek is supported
-func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
 	url := o.url()
 	req, err := http.NewRequest("GET", url, nil)
 	if err != nil {
 		return nil, errors.Wrap(err, "Open failed")
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext

 	// Add optional headers
 	for k, v := range fs.OpenOptionHeaders(options) {
 		req.Header.Add(k, v)
 	}
+	o.fs.addHeaders(req)

 	// Do the request
 	res, err := o.fs.httpClient.Do(req)
@@ -517,27 +620,27 @@ func (f *Fs) Hashes() hash.Set {
 }

 // Mkdir makes the root directory of the Fs object
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	return errorReadOnly
 }

 // Remove a remote http file object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return errorReadOnly
 }

 // Rmdir removes the root directory of the Fs object
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	return errorReadOnly
 }

 // Update in to the object with the modTime given of the given size
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
 	return errorReadOnly
 }

 // MimeType of an Object if known, "" otherwise
-func (o *Object) MimeType() string {
+func (o *Object) MimeType(ctx context.Context) string {
 	return o.contentType
 }

@@ -1,8 +1,7 @@
-// +build go1.8
-
 package http

 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -11,14 +10,15 @@ import (
 	"os"
 	"path/filepath"
 	"sort"
+	"strings"
 	"testing"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fstest"
-	"github.com/ncw/rclone/lib/rest"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fstest"
+	"github.com/rclone/rclone/lib/rest"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -27,6 +27,7 @@ var (
 	remoteName = "TestHTTP"
 	testPath   = "test"
 	filesPath  = filepath.Join(testPath, "files")
+	headers    = []string{"X-Potato", "sausage", "X-Rhubarb", "cucumber"}
 )

 // prepareServer prepares the test server and returns a function to tidy it up afterwards
@@ -34,8 +35,16 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// file server for test/files
 	fileServer := http.FileServer(http.Dir(filesPath))

+	// test the headers are there then pass on to fileServer
+	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		what := fmt.Sprintf("%s %s: Header ", r.Method, r.URL.Path)
+		assert.Equal(t, headers[1], r.Header.Get(headers[0]), what+headers[0])
+		assert.Equal(t, headers[3], r.Header.Get(headers[2]), what+headers[2])
+		fileServer.ServeHTTP(w, r)
+	})
+
 	// Make the test server
-	ts := httptest.NewServer(fileServer)
+	ts := httptest.NewServer(handler)

 	// Configure the remote
 	config.LoadConfig()
@@ -46,8 +55,9 @@ func prepareServer(t *testing.T) (configmap.Simple, func()) {
 	// config.FileSet(remoteName, "url", ts.URL)

 	m := configmap.Simple{
-		"type": "http",
-		"url":  ts.URL,
+		"type":    "http",
+		"url":     ts.URL,
+		"headers": strings.Join(headers, ","),
 	}

 	// return a function to tidy up
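The new headers option above is stored flat: strings.Join(headers, ",") produces a single comma-separated list of alternating keys and values. A hedged sketch of how such a flat list could be split back into header pairs (applyHeaders is a hypothetical helper, not rclone's actual implementation):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

// applyHeaders is a hypothetical helper: it splits a flat
// "key,value,key,value" string and applies each pair to a request.
func applyHeaders(req *http.Request, flat string) {
	parts := strings.Split(flat, ",")
	for i := 0; i+1 < len(parts); i += 2 {
		req.Header.Add(parts[i], parts[i+1])
	}
}

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	applyHeaders(req, "X-Potato,sausage,X-Rhubarb,cucumber")
	fmt.Println(req.Header.Get("X-Potato")) // sausage
}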

@@ -66,7 +76,7 @@ func prepare(t *testing.T) (fs.Fs, func()) {
 }

 func testListRoot(t *testing.T, f fs.Fs, noSlash bool) {
-	entries, err := f.List("")
+	entries, err := f.List(context.Background(), "")
 	require.NoError(t, err)

 	sort.Sort(entries)
@@ -122,7 +132,7 @@ func TestListSubDir(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	entries, err := f.List("three")
+	entries, err := f.List(context.Background(), "three")
 	require.NoError(t, err)

 	sort.Sort(entries)
@@ -140,7 +150,7 @@ func TestNewObject(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	o, err := f.NewObject("four/under four.txt")
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)

 	assert.Equal(t, "four/under four.txt", o.Remote())
@@ -150,7 +160,7 @@ func TestNewObject(t *testing.T) {

 	// Test the time is correct on the object

-	tObj := o.ModTime()
+	tObj := o.ModTime(context.Background())

 	fi, err := os.Stat(filepath.Join(filesPath, "four", "under four.txt"))
 	require.NoError(t, err)
@@ -160,7 +170,7 @@ func TestNewObject(t *testing.T) {
 	assert.True(t, ok, fmt.Sprintf("%s: Modification time difference too big |%s| > %s (%s vs %s) (precision %s)", o.Remote(), dt, time.Second, tObj, tFile, time.Second))

 	// check object not found
-	o, err = f.NewObject("not found.txt")
+	o, err = f.NewObject(context.Background(), "not found.txt")
 	assert.Nil(t, o)
 	assert.Equal(t, fs.ErrorObjectNotFound, err)
 }
@@ -169,11 +179,11 @@ func TestOpen(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	o, err := f.NewObject("four/under four.txt")
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)

 	// Test normal read
-	fd, err := o.Open()
+	fd, err := o.Open(context.Background())
 	require.NoError(t, err)
 	data, err := ioutil.ReadAll(fd)
 	require.NoError(t, err)
@@ -181,7 +191,7 @@ func TestOpen(t *testing.T) {
 	assert.Equal(t, "beetroot\n", string(data))

 	// Test with range request
-	fd, err = o.Open(&fs.RangeOption{Start: 1, End: 5})
+	fd, err = o.Open(context.Background(), &fs.RangeOption{Start: 1, End: 5})
 	require.NoError(t, err)
 	data, err = ioutil.ReadAll(fd)
 	require.NoError(t, err)
@@ -193,12 +203,12 @@ func TestMimeType(t *testing.T) {
 	f, tidy := prepare(t)
 	defer tidy()

-	o, err := f.NewObject("four/under four.txt")
+	o, err := f.NewObject(context.Background(), "four/under four.txt")
 	require.NoError(t, err)

 	do, ok := o.(fs.MimeTyper)
 	require.True(t, ok)
-	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType())
+	assert.Equal(t, "text/plain; charset=utf-8", do.MimeType(context.Background()))
 }

 func TestIsAFileRoot(t *testing.T) {
@@ -218,7 +228,7 @@ func TestIsAFileSubDir(t *testing.T) {
 	f, err := NewFs(remoteName, "three/underthree.txt", m)
 	assert.Equal(t, err, fs.ErrorIsFile)

-	entries, err := f.List("")
+	entries, err := f.List(context.Background(), "")
 	require.NoError(t, err)

 	sort.Sort(entries)

@@ -24,7 +24,7 @@
 <tr><td valign="top"><img src="/icons/unknown.gif" alt="[ ]"></td><td><a href="timer-test">timer-test</a></td><td align="right">09-May-2017 17:05 </td><td align="right">1.5M</td><td> </td></tr>
 <tr><td valign="top"><img src="/icons/text.gif" alt="[TXT]"></td><td><a href="words-to-regexp.pl">words-to-regexp.pl</a></td><td align="right">01-Mar-2005 20:43 </td><td align="right">6.0K</td><td> </td></tr>
 <tr><th colspan="5"><hr></th></tr>
-<!-- some extras from https://github.com/ncw/rclone/issues/1573 -->
+<!-- some extras from https://github.com/rclone/rclone/issues/1573 -->
 <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20100%25%20better.mp3">Now 100% better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>
 <tr><td valign="top"><img src="/icons/sound2.gif" alt="[SND]"></td><td><a href="Now%20better.mp3">Now better.mp3</a></td><td align="right">2017-08-01 11:41 </td><td align="right"> 0 </td><td> </td></tr>

@@ -1,11 +1,12 @@
 package hubic

 import (
+	"context"
 	"net/http"
 	"time"

-	"github.com/ncw/rclone/fs"
 	"github.com/ncw/swift"
+	"github.com/rclone/rclone/fs"
 )

 // auth is an authenticator for swift
@@ -26,7 +27,7 @@ func newAuth(f *Fs) *auth {
 func (a *auth) Request(*swift.Connection) (r *http.Request, err error) {
 	const retries = 10
 	for try := 1; try <= retries; try++ {
-		err = a.f.getCredentials()
+		err = a.f.getCredentials(context.TODO())
 		if err == nil {
 			break
 		}
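Request retries getCredentials up to ten times before giving up. The same bounded-retry shape, written generically with illustrative names (withRetries is not rclone API):

package main

import (
	"errors"
	"fmt"
)

// withRetries is a hypothetical helper mirroring the loop above: run op
// up to retries times, returning nil on the first success or the last
// error if every attempt fails.
func withRetries(retries int, op func() error) (err error) {
	for try := 1; try <= retries; try++ {
		if err = op(); err == nil {
			return nil
		}
	}
	return err
}

func main() {
	calls := 0
	err := withRetries(10, func() error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}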

@@ -7,6 +7,7 @@ package hubic
 // to be revisited after some actual experience.

 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
@@ -15,16 +16,16 @@ import (
 	"strings"
 	"time"

-	"github.com/ncw/rclone/backend/swift"
-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/fshttp"
-	"github.com/ncw/rclone/lib/oauthutil"
 	swiftLib "github.com/ncw/swift"
 	"github.com/pkg/errors"
+	"github.com/rclone/rclone/backend/swift"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/lib/oauthutil"
 	"golang.org/x/oauth2"
 )

@@ -115,11 +116,12 @@ func (f *Fs) String() string {
 // getCredentials reads the OpenStack Credentials using the Hubic API
 //
 // The credentials are read into the Fs
-func (f *Fs) getCredentials() (err error) {
+func (f *Fs) getCredentials(ctx context.Context) (err error) {
 	req, err := http.NewRequest("GET", "https://api.hubic.com/1.0/account/credentials", nil)
 	if err != nil {
 		return err
 	}
+	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
 	resp, err := f.client.Do(req)
 	if err != nil {
 		return err

@@ -4,14 +4,16 @@ package hubic_test
 import (
 	"testing"

-	"github.com/ncw/rclone/backend/hubic"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/hubic"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote
 func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
-		RemoteName: "TestHubic:",
-		NilObject:  (*hubic.Object)(nil),
+		RemoteName:          "TestHubic:",
+		NilObject:           (*hubic.Object)(nil),
+		SkipFsCheckWrap:     true,
+		SkipObjectCheckWrap: true,
 	})
 }

@@ -46,6 +46,126 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
 // APIString returns Time string in Jottacloud API format
 func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }

+// LoginToken is struct representing the login token generated in the WebUI
+type LoginToken struct {
+	Username      string `json:"username"`
+	Realm         string `json:"realm"`
+	WellKnownLink string `json:"well_known_link"`
+	AuthToken     string `json:"auth_token"`
+}
+
+// WellKnown contains some configuration parameters for setting up endpoints
+type WellKnown struct {
+	Issuer                                     string   `json:"issuer"`
+	AuthorizationEndpoint                      string   `json:"authorization_endpoint"`
+	TokenEndpoint                              string   `json:"token_endpoint"`
+	TokenIntrospectionEndpoint                 string   `json:"token_introspection_endpoint"`
+	UserinfoEndpoint                           string   `json:"userinfo_endpoint"`
+	EndSessionEndpoint                         string   `json:"end_session_endpoint"`
+	JwksURI                                    string   `json:"jwks_uri"`
+	CheckSessionIframe                         string   `json:"check_session_iframe"`
+	GrantTypesSupported                        []string `json:"grant_types_supported"`
+	ResponseTypesSupported                     []string `json:"response_types_supported"`
+	SubjectTypesSupported                      []string `json:"subject_types_supported"`
+	IDTokenSigningAlgValuesSupported           []string `json:"id_token_signing_alg_values_supported"`
+	UserinfoSigningAlgValuesSupported          []string `json:"userinfo_signing_alg_values_supported"`
+	RequestObjectSigningAlgValuesSupported     []string `json:"request_object_signing_alg_values_supported"`
+	ResponseNodesSupported                     []string `json:"response_modes_supported"`
+	RegistrationEndpoint                       string   `json:"registration_endpoint"`
+	TokenEndpointAuthMethodsSupported          []string `json:"token_endpoint_auth_methods_supported"`
+	TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported"`
+	ClaimsSupported                            []string `json:"claims_supported"`
+	ClaimTypesSupported                        []string `json:"claim_types_supported"`
+	ClaimsParameterSupported                   bool     `json:"claims_parameter_supported"`
+	ScopesSupported                            []string `json:"scopes_supported"`
+	RequestParameterSupported                  bool     `json:"request_parameter_supported"`
+	RequestURIParameterSupported               bool     `json:"request_uri_parameter_supported"`
+	CodeChallengeMethodsSupported              []string `json:"code_challenge_methods_supported"`
+	TLSClientCertificateBoundAccessTokens      bool     `json:"tls_client_certificate_bound_access_tokens"`
+	IntrospectionEndpoint                      string   `json:"introspection_endpoint"`
+}
+
+// TokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type TokenJSON struct {
+	AccessToken      string `json:"access_token"`
+	ExpiresIn        int32  `json:"expires_in"` // at least PayPal returns string, while most return number
+	RefreshExpiresIn int32  `json:"refresh_expires_in"`
+	RefreshToken     string `json:"refresh_token"`
+	TokenType        string `json:"token_type"`
+	IDToken          string `json:"id_token"`
+	NotBeforePolicy  int32  `json:"not-before-policy"`
+	SessionState     string `json:"session_state"`
+	Scope            string `json:"scope"`
+}
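A LoginToken carries a WellKnownLink pointing at the provider's OpenID configuration document. A hedged sketch of fetching and decoding that document into the WellKnown struct above (fetchWellKnown is illustrative, not rclone's actual code; it assumes the types defined above and imports context, encoding/json and net/http):

// fetchWellKnown is a hypothetical helper: GET a LoginToken's
// WellKnownLink and decode the OpenID configuration JSON into the
// WellKnown struct defined above.
func fetchWellKnown(ctx context.Context, client *http.Client, token *LoginToken) (*WellKnown, error) {
	req, err := http.NewRequest("GET", token.WellKnownLink, nil)
	if err != nil {
		return nil, err
	}
	req = req.WithContext(ctx) // go1.13 can use NewRequestWithContext
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	var wk WellKnown
	if err := json.NewDecoder(res.Body).Decode(&wk); err != nil {
		return nil, err
	}
	return &wk, nil
}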
+
+// JSON structures returned by new API
+
+// AllocateFileRequest to prepare an upload to Jottacloud
+type AllocateFileRequest struct {
+	Bytes    int64  `json:"bytes"`
+	Created  string `json:"created"`
+	Md5      string `json:"md5"`
+	Modified string `json:"modified"`
+	Path     string `json:"path"`
+}
+
+// AllocateFileResponse for upload requests
+type AllocateFileResponse struct {
+	Name      string `json:"name"`
+	Path      string `json:"path"`
+	State     string `json:"state"`
+	UploadID  string `json:"upload_id"`
+	UploadURL string `json:"upload_url"`
+	Bytes     int64  `json:"bytes"`
+	ResumePos int64  `json:"resume_pos"`
+}
+
+// UploadResponse after an upload
+type UploadResponse struct {
+	Name      string      `json:"name"`
+	Path      string      `json:"path"`
+	Kind      string      `json:"kind"`
+	ContentID string      `json:"content_id"`
+	Bytes     int64       `json:"bytes"`
+	Md5       string      `json:"md5"`
+	Created   int64       `json:"created"`
+	Modified  int64       `json:"modified"`
+	Deleted   interface{} `json:"deleted"`
+	Mime      string      `json:"mime"`
+}
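The allocate-then-upload flow starts by describing the file to be uploaded via AllocateFileRequest. A sketch of building that body, where every field value, including the timestamp format, is an assumption rather than something taken from this diff:

// Illustrative only: marshal an AllocateFileRequest to JSON.
alloc := AllocateFileRequest{
	Bytes:    1048576,
	Created:  "2019-06-01T12:00:00Z", // format assumed, not taken from the diff
	Md5:      "0cc175b9c0f1b6a831c399e269772661",
	Modified: "2019-06-01T12:00:00Z",
	Path:     "/archive/file.bin",
}
body, err := json.Marshal(&alloc)
if err != nil {
	// handle the error
}
_ = body // would form the JSON body of the allocate request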
+
+// DeviceRegistrationResponse is the response to registering a device
+type DeviceRegistrationResponse struct {
+	ClientID     string `json:"client_id"`
+	ClientSecret string `json:"client_secret"`
+}
+
+// CustomerInfo provides general information about the account. Required for finding the correct internal username.
+type CustomerInfo struct {
+	Username          string      `json:"username"`
+	Email             string      `json:"email"`
+	Name              string      `json:"name"`
+	CountryCode       string      `json:"country_code"`
+	LanguageCode      string      `json:"language_code"`
+	CustomerGroupCode string      `json:"customer_group_code"`
+	BrandCode         string      `json:"brand_code"`
+	AccountType       string      `json:"account_type"`
+	SubscriptionType  string      `json:"subscription_type"`
+	Usage             int64       `json:"usage"`
+	Qouta             int64       `json:"quota"`
+	BusinessUsage     int64       `json:"business_usage"`
+	BusinessQouta     int64       `json:"business_quota"`
+	WriteLocked       bool        `json:"write_locked"`
+	ReadLocked        bool        `json:"read_locked"`
+	LockedCause       interface{} `json:"locked_cause"`
+	WebHash           string      `json:"web_hash"`
+	AndroidHash       string      `json:"android_hash"`
+	IOSHash           string      `json:"ios_hash"`
+}
+
+// XML structures returned by the old API
+
 // Flag is a hacky type for checking if an attribute is present
 type Flag bool

@@ -64,15 +184,6 @@ func (f *Flag) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
 	return attr, errors.New("unimplemented")
 }

-// TokenJSON is the struct representing the HTTP response from OAuth2
-// providers returning a token in JSON form.
-type TokenJSON struct {
-	AccessToken  string `json:"access_token"`
-	TokenType    string `json:"token_type"`
-	RefreshToken string `json:"refresh_token"`
-	ExpiresIn    int32  `json:"expires_in"` // at least PayPal returns string, while most return number
-}
-
 /*
 GET http://www.jottacloud.com/JFS/<account>

@@ -102,8 +213,8 @@ GET http://www.jottacloud.com/JFS/<account>
 </user>
 */

-// AccountInfo represents a Jottacloud account
-type AccountInfo struct {
+// DriveInfo represents a Jottacloud account
+type DriveInfo struct {
 	Username    string `xml:"username"`
 	AccountType string `xml:"account-type"`
 	Locked      bool   `xml:"locked"`
@@ -280,43 +391,3 @@ func (e *Error) Error() string {
 	}
 	return out
 }
-
-// AllocateFileRequest to prepare an upload to Jottacloud
-type AllocateFileRequest struct {
-	Bytes    int64  `json:"bytes"`
-	Created  string `json:"created"`
-	Md5      string `json:"md5"`
-	Modified string `json:"modified"`
-	Path     string `json:"path"`
-}
-
-// AllocateFileResponse for upload requests
-type AllocateFileResponse struct {
-	Name      string `json:"name"`
-	Path      string `json:"path"`
-	State     string `json:"state"`
-	UploadID  string `json:"upload_id"`
-	UploadURL string `json:"upload_url"`
-	Bytes     int64  `json:"bytes"`
-	ResumePos int64  `json:"resume_pos"`
-}
-
-// UploadResponse after an upload
-type UploadResponse struct {
-	Name      string      `json:"name"`
-	Path      string      `json:"path"`
-	Kind      string      `json:"kind"`
-	ContentID string      `json:"content_id"`
-	Bytes     int64       `json:"bytes"`
-	Md5       string      `json:"md5"`
-	Created   int64       `json:"created"`
-	Modified  int64       `json:"modified"`
-	Deleted   interface{} `json:"deleted"`
-	Mime      string      `json:"mime"`
-}
-
-// DeviceRegistrationResponse is the response to registering a device
-type DeviceRegistrationResponse struct {
-	ClientID     string `json:"client_id"`
-	ClientSecret string `json:"client_secret"`
-}

(File diff suppressed because it is too large.)

@@ -6,7 +6,7 @@ import (
 	"io"
 	"testing"

-	"github.com/ncw/rclone/lib/readers"
+	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

@@ -4,8 +4,8 @@ package jottacloud_test
 import (
 	"testing"

-	"github.com/ncw/rclone/backend/jottacloud"
-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/backend/jottacloud"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

@@ -1,77 +0,0 @@
-/*
-Translate file names for JottaCloud adapted from OneDrive
-
-
-The following characters are JottaCloud reserved characters, and can't
-be used in JottaCloud folder and file names.
-
-jottacloud = "/" / "\" / "*" / "<" / ">" / "?" / "!" / "&" / ":" / ";" / "|" / "#" / "%" / """ / "'" / "." / "~"
-
-
-*/
-
-package jottacloud
-
-import (
-	"regexp"
-	"strings"
-)
-
-// charMap holds replacements for characters
-//
-// Onedrive has a restricted set of characters compared to other cloud
-// storage systems, so we map these to the FULLWIDTH unicode
-// equivalents
-//
-// http://unicode-search.net/unicode-namesearch.pl?term=SOLIDUS
-var (
-	charMap = map[rune]rune{
-		'\\': '＼', // FULLWIDTH REVERSE SOLIDUS
-		'*':  '＊', // FULLWIDTH ASTERISK
-		'<':  '＜', // FULLWIDTH LESS-THAN SIGN
-		'>':  '＞', // FULLWIDTH GREATER-THAN SIGN
-		'?':  '？', // FULLWIDTH QUESTION MARK
-		':':  '：', // FULLWIDTH COLON
-		';':  '；', // FULLWIDTH SEMICOLON
-		'|':  '｜', // FULLWIDTH VERTICAL LINE
-		'"':  '＂', // FULLWIDTH QUOTATION MARK - not on the list but seems to be reserved
-		' ':  '␠', // SYMBOL FOR SPACE
-	}
-	invCharMap           map[rune]rune
-	fixStartingWithSpace = regexp.MustCompile(`(/|^) `)
-	fixEndingWithSpace   = regexp.MustCompile(` (/|$)`)
-)
-
-func init() {
-	// Create inverse charMap
-	invCharMap = make(map[rune]rune, len(charMap))
-	for k, v := range charMap {
-		invCharMap[v] = k
-	}
-}
-
-// replaceReservedChars takes a path and substitutes any reserved
-// characters in it
-func replaceReservedChars(in string) string {
-	// Filenames can't start with space
-	in = fixStartingWithSpace.ReplaceAllString(in, "$1"+string(charMap[' ']))
-	// Filenames can't end with space
-	in = fixEndingWithSpace.ReplaceAllString(in, string(charMap[' '])+"$1")
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := charMap[c]; ok && c != ' ' {
-			return replacement
-		}
-		return c
-	}, in)
-}
-
-// restoreReservedChars takes a path and undoes any substitutions
-// made by replaceReservedChars
-func restoreReservedChars(in string) string {
-	return strings.Map(func(c rune) rune {
-		if replacement, ok := invCharMap[c]; ok {
-			return replacement
-		}
-		return c
-	}, in)
-}
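A quick round trip with the (now deleted) helpers above, assuming the charMap shown:

// Hedged example, values follow directly from the FULLWIDTH mapping above.
enc := replaceReservedChars("movies|new?") // "movies｜new？"
dec := restoreReservedChars(enc)           // "movies|new?" again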
@@ -1,28 +0,0 @@
-package jottacloud
-
-import "testing"
-
-func TestReplace(t *testing.T) {
-	for _, test := range []struct {
-		in  string
-		out string
-	}{
-		{"", ""},
-		{"abc 123", "abc 123"},
-		{`\*<>?:;|"`, `＼＊＜＞？：；｜＂`},
-		{`\*<>?:;|"\*<>?:;|"`, `＼＊＜＞？：；｜＂＼＊＜＞？：；｜＂`},
-		{" leading space", "␠leading space"},
-		{"trailing space ", "trailing space␠"},
-		{" leading space/ leading space/ leading space", "␠leading space/␠leading space/␠leading space"},
-		{"trailing space /trailing space /trailing space ", "trailing space␠/trailing space␠/trailing space␠"},
-	} {
-		got := replaceReservedChars(test.in)
-		if got != test.out {
-			t.Errorf("replaceReservedChars(%q) want %q got %q", test.in, test.out, got)
-		}
-		got2 := restoreReservedChars(got)
-		if got2 != test.in {
-			t.Errorf("restoreReservedChars(%q) want %q got %q", got, test.in, got2)
-		}
-	}
-}

@@ -1,6 +1,7 @@
 package koofr

 import (
+	"context"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -10,11 +11,14 @@ import (
 	"strings"
 	"time"

-	"github.com/ncw/rclone/fs"
-	"github.com/ncw/rclone/fs/config/configmap"
-	"github.com/ncw/rclone/fs/config/configstruct"
-	"github.com/ncw/rclone/fs/config/obscure"
-	"github.com/ncw/rclone/fs/hash"
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/config"
+	"github.com/rclone/rclone/fs/config/configmap"
+	"github.com/rclone/rclone/fs/config/configstruct"
+	"github.com/rclone/rclone/fs/config/obscure"
+	"github.com/rclone/rclone/fs/fshttp"
+	"github.com/rclone/rclone/fs/hash"
+	"github.com/rclone/rclone/lib/encoder"

 	httpclient "github.com/koofr/go-httpclient"
 	koofrclient "github.com/koofr/go-koofrclient"
@@ -26,39 +30,53 @@ func init() {
 		Name:        "koofr",
 		Description: "Koofr",
 		NewFs:       NewFs,
-		Options: []fs.Option{
-			{
-				Name:     "endpoint",
-				Help:     "The Koofr API endpoint to use",
-				Default:  "https://app.koofr.net",
-				Required: true,
-				Advanced: true,
-			}, {
-				Name:     "mountid",
-				Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
-				Required: false,
-				Default:  "",
-				Advanced: true,
-			}, {
-				Name:     "user",
-				Help:     "Your Koofr user name",
-				Required: true,
-			}, {
-				Name:       "password",
-				Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
-				IsPassword: true,
-				Required:   true,
-			},
-		},
+		Options: []fs.Option{{
+			Name:     "endpoint",
+			Help:     "The Koofr API endpoint to use",
+			Default:  "https://app.koofr.net",
+			Required: true,
+			Advanced: true,
+		}, {
+			Name:     "mountid",
+			Help:     "Mount ID of the mount to use. If omitted, the primary mount is used.",
+			Required: false,
+			Default:  "",
+			Advanced: true,
+		}, {
+			Name:     "setmtime",
+			Help:     "Does the backend support setting modification time. Set this to false if you use a mount ID that points to a Dropbox or Amazon Drive backend.",
+			Default:  true,
+			Required: true,
+			Advanced: true,
+		}, {
+			Name:     "user",
+			Help:     "Your Koofr user name",
+			Required: true,
+		}, {
+			Name:       "password",
+			Help:       "Your Koofr password for rclone (generate one at https://app.koofr.net/app/admin/preferences/password)",
+			IsPassword: true,
+			Required:   true,
+		}, {
+			Name:     config.ConfigEncoding,
+			Help:     config.ConfigEncodingHelp,
+			Advanced: true,
+			// Encode invalid UTF-8 bytes as json doesn't handle them properly.
+			Default: (encoder.Display |
+				encoder.EncodeBackSlash |
+				encoder.EncodeInvalidUtf8),
+		}},
 	})
 }

 // Options represent the configuration of the Koofr backend
 type Options struct {
-	Endpoint string `config:"endpoint"`
-	MountID  string `config:"mountid"`
-	User     string `config:"user"`
-	Password string `config:"password"`
+	Endpoint string               `config:"endpoint"`
+	MountID  string               `config:"mountid"`
+	User     string               `config:"user"`
+	Password string               `config:"password"`
+	SetMTime bool                 `config:"setmtime"`
+	Enc      encoder.MultiEncoder `config:"encoding"`
 }

 // A Fs is a representation of a remote Koofr Fs
@@ -105,7 +123,7 @@ func (o *Object) Remote() string {
 }

 // ModTime returns the modification time of the Object
-func (o *Object) ModTime() time.Time {
+func (o *Object) ModTime(ctx context.Context) time.Time {
 	return time.Unix(o.info.Modified/1000, (o.info.Modified%1000)*1000*1000)
 }

@@ -120,7 +138,7 @@ func (o *Object) Fs() fs.Info {
 }

 // Hash returns an MD5 hash of the Object
-func (o *Object) Hash(typ hash.Type) (string, error) {
+func (o *Object) Hash(ctx context.Context, typ hash.Type) (string, error) {
 	if typ == hash.MD5 {
 		return o.info.Hash, nil
 	}
@@ -138,14 +156,15 @@ func (o *Object) Storable() bool {
 }

 // SetModTime is not supported
-func (o *Object) SetModTime(mtime time.Time) error {
-	return nil
+func (o *Object) SetModTime(ctx context.Context, mtime time.Time) error {
+	return fs.ErrorCantSetModTimeWithoutDelete
 }

 // Open opens the Object for reading
-func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
+func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
 	var sOff, eOff int64 = 0, -1

+	fs.FixRangeOption(options, o.Size())
 	for _, option := range options {
 		switch x := option.(type) {
 		case *fs.SeekOption:
@@ -162,13 +181,6 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 	if sOff == 0 && eOff < 0 {
 		return o.fs.client.FilesGet(o.fs.mountID, o.fullPath())
 	}
-	if sOff < 0 {
-		sOff = o.Size() - eOff
-		eOff = o.Size()
-	}
-	if eOff > o.Size() {
-		eOff = o.Size()
-	}
 	span := &koofrclient.FileSpan{
 		Start: sOff,
 		End:   eOff,
@@ -177,11 +189,13 @@ func (o *Object) Open(options ...fs.OpenOption) (io.ReadCloser, error) {
 }

 // Update updates the Object contents
-func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
-	putopts := &koofrclient.PutFilter{
-		ForceOverwrite:    true,
-		NoRename:          true,
-		IgnoreNonExisting: true,
+func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
+	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
+	putopts := &koofrclient.PutOptions{
+		ForceOverwrite:             true,
+		NoRename:                   true,
+		OverwriteIgnoreNonExisting: true,
+		SetModified:                &mtime,
 	}
 	fullPath := o.fullPath()
 	dirPath := dir(fullPath)
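SetModified takes the modification time as epoch milliseconds, hence the UnixNano() / 1000 / 1000 in the Update hunk above. A small standard-library check of that conversion:

package main

import (
	"fmt"
	"time"
)

func main() {
	// 2019-06-01 12:00:00.5 UTC
	t := time.Date(2019, 6, 1, 12, 0, 0, 500000000, time.UTC)
	ms := t.UnixNano() / 1000 / 1000 // nanoseconds -> milliseconds
	fmt.Println(ms)                  // 1559390400500
}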
@@ -190,7 +204,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	if err != nil {
 		return err
 	}
-	info, err := o.fs.client.FilesPutOptions(o.fs.mountID, dirPath, name, in, putopts)
+	info, err := o.fs.client.FilesPutWithOptions(o.fs.mountID, dirPath, name, in, putopts)
 	if err != nil {
 		return err
 	}
@@ -199,7 +213,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 }

 // Remove deletes the remote Object
-func (o *Object) Remove() error {
+func (o *Object) Remove(ctx context.Context) error {
 	return o.fs.client.FilesDelete(o.fs.mountID, o.fullPath())
 }

@@ -225,7 +239,10 @@ func (f *Fs) Features() *fs.Features {

 // Precision denotes that setting modification times is not supported
 func (f *Fs) Precision() time.Duration {
-	return fs.ModTimeNotSupported
+	if !f.opt.SetMTime {
+		return fs.ModTimeNotSupported
+	}
+	return time.Millisecond
 }
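With setmtime enabled the backend now advertises millisecond precision instead of ModTimeNotSupported. A hedged illustration of what a 1 ms precision means when comparing modification times (this is not rclone's sync code):

package main

import (
	"fmt"
	"time"
)

func main() {
	prec := time.Millisecond
	a := time.Date(2019, 6, 1, 12, 0, 0, 500400000, time.UTC)
	b := a.Add(300 * time.Microsecond)
	// Differences below the precision are not significant.
	fmt.Println(a.Truncate(prec).Equal(b.Truncate(prec))) // true
}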

 // Hashes returns the set of hashes provided by the Fs
@@ -235,7 +252,7 @@ func (f *Fs) Hashes() hash.Set {

 // fullPath constructs a full, absolute path from a Fs root relative path,
 func (f *Fs) fullPath(part string) string {
-	return path.Join("/", f.root, part)
+	return f.opt.Enc.FromStandardPath(path.Join("/", f.root, part))
 }

 // NewFs constructs a new filesystem given a root path and configuration options
@@ -249,7 +266,9 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 	if err != nil {
 		return nil, err
 	}
-	client := koofrclient.NewKoofrClient(opt.Endpoint, false)
+	httpClient := httpclient.New()
+	httpClient.Client = fshttp.NewClient(fs.Config)
+	client := koofrclient.NewKoofrClientWithHTTPClient(opt.Endpoint, httpClient)
 	basicAuth := fmt.Sprintf("Basic %s",
 		base64.StdEncoding.EncodeToString([]byte(opt.User+":"+pass)))
 	client.HTTPClient.Headers.Set("Authorization", basicAuth)
@@ -286,7 +305,7 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 		}
 		return nil, errors.New("Failed to find mount " + opt.MountID)
 	}
-	rootFile, err := f.client.FilesInfo(f.mountID, "/"+f.root)
+	rootFile, err := f.client.FilesInfo(f.mountID, f.opt.Enc.FromStandardPath("/"+f.root))
 	if err == nil && rootFile.Type != "dir" {
 		f.root = dir(f.root)
 		err = fs.ErrorIsFile
@@ -297,20 +316,21 @@ func NewFs(name, root string, m configmap.Mapper) (ff fs.Fs, err error) {
 }

 // List returns a list of items in a directory
-func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
+func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
 	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 	if err != nil {
 		return nil, translateErrorsDir(err)
 	}
 	entries = make([]fs.DirEntry, len(files))
 	for i, file := range files {
+		remote := path.Join(dir, f.opt.Enc.ToStandardName(file.Name))
 		if file.Type == "dir" {
-			entries[i] = fs.NewDir(path.Join(dir, file.Name), time.Unix(0, 0))
+			entries[i] = fs.NewDir(remote, time.Unix(0, 0))
 		} else {
 			entries[i] = &Object{
 				fs:     f,
 				info:   file,
-				remote: path.Join(dir, file.Name),
+				remote: remote,
 			}
 		}
 	}
@@ -318,7 +338,7 @@ func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
 }

 // NewObject creates a new remote Object for a given remote path
-func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
+func (f *Fs) NewObject(ctx context.Context, remote string) (obj fs.Object, err error) {
 	info, err := f.client.FilesInfo(f.mountID, f.fullPath(remote))
 	if err != nil {
 		return nil, translateErrorsObject(err)
@@ -334,11 +354,13 @@ func (f *Fs) NewObject(remote string) (obj fs.Object, err error) {
 }

 // Put updates a remote Object
-func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
-	putopts := &koofrclient.PutFilter{
-		ForceOverwrite:    true,
-		NoRename:          true,
-		IgnoreNonExisting: true,
+func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj fs.Object, err error) {
+	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
+	putopts := &koofrclient.PutOptions{
+		ForceOverwrite:             true,
+		NoRename:                   true,
+		OverwriteIgnoreNonExisting: true,
+		SetModified:                &mtime,
 	}
 	fullPath := f.fullPath(src.Remote())
 	dirPath := dir(fullPath)
@@ -347,7 +369,7 @@ func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (obj
 	if err != nil {
 		return nil, err
 	}
-	info, err := f.client.FilesPutOptions(f.mountID, dirPath, name, in, putopts)
+	info, err := f.client.FilesPutWithOptions(f.mountID, dirPath, name, in, putopts)
 	if err != nil {
 		return nil, translateErrorsObject(err)
 	}
@@ -359,8 +381,8 @@
 }

 // PutStream updates a remote Object with a stream of unknown size
-func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
-	return f.Put(in, src, options...)
+func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
+	return f.Put(ctx, in, src, options...)
 }

 // isBadRequest is a predicate which holds true iff the error returned was
@@ -436,13 +458,13 @@ func (f *Fs) mkdir(fullPath string) error {

 // Mkdir creates a directory at the given remote path. Creates ancestors if
 // necessary
-func (f *Fs) Mkdir(dir string) error {
+func (f *Fs) Mkdir(ctx context.Context, dir string) error {
 	fullPath := f.fullPath(dir)
 	return f.mkdir(fullPath)
 }

 // Rmdir removes an (empty) directory at the given remote path
-func (f *Fs) Rmdir(dir string) error {
+func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 	files, err := f.client.FilesList(f.mountID, f.fullPath(dir))
 	if err != nil {
 		return translateErrorsDir(err)
@@ -458,24 +480,25 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
 }

 // Copy copies a remote Object to the given path
-func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	dstFullPath := f.fullPath(remote)
 	dstDir := dir(dstFullPath)
 	err := f.mkdir(dstDir)
 	if err != nil {
 		return nil, fs.ErrorCantCopy
 	}
+	mtime := src.ModTime(ctx).UnixNano() / 1000 / 1000
 	err = f.client.FilesCopy((src.(*Object)).fs.mountID,
 		(src.(*Object)).fs.fullPath((src.(*Object)).remote),
-		f.mountID, dstFullPath)
+		f.mountID, dstFullPath, koofrclient.CopyOptions{SetModified: &mtime})
 	if err != nil {
 		return nil, fs.ErrorCantCopy
 	}
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }

 // Move moves a remote Object to the given path
-func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
+func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
 	srcObj := src.(*Object)
 	dstFullPath := f.fullPath(remote)
 	dstDir := dir(dstFullPath)
@@ -488,11 +511,11 @@ func (f *Fs) Move(src fs.Object, remote string) (fs.Object, error) {
 	if err != nil {
 		return nil, fs.ErrorCantMove
 	}
-	return f.NewObject(remote)
+	return f.NewObject(ctx, remote)
 }

 // DirMove moves a remote directory to the given path
-func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
+func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) error {
 	srcFs := src.(*Fs)
 	srcFullPath := srcFs.fullPath(srcRemote)
 	dstFullPath := f.fullPath(dstRemote)
@@ -512,7 +535,7 @@ func (f *Fs) DirMove(src fs.Fs, srcRemote, dstRemote string) error {
 }

 // About reports space usage (with a MB precision)
-func (f *Fs) About() (*fs.Usage, error) {
+func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
 	mount, err := f.client.MountsDetails(f.mountID)
 	if err != nil {
 		return nil, err
@@ -528,7 +551,7 @@ func (f *Fs) About() (*fs.Usage, error) {
 }

 // Purge purges the complete Fs
-func (f *Fs) Purge() error {
+func (f *Fs) Purge(ctx context.Context) error {
 	err := translateErrorsDir(f.client.FilesDelete(f.mountID, f.fullPath("")))
 	return err
 }
@@ -580,7 +603,7 @@ func createLink(c *koofrclient.KoofrClient, mountID string, path string) (*link,
 }

 // PublicLink creates a public link to the remote path
-func (f *Fs) PublicLink(remote string) (string, error) {
+func (f *Fs) PublicLink(ctx context.Context, remote string) (string, error) {
 	linkData, err := createLink(f.client, f.mountID, f.fullPath(remote))
 	if err != nil {
 		return "", translateErrorsDir(err)

@@ -3,7 +3,7 @@ package koofr_test
 import (
 	"testing"

-	"github.com/ncw/rclone/fstest/fstests"
+	"github.com/rclone/rclone/fstest/fstests"
 )

 // TestIntegration runs integration tests against the remote

backend/local/aaaa (new, empty file)

Some files were not shown because too many files have changed in this diff.