mirror of
https://github.com/gilbertchen/duplicacy
synced 2025-12-06 00:03:38 +00:00
Compare commits
738 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
175adb14cb | ||
|
|
ae706e3dcf | ||
|
|
5eed6c65f6 | ||
|
|
bec3a0edcd | ||
|
|
b392302c06 | ||
|
|
7c36311aa9 | ||
|
|
7f834e84f6 | ||
|
|
d7c1903d5a | ||
|
|
7da58c6d49 | ||
|
|
4402be6763 | ||
|
|
3abec4e37a | ||
|
|
dd40d4cd2f | ||
|
|
923e906b7e | ||
|
|
0da55f95ab | ||
|
|
2f407d6af9 | ||
|
|
bb680538ee | ||
|
|
7e372edd68 | ||
|
|
836a785798 | ||
|
|
e0a72efb34 | ||
|
|
d839f26b5a | ||
|
|
6ad698328f | ||
|
|
ace1ba5848 | ||
|
|
04a858b555 | ||
|
|
1fedfd1b1a | ||
|
|
3fd3f6b267 | ||
|
|
e3e3e97046 | ||
|
|
3f29ec2ffb | ||
|
|
947006411b | ||
|
|
6841c989c6 | ||
|
|
d0b3b5dc2e | ||
|
|
73ae3f809e | ||
|
|
67a3103467 | ||
|
|
6ee01a2e74 | ||
|
|
b7d820195a | ||
|
|
16d2c14c5a | ||
|
|
eecbb8fa99 | ||
|
|
97bae5f1a3 | ||
|
|
40243fb043 | ||
|
|
403df1fd06 | ||
|
|
4369bcfc0b | ||
|
|
d2b08aebee | ||
|
|
948994c2b6 | ||
|
|
ca4d004aca | ||
|
|
ce472fe375 | ||
|
|
923a6fbc5b | ||
|
|
670cbcd776 | ||
|
|
fd469bae9e | ||
|
|
acef01770a | ||
|
|
1eb1fb14a8 | ||
|
|
8b489f04eb | ||
|
|
089e19f8e6 | ||
|
|
1da7e2b536 | ||
|
|
ed8b4393be | ||
|
|
5e28dc4911 | ||
|
|
f2f07a120d | ||
|
|
153f6a2d20 | ||
|
|
5d45999077 | ||
|
|
1adcf56890 | ||
|
|
09e3cdfebf | ||
|
|
fe854d469d | ||
|
|
76f1274e13 | ||
|
|
9c3122b814 | ||
|
|
6ca8b8dff0 | ||
|
|
4ae16dec7f | ||
|
|
dae040681d | ||
|
|
51cbf73caa | ||
|
|
835af11334 | ||
|
|
4c3557eb80 | ||
|
|
eebcece9e0 | ||
|
|
8c80470c29 | ||
|
|
bcb889272d | ||
|
|
79d8654a12 | ||
|
|
6bf0d2265c | ||
|
|
749db78a1f | ||
|
|
0a51bd8d1a | ||
|
|
7208adbce2 | ||
|
|
e827662869 | ||
|
|
57dd5ba927 | ||
|
|
01a37b7828 | ||
|
|
57cd20bb84 | ||
|
|
0e970da222 | ||
|
|
e880636502 | ||
|
|
810303ce25 | ||
|
|
ffac83dd80 | ||
|
|
05674871fe | ||
|
|
22d6f3abfc | ||
|
|
d26ffe2cff | ||
|
|
a35f6c27be | ||
|
|
808ae4eb75 | ||
|
|
6699e2f440 | ||
|
|
733b68be2c | ||
|
|
b61906c99e | ||
|
|
a0a07d18cc | ||
|
|
a6ce64e715 | ||
|
|
499b612a0d | ||
|
|
46ce0ba1fb | ||
|
|
cc88abd547 | ||
|
|
e888b6d7e5 | ||
|
|
aa07feeac0 | ||
|
|
d43fe1a282 | ||
|
|
7719bb9f29 | ||
|
|
504d07bd51 | ||
|
|
0abb4099f6 | ||
|
|
694494ea54 | ||
|
|
165152493c | ||
|
|
e02041f4ed | ||
|
|
a99f059b52 | ||
|
|
f022a6f684 | ||
|
|
791c61eecb | ||
|
|
6ad27adaea | ||
|
|
9abfbe1ee0 | ||
|
|
b32c3b2cd5 | ||
|
|
9baafdafa2 | ||
|
|
ca7d927840 | ||
|
|
426110e961 | ||
|
|
0ca9cd476e | ||
|
|
abf9a94fc9 | ||
|
|
9a0d60ca84 | ||
|
|
90833f9d86 | ||
|
|
58387c0951 | ||
|
|
81bb188211 | ||
|
|
5821cad8c5 | ||
|
|
662805fbbd | ||
|
|
fc35ddf7d1 | ||
|
|
6efcd37c5c | ||
|
|
58558b8a2f | ||
|
|
045be3905b | ||
|
|
4da7f7b6f9 | ||
|
|
41668d4bbd | ||
|
|
9d4ac34f4b | ||
|
|
eba5aa6eea | ||
|
|
47c4c25d8b | ||
|
|
37781f9540 | ||
|
|
282fe4edd2 | ||
|
|
33c71ca5f8 | ||
|
|
6e7d45caac | ||
|
|
8e9caea201 | ||
|
|
18ba415f56 | ||
|
|
458687d543 | ||
|
|
57a408a577 | ||
|
|
a73ed462b6 | ||
|
|
e56efc1d3a | ||
|
|
bb58f42a37 | ||
|
|
22e8d9e60a | ||
|
|
4eb174cec5 | ||
|
|
6fd3fbd568 | ||
|
|
a6fe3d785e | ||
|
|
1da151f9d9 | ||
|
|
4b69c1162e | ||
|
|
abcb4d75c1 | ||
|
|
10d2058738 | ||
|
|
43a5ffe011 | ||
|
|
d16273fe2b | ||
|
|
2eb8ea6094 | ||
|
|
a55ac1b7ad | ||
|
|
2b56d576c7 | ||
|
|
82c6c15f1c | ||
|
|
bebd7c4b77 | ||
|
|
46376d82ed | ||
|
|
c4a3dd1eeb | ||
|
|
31c25e98f7 | ||
|
|
242db8377e | ||
|
|
e6d8b7d070 | ||
|
|
bb652d0a8c | ||
|
|
a354d03bc9 | ||
|
|
4b9524bd43 | ||
|
|
a782d42ad6 | ||
|
|
0762c448c4 | ||
|
|
741644b575 | ||
|
|
df7487cc0b | ||
|
|
8aa67c8162 | ||
|
|
53548a895f | ||
|
|
5e8baab4ec | ||
|
|
e1fa39008d | ||
|
|
aaebf4510c | ||
|
|
96dd28995b | ||
|
|
166f6e6266 | ||
|
|
86c89f43a0 | ||
|
|
2e5cbc73b9 | ||
|
|
21b3d9e57f | ||
|
|
244b797a1c | ||
|
|
073292018c | ||
|
|
15f15aa2ca | ||
|
|
d8e13d8d85 | ||
|
|
bfb4b44c0a | ||
|
|
a1efbe3b73 | ||
|
|
cce798ceac | ||
|
|
22a0b222db | ||
|
|
674d35e5ca | ||
|
|
ab28115f95 | ||
|
|
a7d2a941be | ||
|
|
39d71a3256 | ||
|
|
9d10cc77fc | ||
|
|
e8b8922754 | ||
|
|
93cc632021 | ||
|
|
48cc5eaedb | ||
|
|
f304b64b3f | ||
|
|
8ae7d2a97d | ||
|
|
fce4234861 | ||
|
|
e499a24202 | ||
|
|
89769f3906 | ||
|
|
798cec0714 | ||
|
|
72dfaa8b6b | ||
|
|
117cfd997f | ||
|
|
84f7c513d5 | ||
|
|
dfdbfed64b | ||
|
|
d4a65ffbcf | ||
|
|
736003323a | ||
|
|
0af74616b7 | ||
|
|
0f552c8c50 | ||
|
|
1adf92e879 | ||
|
|
f92f1a728c | ||
|
|
9a0dcdb0b2 | ||
|
|
20172e07e6 | ||
|
|
9ae306644d | ||
|
|
6f0166be6d | ||
|
|
f68eb13584 | ||
|
|
dd53b4797e | ||
|
|
7e021f26d3 | ||
|
|
0e585e4be4 | ||
|
|
e03cd2a880 | ||
|
|
f80a5b1025 | ||
|
|
aadd2aa390 | ||
|
|
72239a31c4 | ||
|
|
c9b60cc0e0 | ||
|
|
f4cdd1f01b | ||
|
|
b1c1b47983 | ||
|
|
8c3ef6cae9 | ||
|
|
acd7addc9a | ||
|
|
c23ea30da4 | ||
|
|
a4c46624ea | ||
|
|
5747d6763f | ||
|
|
ef1d316f33 | ||
|
|
714a45c34a | ||
|
|
23a2d91608 | ||
|
|
23b98a3034 | ||
|
|
8a3c5847a8 | ||
|
|
4c3d5dbc2f | ||
|
|
85bc55e374 | ||
|
|
cd0c7b07a9 | ||
|
|
0ea26a92dd | ||
|
|
ca889fca9f | ||
|
|
fbaea6e8b1 | ||
|
|
2290c4ace0 | ||
|
|
02cd41f4d0 | ||
|
|
0db8b9831b | ||
|
|
4dd5c43307 | ||
|
|
6aedc37118 | ||
|
|
9a56ede07c | ||
|
|
c6e9460b7b | ||
|
|
e74ab809ae | ||
|
|
5d2242d39d | ||
|
|
b99f4bffec | ||
|
|
be2856ebbd | ||
|
|
1ea615fb45 | ||
|
|
7d933a2576 | ||
|
|
13fffc2a11 | ||
|
|
9658463ebe | ||
|
|
cd77a029ea | ||
|
|
4948806d3d | ||
|
|
42c317c477 | ||
|
|
013eac0cf2 | ||
|
|
bc9ccd860f | ||
|
|
25935ca324 | ||
|
|
bcace5aee2 | ||
|
|
8fdb399e1b | ||
|
|
e07226bd62 | ||
|
|
9d632c0434 | ||
|
|
cc6e96527e | ||
|
|
ddf61aee9d | ||
|
|
52fd553bb9 | ||
|
|
7230ddbef5 | ||
|
|
ffe04d691b | ||
|
|
e0d7355494 | ||
|
|
d330f61d25 | ||
|
|
e5beb55336 | ||
|
|
57082cd1d2 | ||
|
|
bd5a689b7d | ||
|
|
b52d6b3f7f | ||
|
|
8aaca37a2b | ||
|
|
9898f77d9c | ||
|
|
30f753e499 | ||
|
|
d0771be2dd | ||
|
|
25fbc9ad03 | ||
|
|
91f02768f9 | ||
|
|
8e8a116028 | ||
|
|
771323510d | ||
|
|
61fb0f7b40 | ||
|
|
f1060491ae | ||
|
|
837fd5e4fd | ||
|
|
0670f709f3 | ||
|
|
f944e01a02 | ||
|
|
f6ef9094bc | ||
|
|
36d7c583fa | ||
|
|
9fdff7b150 | ||
|
|
dfbc5ece00 | ||
|
|
50d2e2603a | ||
|
|
61e4329522 | ||
|
|
801433340a | ||
|
|
91a95d0cd3 | ||
|
|
612f6e27cb | ||
|
|
430d7b6241 | ||
|
|
c5e2032715 | ||
|
|
048827742c | ||
|
|
0576efe36c | ||
|
|
8bd463288f | ||
|
|
2f4e7422ca | ||
|
|
9dbf517e8a | ||
|
|
e93ee2d776 | ||
|
|
3371ea445e | ||
|
|
6f69aff712 | ||
|
|
7a7ea3ad18 | ||
|
|
4aa2edb164 | ||
|
|
29bbd49a1c | ||
|
|
c829b80527 | ||
|
|
81e889ef3f | ||
|
|
1d5b910f5e | ||
|
|
ce946f7745 | ||
|
|
b9e89b2530 | ||
|
|
63aa47f193 | ||
|
|
214a119507 | ||
|
|
34e49d4589 | ||
|
|
8da36e9998 | ||
|
|
fe9cd7c8a8 | ||
|
|
90e1639611 | ||
|
|
1925b8d5fd | ||
|
|
65fca6f5c8 | ||
|
|
8fbef22429 | ||
|
|
1dcb3a05fc | ||
|
|
91d31f4091 | ||
|
|
579330b23c | ||
|
|
5caa15eeb8 | ||
|
|
652ebaca16 | ||
|
|
2bd9406244 | ||
|
|
9ac6e8713f | ||
|
|
dc9df61d37 | ||
|
|
73ed56e9cc | ||
|
|
69286a5413 | ||
|
|
5e6c2cc9c5 | ||
|
|
a6d071e1b5 | ||
|
|
8600803ba0 | ||
|
|
a6de3c1e74 | ||
|
|
669d5ed3f4 | ||
|
|
eb1c26b319 | ||
|
|
86767b3df6 | ||
|
|
5d905c83b8 | ||
|
|
57edf5823d | ||
|
|
7e1fb6130a | ||
|
|
8ad981b64d | ||
|
|
787c421a0c | ||
|
|
b0b08cec4c | ||
|
|
9608a7f6b6 | ||
|
|
bdea4bed15 | ||
|
|
0db7470af5 | ||
|
|
c08a26a0c2 | ||
|
|
b788b9887c | ||
|
|
4640c20dec | ||
|
|
47137b85e3 | ||
|
|
9d38b49e42 | ||
|
|
b0a67cefb7 | ||
|
|
6fd85fc687 | ||
|
|
b2ad6da364 | ||
|
|
a342431b3c | ||
|
|
ff27cec2af | ||
|
|
746c1656a8 | ||
|
|
2f6287a45d | ||
|
|
32d0f97bfb | ||
|
|
86a6ededab | ||
|
|
6e3c1657fa | ||
|
|
be89d8d0dc | ||
|
|
f044d37b28 | ||
|
|
04debec0a1 | ||
|
|
0784644996 | ||
|
|
f57fe55543 | ||
|
|
be2c3931cd | ||
|
|
a5d3340837 | ||
|
|
bd39302eee | ||
|
|
0dd138e16f | ||
|
|
7162d8916e | ||
|
|
f9603dad3c | ||
|
|
80742ce2ba | ||
|
|
ce52ec1e5d | ||
|
|
8841ced1f5 | ||
|
|
5031ae15d0 | ||
|
|
3dad87f13a | ||
|
|
6c96c52a93 | ||
|
|
2c2884abfb | ||
|
|
ed52850c98 | ||
|
|
46917ddf6b | ||
|
|
923cd0aa63 | ||
|
|
0fee771a74 | ||
|
|
b3d1eb36bd | ||
|
|
3c03b566ae | ||
|
|
978212fd75 | ||
|
|
bb1a15382e | ||
|
|
d20ea41cd0 | ||
|
|
ef19a3705f | ||
|
|
fc71cb1b49 | ||
|
|
6a03a98f55 | ||
|
|
45bc778898 | ||
|
|
d5d7649041 | ||
|
|
f1fe64b9cc | ||
|
|
e2fe57e959 | ||
|
|
ae44bf7226 | ||
|
|
fab9cc77c6 | ||
|
|
c63621cb8c | ||
|
|
f20e823119 | ||
|
|
805f6fd15d | ||
|
|
f25783d59d | ||
|
|
3cf3ad06fa | ||
|
|
d3cea2c7d0 | ||
|
|
f74ea0368e | ||
|
|
6bffef36bf | ||
|
|
b56d7dedba | ||
|
|
554f63263f | ||
|
|
bfb7370ff2 | ||
|
|
03c2a190ee | ||
|
|
491252e3e4 | ||
|
|
84fc1343a7 | ||
|
|
c42a5a86a4 | ||
|
|
d1817ae557 | ||
|
|
eb4c875fd0 | ||
|
|
cecb73071e | ||
|
|
0bf66168fb | ||
|
|
d8573ca789 | ||
|
|
6b2f50a1e8 | ||
|
|
81b8550232 | ||
|
|
f6e2877948 | ||
|
|
de2f7c447f | ||
|
|
3c1057a3c6 | ||
|
|
8808ad5c28 | ||
|
|
707967e91b | ||
|
|
3f83890859 | ||
|
|
68fb6d671e | ||
|
|
b04ef67d26 | ||
|
|
72ba2dfa87 | ||
|
|
b41e8a24a9 | ||
|
|
a3aa575c68 | ||
|
|
e765575210 | ||
|
|
044e1862e5 | ||
|
|
612c5b7746 | ||
|
|
457e518151 | ||
|
|
34afc6f93c | ||
|
|
030cd274c2 | ||
|
|
197d20f0e0 | ||
|
|
93cfbf27cb | ||
|
|
46ec852d4d | ||
|
|
dfa6113279 | ||
|
|
d7fdb5fe7f | ||
|
|
37ebbc4736 | ||
|
|
3ae2de241e | ||
|
|
4adb8dbf70 | ||
|
|
41e3d267e5 | ||
|
|
3e23b0c61c | ||
|
|
b7f537de3c | ||
|
|
0c8a88d15a | ||
|
|
204f56e939 | ||
|
|
4a80d94b63 | ||
|
|
3729de1c67 | ||
|
|
6f70b37d61 | ||
|
|
7baf8702a3 | ||
|
|
8fce6f5f83 | ||
|
|
fd362be54a | ||
|
|
0c13da9872 | ||
|
|
4912911017 | ||
|
|
f69550d0db | ||
|
|
799b040913 | ||
|
|
41e3843bfa | ||
|
|
9e1d2ac1e6 | ||
|
|
bc40498d1b | ||
|
|
446bb4bcc8 | ||
|
|
150ea13a0d | ||
|
|
8c5b7d5f63 | ||
|
|
315dfff7d6 | ||
|
|
0bc475ca4d | ||
|
|
a0fa0fe7da | ||
|
|
01db72080c | ||
|
|
22ddc04698 | ||
|
|
2aa3b2b737 | ||
|
|
76f75cb0cb | ||
|
|
ea4c4339e6 | ||
|
|
fa294eabf4 | ||
|
|
0ec262fd93 | ||
|
|
db3e0946bb | ||
|
|
c426bf5af2 | ||
|
|
823b82060c | ||
|
|
4308e3e6e9 | ||
|
|
0391ecf941 | ||
|
|
7ecf895d85 | ||
|
|
a43114da99 | ||
|
|
caaff6b4b2 | ||
|
|
18964e89a1 | ||
|
|
2d1ea86d8e | ||
|
|
d881ac9169 | ||
|
|
1aee9bd6ef | ||
|
|
f3447bb611 | ||
|
|
9be4927c87 | ||
|
|
a0fcb8802b | ||
|
|
58cfeec6ab | ||
|
|
0d442e736d | ||
|
|
b32bda162d | ||
|
|
e6767bfad4 | ||
|
|
0b9e23fcd8 | ||
|
|
7f04a79111 | ||
|
|
211c6867d3 | ||
|
|
4a31fcfb68 | ||
|
|
6a4b1f2a3f | ||
|
|
483ae5e6eb | ||
|
|
f8d879d414 | ||
|
|
c2120ad3d5 | ||
|
|
f8764a5a79 | ||
|
|
736b4da0c3 | ||
|
|
0aa122609a | ||
|
|
18462cf585 | ||
|
|
e06283f0b3 | ||
|
|
b4f3142275 | ||
|
|
cdd1f26079 | ||
|
|
199e312bea | ||
|
|
88141216e9 | ||
|
|
f9ede565ff | ||
|
|
93a61a6e49 | ||
|
|
7d31199631 | ||
|
|
f2451911f2 | ||
|
|
ac655c8780 | ||
|
|
c31d2a30d9 | ||
|
|
83da36cae0 | ||
|
|
96e2f78096 | ||
|
|
593b409329 | ||
|
|
5334f45998 | ||
|
|
b56baa80c3 | ||
|
|
74ab8d8c23 | ||
|
|
a7613ab7d9 | ||
|
|
65127c7ab7 | ||
|
|
09f695b3e1 | ||
|
|
2908b807b9 | ||
|
|
ba3702647b | ||
|
|
0a149cd509 | ||
|
|
2cbb72c2d0 | ||
|
|
12134ea6ad | ||
|
|
4291bc775b | ||
|
|
817e36c7a6 | ||
|
|
b7b54478fc | ||
|
|
8d06fa491a | ||
|
|
42a6ab9140 | ||
|
|
bad990e702 | ||
|
|
d27335ad8d | ||
|
|
a584828e1b | ||
|
|
d0c376f593 | ||
|
|
a54029cf2b | ||
|
|
839be6094f | ||
|
|
84a4c86ca7 | ||
|
|
651d82e511 | ||
|
|
6a73a62591 | ||
|
|
169d6db544 | ||
|
|
25684942b3 | ||
|
|
746431d5e0 | ||
|
|
28da4d15e2 | ||
|
|
d36e80a5eb | ||
|
|
fe1de10f22 | ||
|
|
112d5b22e5 | ||
|
|
3da8830592 | ||
|
|
04b01fa87d | ||
|
|
4b60859054 | ||
|
|
7e5fc0972d | ||
|
|
c9951d6036 | ||
|
|
92b3594e89 | ||
|
|
2424a2eeed | ||
|
|
2ace6c74e1 | ||
|
|
2fcc4d44b9 | ||
|
|
3f45b0a15a | ||
|
|
2d69f64c20 | ||
|
|
7a1a541c98 | ||
|
|
7aa0eca47c | ||
|
|
aa909c0c15 | ||
|
|
9e1740c1d6 | ||
|
|
ae34347741 | ||
|
|
a37bc206d0 | ||
|
|
dd11641611 | ||
|
|
1361b553ac | ||
|
|
26f2ebd8dd | ||
|
|
fa8c99747e | ||
|
|
53f8a51b12 | ||
|
|
c688c501d3 | ||
|
|
1e8442311d | ||
|
|
eeed2a4ff2 | ||
|
|
42337d84c3 | ||
|
|
c88e148d59 | ||
|
|
36044e13c0 | ||
|
|
e73354c0f5 | ||
|
|
4a1dc01ff4 | ||
|
|
89f7a2e8df | ||
|
|
a3a7a79ad3 | ||
|
|
6baeef3d60 | ||
|
|
8e0d2294a2 | ||
|
|
bdf017e552 | ||
|
|
86843b4d11 | ||
|
|
c67f07f2db | ||
|
|
04aaaaf82d | ||
|
|
3df53ae610 | ||
|
|
9952fd9410 | ||
|
|
e479f7dddc | ||
|
|
42d902687f | ||
|
|
0124aecd8d | ||
|
|
49b4b285a1 | ||
|
|
78b164cdfb | ||
|
|
6c3f4a6992 | ||
|
|
8cb6635ba6 | ||
|
|
ee56652c90 | ||
|
|
cd7f18f284 | ||
|
|
18766c86dc | ||
|
|
f8d2671038 | ||
|
|
1d12fa3dd8 | ||
|
|
6c793f25ee | ||
|
|
901605ca68 | ||
|
|
1f83a6e793 | ||
|
|
117317af3f | ||
|
|
77dbabf5d3 | ||
|
|
2fde44c2ec | ||
|
|
6828843dfc | ||
|
|
895a785765 | ||
|
|
f71570728a | ||
|
|
405cad1d7b | ||
|
|
1be6a78cc0 | ||
|
|
6ebc2394e5 | ||
|
|
79c440f9fe | ||
|
|
b54558e6fe | ||
|
|
0d8d691664 | ||
|
|
7abcd5b45e | ||
|
|
1d4979cde4 | ||
|
|
79ccd78a3b | ||
|
|
51a99d9a2c | ||
|
|
2b8e9a1f11 | ||
|
|
8c76154a21 | ||
|
|
3a46779a58 | ||
|
|
f0ff4a3ec1 | ||
|
|
50eaaa94f2 | ||
|
|
ee682bad52 | ||
|
|
38a778557b | ||
|
|
a08e54d6d3 | ||
|
|
de3e2b9823 | ||
|
|
b44dfc3ba5 | ||
|
|
09ebcf79d2 | ||
|
|
a93faa84b1 | ||
|
|
b51d979bec | ||
|
|
c2cc41d47c | ||
|
|
8ba06efb85 | ||
|
|
f00c47faf1 | ||
|
|
da478ae340 | ||
|
|
d76576e508 | ||
|
|
21fb36a078 | ||
|
|
22ba312ca8 | ||
|
|
34a8090ca6 | ||
|
|
58a876c4d6 | ||
|
|
efca1b4459 | ||
|
|
93a640cfc1 | ||
|
|
d9f2eab28b | ||
|
|
b09190a79c | ||
|
|
2e58b8739e | ||
|
|
38ba6a3400 | ||
|
|
7ea2139b37 | ||
|
|
1a1983cc09 | ||
|
|
96b10648b5 | ||
|
|
fe86a58a64 | ||
|
|
30d4201192 | ||
|
|
5631fc4a50 | ||
|
|
fb55d31dd0 | ||
|
|
74a72ac93c | ||
|
|
c74b0681bc | ||
|
|
76e4f4f267 | ||
|
|
3410f4fff1 | ||
|
|
0b44bed06e | ||
|
|
f9df29edca | ||
|
|
a96f0ba3d3 | ||
|
|
a6dba8707c | ||
|
|
0870af3008 | ||
|
|
b9affbdb0d | ||
|
|
46b22cc161 | ||
|
|
d5bdd0bf81 | ||
|
|
59fd834b14 | ||
|
|
ab91fd414b | ||
|
|
c3d0da9983 | ||
|
|
c80a021542 | ||
|
|
106ddd0581 | ||
|
|
6153fdd254 | ||
|
|
2bf7df9189 | ||
|
|
1d4cf6f48b | ||
|
|
013eaa5611 | ||
|
|
011ed4e66e | ||
|
|
0c67f47e2c | ||
|
|
f5f1eeaaa5 | ||
|
|
9a8bd41057 | ||
|
|
fdb788b026 | ||
|
|
0aa3199291 | ||
|
|
2255b756f6 | ||
|
|
475b9ed378 | ||
|
|
e23b486f15 | ||
|
|
217dd99adc | ||
|
|
fd0974e35a | ||
|
|
460baafe8e | ||
|
|
b6aa983290 | ||
|
|
24fb80ea61 | ||
|
|
04b27d06ec | ||
|
|
67d6706a2b | ||
|
|
6f963a8194 | ||
|
|
102150459e | ||
|
|
04c8b69817 | ||
|
|
d4b0e77ab4 | ||
|
|
2a8126cf30 | ||
|
|
36adbf59e4 | ||
|
|
0ada49d11d | ||
|
|
85d52feb42 | ||
|
|
a8ad7d130c | ||
|
|
d1a5874fc2 | ||
|
|
4d50cf8622 | ||
|
|
f1f16b5bab | ||
|
|
bf4bfad413 | ||
|
|
0a54795b7f | ||
|
|
04a62b4ca2 | ||
|
|
97419646f0 | ||
|
|
0957eeba47 | ||
|
|
1f585e2df3 | ||
|
|
b3da6ad762 | ||
|
|
328843b399 | ||
|
|
896c2b5074 | ||
|
|
9336fc97ae | ||
|
|
ef9f1b7cb7 | ||
|
|
9f1f5b7b23 | ||
|
|
566a081224 | ||
|
|
0e960106e4 | ||
|
|
be187b7314 | ||
|
|
19b0af86fc | ||
|
|
b0d9fed137 | ||
|
|
1c8fe0810d | ||
|
|
9f816547b4 | ||
|
|
73e5b398a4 | ||
|
|
60881fb112 | ||
|
|
57b297edfe | ||
|
|
80c8ef7869 |
17
.github/ISSUE_TEMPLATE.md
vendored
Normal file
17
.github/ISSUE_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
---
|
||||||
|
name: Please use the official forum
|
||||||
|
about: Please use the official forum instead of Github
|
||||||
|
title: 'Please use the official forum'
|
||||||
|
labels: ''
|
||||||
|
assignees: ''
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
|
||||||
|
Please **use the [Duplicacy Forum](https://forum.duplicacy.com/)** when reporting bugs, making feature requests, asking for help or simply praising Duplicacy for its ease of use.
|
||||||
|
|
||||||
|
We strongly encourage you to create an account on the forum and use that platform for discussion as there is a higher chance that someone there will talk to you.
|
||||||
|
|
||||||
|
There is a handful of people watching the Github Issues and we are in the process of moving **all** of them to the forum as well. Most likely you will not receive an answer here or it will be very slow and you will be pointed to the forum.
|
||||||
|
|
||||||
|
We have already created a comprehensive [Guide](https://forum.duplicacy.com/t/duplicacy-user-guide/1197), and a [How-To](https://forum.duplicacy.com/c/how-to) category which stores more wisdom than these issues on Github.
|
||||||
17
ACKNOWLEDGEMENTS.md
Normal file
17
ACKNOWLEDGEMENTS.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
Duplicacy is based on the following open source projects:
|
||||||
|
|
||||||
|
| Projects | License |
|
||||||
|
|--------|:-------:|
|
||||||
|
|https://github.com/urfave/cli | MIT |
|
||||||
|
|https://github.com/aryann/difflib | MIT |
|
||||||
|
|https://github.com/bkaradzic/go-lz4 | BSD-2-Clause |
|
||||||
|
|https://github.com/Azure/azure-sdk-for-go | Apache-2.0 |
|
||||||
|
|https://github.com/tj/go-dropbox | MIT |
|
||||||
|
|https://github.com/aws/aws-sdk-go | Apache-2.0 |
|
||||||
|
|https://github.com/goamz/goamz | LGPL with static link exception |
|
||||||
|
|https://github.com/howeyc/gopass | ISC |
|
||||||
|
|https://github.com/tmc/keyring | ISC |
|
||||||
|
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|
||||||
|
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|
||||||
|
|https://github.com/go-ole/go-ole | MIT |
|
||||||
|
https://github.com/ncw/swift | MIT |
|
||||||
5
DESIGN.md
Normal file
5
DESIGN.md
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
All documentation has been moved to our wiki page:
|
||||||
|
|
||||||
|
* [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication)
|
||||||
|
* [Snapshot Format](https://github.com/gilbertchen/duplicacy/wiki/Snapshot-Format)
|
||||||
|
* [Encryption](https://github.com/gilbertchen/duplicacy/wiki/Encryption)
|
||||||
20
GUIDE.md
Normal file
20
GUIDE.md
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
All documentation has been moved to our wiki page:
|
||||||
|
|
||||||
|
* Commands
|
||||||
|
* [init](https://github.com/gilbertchen/duplicacy/wiki/init)
|
||||||
|
* [backup](https://github.com/gilbertchen/duplicacy/wiki/backup)
|
||||||
|
* [restore](https://github.com/gilbertchen/duplicacy/wiki/restore)
|
||||||
|
* [list](https://github.com/gilbertchen/duplicacy/wiki/list)
|
||||||
|
* [check](https://github.com/gilbertchen/duplicacy/wiki/check)
|
||||||
|
* [prune](https://github.com/gilbertchen/duplicacy/wiki/prune)
|
||||||
|
* [cat](https://github.com/gilbertchen/duplicacy/wiki/cat)
|
||||||
|
* [history](https://github.com/gilbertchen/duplicacy/wiki/history)
|
||||||
|
* [diff](https://github.com/gilbertchen/duplicacy/wiki/diff)
|
||||||
|
* [password](https://github.com/gilbertchen/duplicacy/wiki/password)
|
||||||
|
* [add](https://github.com/gilbertchen/duplicacy/wiki/add)
|
||||||
|
* [set](https://github.com/gilbertchen/duplicacy/wiki/set)
|
||||||
|
* [copy](https://github.com/gilbertchen/duplicacy/wiki/copy)
|
||||||
|
* [Include/Exclude Patterns](https://github.com/gilbertchen/duplicacy/wiki/Include-Exclude-Patterns)
|
||||||
|
* [Managing Passwords](https://github.com/gilbertchen/duplicacy/wiki/Managing-Passwords)
|
||||||
|
* [Cache](https://github.com/gilbertchen/duplicacy/wiki/Cache)
|
||||||
|
* [Pre-Command and Post-Command Scripts](https://github.com/gilbertchen/duplicacy/wiki/Pre-Command-and-Post-Command-Scripts)
|
||||||
276
Gopkg.lock
generated
Normal file
276
Gopkg.lock
generated
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||||
|
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "cloud.google.com/go"
|
||||||
|
packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
|
||||||
|
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
|
||||||
|
version = "v0.16.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/Azure/go-autorest"
|
||||||
|
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
|
||||||
|
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
|
||||||
|
version = "v10.15.5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/aryann/difflib"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "e206f873d14a916d3d26c40ab667bca123f365a3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/aws/aws-sdk-go"
|
||||||
|
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
|
||||||
|
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
|
||||||
|
version = "v1.30.7"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/bkaradzic/go-lz4"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "74ddf82598bc4745b965729e9c6a463bedd33049"
|
||||||
|
version = "v1.0.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/dgrijalva/jwt-go"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||||
|
version = "v3.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/azure-sdk-for-go"
|
||||||
|
packages = ["storage","version"]
|
||||||
|
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/cli"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/gilbertchen/go-dropbox"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "2233fa1dd846b3a3e8060b6c1ea12883deb9d288"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/gilbertchen/go-ole"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "0e87ea779d9deb219633b828a023b32e1244dd57"
|
||||||
|
version = "v1.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/go.dbus"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/goamz"
|
||||||
|
packages = ["aws","s3"]
|
||||||
|
revision = "eada9f4e8cc2a45db775dee08a2c37597ce4760a"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/gopass"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/keyring"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "8855f5632086e51468cd7ce91056f8da69687ef6"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/xattr"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/golang/groupcache"
|
||||||
|
packages = ["lru"]
|
||||||
|
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/golang/protobuf"
|
||||||
|
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
|
||||||
|
revision = "84668698ea25b64748563aa20726db66a6b8d299"
|
||||||
|
version = "v1.3.5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/googleapis/gax-go"
|
||||||
|
packages = [".","v2"]
|
||||||
|
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
|
||||||
|
version = "v2.0.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/jmespath/go-jmespath"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "c2b33e84"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/klauspost/cpuid"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "750c0591dbbd50ef88371c665ad49e426a4b830b"
|
||||||
|
version = "v1.3.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/klauspost/reedsolomon"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "7daa20bf74337a939c54f892a2eca9d9b578eb7f"
|
||||||
|
version = "v1.9.9"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/kr/fs"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||||
|
version = "v0.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/marstr/guid"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
|
||||||
|
version = "v1.1.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/minio/blake2b-simd"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/minio/highwayhash"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "86a2a969d04373bf05ca722517d30fb1c9a3e4f9"
|
||||||
|
version = "v1.0.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/mmcloughlin/avo"
|
||||||
|
packages = ["attr","build","buildtags","gotypes","internal/prnt","internal/stack","ir","operand","pass","printer","reg","src","x86"]
|
||||||
|
revision = "443f81d771042b019379ae4bfcd0a591cb47c88a"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/ncw/swift"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
|
||||||
|
version = "v1.0.50"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/pkg/errors"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "614d223910a179a466c1767a985424175c39b465"
|
||||||
|
version = "v0.9.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/pkg/sftp"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
|
||||||
|
version = "v1.11.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/pkg/xattr"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "dd870b5cfebab49617ea0c1da6176474e8a52bf4"
|
||||||
|
version = "v0.4.1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "github.com/satori/go.uuid"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
|
||||||
|
version = "v1.2.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/vaughan0/go-ini"
|
||||||
|
packages = ["."]
|
||||||
|
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "go.opencensus.io"
|
||||||
|
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
|
||||||
|
revision = "d835ff86be02193d324330acdb7d65546b05f814"
|
||||||
|
version = "v0.22.3"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/crypto"
|
||||||
|
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
|
||||||
|
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "golang.org/x/mod"
|
||||||
|
packages = ["semver"]
|
||||||
|
revision = "859b3ef565e237f9f1a0fb6b55385c497545680d"
|
||||||
|
version = "v0.3.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
|
||||||
|
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "golang.org/x/oauth2"
|
||||||
|
packages = [".","google","internal","jws","jwt"]
|
||||||
|
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/sys"
|
||||||
|
packages = ["cpu","unix","windows"]
|
||||||
|
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "golang.org/x/text"
|
||||||
|
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||||
|
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
|
||||||
|
version = "v0.3.2"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/tools"
|
||||||
|
packages = ["go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/event","internal/event/core","internal/event/keys","internal/event/label","internal/gocommand","internal/packagesinternal","internal/typesinternal"]
|
||||||
|
revision = "5d1fdd8fa3469142b9369713b23d8413d6d83189"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/xerrors"
|
||||||
|
packages = [".","internal"]
|
||||||
|
revision = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/api"
|
||||||
|
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
|
||||||
|
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
|
||||||
|
version = "v0.21.0"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/appengine"
|
||||||
|
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
|
||||||
|
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
|
||||||
|
version = "v1.6.5"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
branch = "master"
|
||||||
|
name = "google.golang.org/genproto"
|
||||||
|
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
|
||||||
|
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
|
||||||
|
|
||||||
|
[[projects]]
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
|
||||||
|
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
|
||||||
|
version = "v1.28.1"
|
||||||
|
|
||||||
|
[solve-meta]
|
||||||
|
analyzer-name = "dep"
|
||||||
|
analyzer-version = 1
|
||||||
|
inputs-digest = "e46f7c2dac527af6d5a0d47f1444421a6738d28252eb5d6084fc1c65f2b41bd8"
|
||||||
|
solver-name = "gps-cdcl"
|
||||||
|
solver-version = 1
|
||||||
98
Gopkg.toml
Normal file
98
Gopkg.toml
Normal file
@@ -0,0 +1,98 @@
|
|||||||
|
|
||||||
|
# Gopkg.toml example
|
||||||
|
#
|
||||||
|
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
|
||||||
|
# for detailed Gopkg.toml documentation.
|
||||||
|
#
|
||||||
|
# required = ["github.com/user/thing/cmd/thing"]
|
||||||
|
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||||
|
#
|
||||||
|
# [[constraint]]
|
||||||
|
# name = "github.com/user/project"
|
||||||
|
# version = "1.0.0"
|
||||||
|
#
|
||||||
|
# [[constraint]]
|
||||||
|
# name = "github.com/user/project2"
|
||||||
|
# branch = "dev"
|
||||||
|
# source = "github.com/myfork/project2"
|
||||||
|
#
|
||||||
|
# [[override]]
|
||||||
|
# name = "github.com/x/y"
|
||||||
|
# version = "2.4.0"
|
||||||
|
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "cloud.google.com/go"
|
||||||
|
version = "0.16.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/aryann/difflib"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/aws/aws-sdk-go"
|
||||||
|
version = "1.30.7"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/bkaradzic/go-lz4"
|
||||||
|
version = "1.0.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/gilbertchen/azure-sdk-for-go"
|
||||||
|
branch = "master"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/cli"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/gilbertchen/go-dropbox"
|
||||||
|
revision = "2233fa1dd846b3a3e8060b6c1ea12883deb9d288"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/gilbertchen/go-ole"
|
||||||
|
version = "1.2.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/goamz"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/gopass"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/keyring"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/gilbertchen/xattr"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "github.com/minio/blake2b-simd"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "github.com/pkg/sftp"
|
||||||
|
version = "1.10.1"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/crypto"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
branch = "master"
|
||||||
|
name = "golang.org/x/net"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "golang.org/x/oauth2"
|
||||||
|
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "google.golang.org/api"
|
||||||
|
version = "0.21.0"
|
||||||
|
|
||||||
|
[[constraint]]
|
||||||
|
name = "google.golang.org/grpc"
|
||||||
|
version = "1.28.0"
|
||||||
7
LICENSE.md
Normal file
7
LICENSE.md
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
Copyright © 2017 Acrosync LLC
|
||||||
|
|
||||||
|
* Free for personal use or commercial trial
|
||||||
|
* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
|
||||||
|
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
|
||||||
|
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
|
||||||
|
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
|
||||||
111
README.md
111
README.md
@@ -1,43 +1,98 @@
|
|||||||
# Duplicacy: A new generation cloud backup tool based on Lock-Free Deduplication
|
# Duplicacy: A lock-free deduplication cloud backup tool
|
||||||
|
|
||||||
Duplicacy supports major cloud storage providers (Amazon S3, Googld Cloud Storage, Microsoft Azure, Dropbox, and BackBlaze) and at the same time offers all essential features of a modern backup tool:
|
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).
|
||||||
|
|
||||||
* Incremental backup: only back up what has been changed
|
This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Web GUI frontend built for Windows, macOS, and Linux, available from https://duplicacy.com.
|
||||||
* Full snapshot : although each backup is incremental, it must appear to be a full snapshot independent of others
|
|
||||||
* Deduplication: identical files must be stored as one copy (file-level deduplication), and identical parts from different files must be stored as one copy (block-level deduplication)
|
|
||||||
* Encryption: encrypt not only file contents but also file paths, sizes, times, etc.
|
|
||||||
* Deletion: every backup can be deleted independently without affecting others
|
|
||||||
* Concurrent access: multiple clients can back up to the same storage at the same time
|
|
||||||
|
|
||||||
The key idea behind Duplicacy is a technique called **Lock-Free Deduplication**. There are three elements of lock-free deduplication:
|
There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.
|
||||||
|
|
||||||
|
## Features
|
||||||
|
|
||||||
|
There are 3 core advantages of Duplicacy over any other open-source or commercial backup tools:
|
||||||
|
|
||||||
|
* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.
|
||||||
|
|
||||||
|
* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The avoidance of a centralized chunk database not only produces a simpler and less error-prone implementation, but also makes it easier to develop advanced features, such as [Asymmetric Encryption](https://github.com/gilbertchen/duplicacy/wiki/RSA-encryption) for stronger encryption and [Erasure Coding](https://github.com/gilbertchen/duplicacy/wiki/Erasure-coding) for resilient data protection.
|
||||||
|
|
||||||
|
* Duplicacy is fast. While the performance wasn't the top-priority design goal, Duplicacy has been shown to outperform other backup tools by a considerable margin, as indicated by the following results obtained from a [benchmarking experiment](https://github.com/gilbertchen/benchmarking) backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other open-source backup tools.
|
||||||
|
|
||||||
|
[](https://github.com/gilbertchen/benchmarking)
|
||||||
|
|
||||||
* Use variable-size chunking algorithm to split files into chunks
|
|
||||||
* Store each chunk in the storage using a file name derived from its hash, and rely on the file system API to manage chunks without using a centralized indexing database
|
|
||||||
* Apply a *two-step fossil collection* algorithm to remove chunks that become unreferenced after a backup is deleted
|
|
||||||
|
|
||||||
## Getting Started
|
## Getting Started
|
||||||
|
|
||||||
```sh
|
* [A brief introduction](https://github.com/gilbertchen/duplicacy/wiki/Quick-Start)
|
||||||
$ cd path/to/your/dir
|
* [Command references](https://github.com/gilbertchen/duplicacy/wiki)
|
||||||
$ duplicacy init mywork sftp://192.168.1.100/Duplicacy
|
* [Building from source](https://github.com/gilbertchen/duplicacy/wiki/Installation)
|
||||||
```
|
|
||||||
|
|
||||||
```sh
|
## Storages
|
||||||
$ duplicacy backup
|
|
||||||
```
|
|
||||||
|
|
||||||
```sh
|
Duplicacy currently provides the following storage backends:
|
||||||
$ duplicacy list
|
|
||||||
```
|
|
||||||
|
|
||||||
```sh
|
* Local disk
|
||||||
$ duplicacy add s3 mywork s3://amazon.com/duplicacy/mywork
|
* SFTP
|
||||||
```
|
* Dropbox
|
||||||
|
* Amazon S3
|
||||||
|
* Wasabi
|
||||||
|
* DigitalOcean Spaces
|
||||||
|
* Google Cloud Storage
|
||||||
|
* Microsoft Azure
|
||||||
|
* Backblaze B2
|
||||||
|
* Google Drive
|
||||||
|
* Microsoft OneDrive
|
||||||
|
* Hubic
|
||||||
|
* OpenStack Swift
|
||||||
|
* WebDAV (under beta testing)
|
||||||
|
* pcloud (via WebDAV)
|
||||||
|
* Box.com (via WebDAV)
|
||||||
|
* File Fabric by [Storage Made Easy](https://storagemadeeasy.com/)
|
||||||
|
|
||||||
```sh
|
Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.
|
||||||
$ duplicacy copy -r 1-2 -to s3
|
|
||||||
```
|
For reference, the following chart shows the running times (in seconds) of backing up the [Linux code base](https://github.com/torvalds/linux) to each of those supported storages:
|
||||||
|
|
||||||
|
|
||||||
|
[](https://github.com/gilbertchen/cloud-storage-comparison)
|
||||||
|
|
||||||
|
|
||||||
|
For complete benchmark results please visit https://github.com/gilbertchen/cloud-storage-comparison.
|
||||||
|
|
||||||
|
## Comparison with Other Backup Tools
|
||||||
|
|
||||||
|
[duplicity](http://duplicity.nongnu.org) works by applying the rsync algorithm (or more specific, the [librsync](https://github.com/librsync/librsync) library)
|
||||||
|
to find the differences from previous backups and only then uploading the differences. It is the only existing backup tool with extensive cloud support -- the [long list](http://duplicity.nongnu.org/duplicity.1.html#sect7) of storage backends covers almost every cloud provider one can think of. However, duplicity's biggest flaw lies in its incremental model -- a chain of dependent backups starts with a full backup followed by a number of incremental ones, and ends when another full backup is uploaded. Deleting one backup will render useless all the subsequent backups on the same chain. Periodic full backups are required, in order to make previous backups disposable.
|
||||||
|
|
||||||
|
[bup](https://github.com/bup/bup) also uses librsync to split files into chunks but save chunks in the git packfile format. It doesn't support any cloud storage, or deletion of old backups.
|
||||||
|
|
||||||
|
[Duplicati](https://duplicati.com) is one of the first backup tools that adopt the chunk-based approach to split files into chunks which are then uploaded to the storage. The chunk-based approach got the incremental backup model right in the sense that every incremental backup is actually a full snapshot. As Duplicati splits files into fixed-size chunks, deletions or insertions of a few bytes will foil the deduplication. Cloud support is extensive, but multiple clients can't back up to the same storage location.
|
||||||
|
|
||||||
|
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup are also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
|
||||||
|
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
|
||||||
|
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.
|
||||||
|
|
||||||
|
[restic](https://restic.github.io) is a more recent addition. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
|
||||||
|
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks), and because a chunk database is used, deduplication isn't real-time (different clients sharing the same files will upload different copies of the same chunks). A prune operation will completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.
|
||||||
|
|
||||||
|
|
||||||
|
The following table compares the feature lists of all these backup tools:
|
||||||
|
|
||||||
|
|
||||||
|
| Feature/Tool | duplicity | bup | Duplicati | Attic | restic | **Duplicacy** |
|
||||||
|
|:------------------:|:---------:|:---:|:-----------------:|:---------------:|:-----------------:|:-------------:|
|
||||||
|
| Incremental Backup | Yes | Yes | Yes | Yes | Yes | **Yes** |
|
||||||
|
| Full Snapshot | No | Yes | Yes | Yes | Yes | **Yes** |
|
||||||
|
| Compression | Yes | Yes | Yes | Yes | No | **Yes** |
|
||||||
|
| Deduplication | Weak | Yes | Weak | Yes | Yes | **Yes** |
|
||||||
|
| Encryption | Yes | Yes | Yes | Yes | Yes | **Yes** |
|
||||||
|
| Deletion | No | No | Yes | Yes | No | **Yes** |
|
||||||
|
| Concurrent Access | No | No | No | Not recommended | Exclusive locking | **Lock-free** |
|
||||||
|
| Cloud Support | Extensive | No | Extensive | No | Limited | **Extensive** |
|
||||||
|
| Snapshot Migration | No | No | No | No | No | **Yes** |
|
||||||
|
|
||||||
|
## License
|
||||||
|
|
||||||
|
* Free for personal use or commercial trial
|
||||||
|
* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
|
||||||
|
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
|
||||||
|
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
|
||||||
|
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
|
||||||
|
|||||||
2206
duplicacy/duplicacy_main.go
Normal file
2206
duplicacy/duplicacy_main.go
Normal file
File diff suppressed because it is too large
Load Diff
BIN
images/duplicacy_benchmark_cloud.png
Normal file
BIN
images/duplicacy_benchmark_cloud.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 60 KiB |
BIN
images/duplicacy_benchmark_speed.png
Normal file
BIN
images/duplicacy_benchmark_speed.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 56 KiB |
BIN
images/duplicacy_encryption.png
Normal file
BIN
images/duplicacy_encryption.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 29 KiB |
BIN
images/fossil_collection_1.png
Normal file
BIN
images/fossil_collection_1.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 29 KiB |
BIN
images/fossil_collection_2.png
Normal file
BIN
images/fossil_collection_2.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 28 KiB |
31
integration_tests/copy_test.sh
Executable file
31
integration_tests/copy_test.sh
Executable file
@@ -0,0 +1,31 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
|
||||||
|
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||||
|
add_file file1
|
||||||
|
add_file file2
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} copy -from default -to secondary
|
||||||
|
add_file file3
|
||||||
|
add_file file4
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} copy -from default -to secondary
|
||||||
|
${DUPLICACY} check --files -stats -storage default
|
||||||
|
${DUPLICACY} check --files -stats -storage secondary
|
||||||
|
# Prune revisions from default storage
|
||||||
|
${DUPLICACY} -d -v -log prune -r 1-2 -exclusive -exhaustive -storage default
|
||||||
|
# Copy snapshot revisions from secondary back to default
|
||||||
|
${DUPLICACY} copy -from secondary -to default
|
||||||
|
# Check snapshot revisions again to make sure we're ok!
|
||||||
|
${DUPLICACY} check --files -stats -storage default
|
||||||
|
${DUPLICACY} check --files -stats -storage secondary
|
||||||
|
# Check for orphaned or missing chunks
|
||||||
|
${DUPLICACY} prune -exhaustive -exclusive -storage default
|
||||||
|
${DUPLICACY} prune -exhaustive -exclusive -storage secondary
|
||||||
|
popd
|
||||||
18
integration_tests/fixed_test.sh
Executable file
18
integration_tests/fixed_test.sh
Executable file
@@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Sanity test for the fixed-size chunking algorithm
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 64 -max 64 -min 64
|
||||||
|
|
||||||
|
add_file file3
|
||||||
|
add_file file4
|
||||||
|
|
||||||
|
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
popd
|
||||||
38
integration_tests/resume_test.sh
Executable file
38
integration_tests/resume_test.sh
Executable file
@@ -0,0 +1,38 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 4
|
||||||
|
|
||||||
|
# Create 10 small files
|
||||||
|
add_file file1 20
|
||||||
|
add_file file2 20
|
||||||
|
rm file3; touch file3
|
||||||
|
add_file file4 20
|
||||||
|
chmod u-r file4
|
||||||
|
add_file file5 20
|
||||||
|
add_file file6 20
|
||||||
|
add_file file7 20
|
||||||
|
add_file file8 20
|
||||||
|
add_file file9 20
|
||||||
|
add_file file10 20
|
||||||
|
|
||||||
|
# Fail at the 10th chunk
|
||||||
|
env DUPLICACY_FAIL_CHUNK=10 ${DUPLICACY} backup
|
||||||
|
|
||||||
|
# Try it again to test the multiple-resume case
|
||||||
|
env DUPLICACY_FAIL_CHUNK=5 ${DUPLICACY} backup
|
||||||
|
add_file file1 20
|
||||||
|
add_file file2 20
|
||||||
|
|
||||||
|
# Fail the backup before uploading the snapshot
|
||||||
|
env DUPLICACY_FAIL_SNAPSHOT=true ${DUPLICACY} backup
|
||||||
|
|
||||||
|
# Now complete the backup
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} check --files
|
||||||
|
popd
|
||||||
28
integration_tests/sparse_test.sh
Executable file
28
integration_tests/sparse_test.sh
Executable file
@@ -0,0 +1,28 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Testing backup and restore of sparse files
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1m
|
||||||
|
|
||||||
|
for i in `seq 1 10`; do
|
||||||
|
dd if=/dev/urandom of=file3 bs=1000 count=1000 seek=$((100000 * $i))
|
||||||
|
done
|
||||||
|
|
||||||
|
ls -lsh file3
|
||||||
|
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
|
||||||
|
rm file1 file3
|
||||||
|
|
||||||
|
${DUPLICACY} restore -r 1
|
||||||
|
${DUPLICACY} -v restore -r 1 -overwrite -stats -hash
|
||||||
|
|
||||||
|
ls -lsh file3
|
||||||
|
|
||||||
|
popd
|
||||||
18
integration_tests/test.sh
Executable file
18
integration_tests/test.sh
Executable file
@@ -0,0 +1,18 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
init_repo_pref_dir
|
||||||
|
|
||||||
|
backup
|
||||||
|
add_file file3
|
||||||
|
backup
|
||||||
|
add_file file4
|
||||||
|
chmod u-r ${TEST_REPO}/file4
|
||||||
|
backup
|
||||||
|
add_file file5
|
||||||
|
restore
|
||||||
|
check
|
||||||
|
|
||||||
123
integration_tests/test_functions.sh
Normal file
123
integration_tests/test_functions.sh
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
get_abs_filename() {
|
||||||
|
# $1 : relative filename
|
||||||
|
echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
|
||||||
|
}
|
||||||
|
|
||||||
|
pushd () {
|
||||||
|
command pushd "$@" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
popd () {
|
||||||
|
command popd "$@" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
# Functions used to create integration tests suite
|
||||||
|
|
||||||
|
DUPLICACY=$(get_abs_filename ../duplicacy_main)
|
||||||
|
|
||||||
|
# Base directory where test repositories will be created
|
||||||
|
TEST_ZONE=$HOME/DUPLICACY_TEST_ZONE
|
||||||
|
# Test Repository
|
||||||
|
TEST_REPO=$TEST_ZONE/TEST_REPO
|
||||||
|
|
||||||
|
# Storage for test ( For now, only local path storage is supported by test suite)
|
||||||
|
TEST_STORAGE=$TEST_ZONE/TEST_STORAGE
|
||||||
|
|
||||||
|
# Extra storage for copy operation
|
||||||
|
SECONDARY_STORAGE=$TEST_ZONE/SECONDARY_STORAGE
|
||||||
|
|
||||||
|
# Preference directory ( for testing the -pref-dir option)
|
||||||
|
DUPLICACY_PREF_DIR=$TEST_ZONE/TEST_DUPLICACY_PREF_DIR
|
||||||
|
|
||||||
|
# Scratch pad for testing restore
|
||||||
|
TEST_RESTORE_POINT=$TEST_ZONE/RESTORE_POINT
|
||||||
|
|
||||||
|
# Make sure $TEST_ZONE is in know state
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
function fixture()
|
||||||
|
{
|
||||||
|
# clean TEST_RESTORE_POINT
|
||||||
|
rm -rf $TEST_RESTORE_POINT
|
||||||
|
mkdir -p $TEST_RESTORE_POINT
|
||||||
|
|
||||||
|
# clean TEST_STORAGE
|
||||||
|
rm -rf $TEST_STORAGE
|
||||||
|
mkdir -p $TEST_STORAGE
|
||||||
|
|
||||||
|
# clean SECONDARY_STORAGE
|
||||||
|
rm -rf $SECONDARY_STORAGE
|
||||||
|
mkdir -p $SECONDARY_STORAGE
|
||||||
|
|
||||||
|
|
||||||
|
# clean TEST_DOT_DUPLICACY
|
||||||
|
rm -rf $DUPLICACY_PREF_DIR
|
||||||
|
mkdir -p $DUPLICACY_PREF_DIR
|
||||||
|
|
||||||
|
# Create test repository
|
||||||
|
rm -rf ${TEST_REPO}
|
||||||
|
mkdir -p ${TEST_REPO}
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
echo "file1" > file1
|
||||||
|
mkdir dir1
|
||||||
|
echo "file2" > dir1/file2
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
function init_repo()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE
|
||||||
|
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||||
|
${DUPLICACY} backup
|
||||||
|
popd
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function init_repo_pref_dir()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init -pref-dir "${DUPLICACY_PREF_DIR}" integration-tests ${TEST_STORAGE}
|
||||||
|
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
|
||||||
|
${DUPLICACY} backup
|
||||||
|
popd
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
function add_file()
|
||||||
|
{
|
||||||
|
FILE_NAME=$1
|
||||||
|
FILE_SIZE=${2:-20000000}
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
dd if=/dev/urandom of=${FILE_NAME} bs=1 count=$(($RANDOM % ${FILE_SIZE})) &> /dev/null
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function backup()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} backup
|
||||||
|
${DUPLICACY} copy -from default -to secondary
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
function restore()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} restore -r 2 -delete
|
||||||
|
popd
|
||||||
|
}
|
||||||
|
|
||||||
|
function check()
|
||||||
|
{
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} check -files
|
||||||
|
${DUPLICACY} check -storage secondary -files
|
||||||
|
popd
|
||||||
|
}
|
||||||
17
integration_tests/threaded_test.sh
Executable file
17
integration_tests/threaded_test.sh
Executable file
@@ -0,0 +1,17 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
|
||||||
|
. ./test_functions.sh
|
||||||
|
|
||||||
|
fixture
|
||||||
|
|
||||||
|
pushd ${TEST_REPO}
|
||||||
|
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
|
||||||
|
|
||||||
|
add_file file3
|
||||||
|
add_file file4
|
||||||
|
|
||||||
|
|
||||||
|
${DUPLICACY} backup -threads 16
|
||||||
|
${DUPLICACY} check --files -stats
|
||||||
|
popd
|
||||||
454
src/duplicacy_acdclient.go
Normal file
454
src/duplicacy_acdclient.go
Normal file
@@ -0,0 +1,454 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ACDError struct {
|
||||||
|
Status int
|
||||||
|
Message string `json:"message"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (err ACDError) Error() string {
|
||||||
|
return fmt.Sprintf("%d %s", err.Status, err.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"
|
||||||
|
|
||||||
|
type ACDClient struct {
|
||||||
|
HTTPClient *http.Client
|
||||||
|
|
||||||
|
TokenFile string
|
||||||
|
Token *oauth2.Token
|
||||||
|
TokenLock *sync.Mutex
|
||||||
|
|
||||||
|
ContentURL string
|
||||||
|
MetadataURL string
|
||||||
|
|
||||||
|
TestMode bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewACDClient(tokenFile string) (*ACDClient, error) {
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(tokenFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
token := new(oauth2.Token)
|
||||||
|
if err := json.Unmarshal(description, token); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &ACDClient{
|
||||||
|
HTTPClient: http.DefaultClient,
|
||||||
|
TokenFile: tokenFile,
|
||||||
|
Token: token,
|
||||||
|
TokenLock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
|
||||||
|
client.GetEndpoint()
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *ACDClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {
|
||||||
|
|
||||||
|
//LOG_DEBUG("ACD_CALL", "%s %s", method, url)
|
||||||
|
|
||||||
|
var response *http.Response
|
||||||
|
|
||||||
|
backoff := 1
|
||||||
|
for i := 0; i < 8; i++ {
|
||||||
|
var inputReader io.Reader
|
||||||
|
|
||||||
|
switch input.(type) {
|
||||||
|
default:
|
||||||
|
jsonInput, err := json.Marshal(input)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
inputReader = bytes.NewReader(jsonInput)
|
||||||
|
case []byte:
|
||||||
|
inputReader = bytes.NewReader(input.([]byte))
|
||||||
|
case int:
|
||||||
|
inputReader = bytes.NewReader([]byte(""))
|
||||||
|
case *bytes.Buffer:
|
||||||
|
inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
|
||||||
|
case *RateLimitedReader:
|
||||||
|
input.(*RateLimitedReader).Reset()
|
||||||
|
inputReader = input.(*RateLimitedReader)
|
||||||
|
}
|
||||||
|
|
||||||
|
request, err := http.NewRequest(method, url, inputReader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if reader, ok := inputReader.(*RateLimitedReader); ok {
|
||||||
|
request.ContentLength = reader.Length()
|
||||||
|
}
|
||||||
|
|
||||||
|
if url != ACDRefreshTokenURL {
|
||||||
|
client.TokenLock.Lock()
|
||||||
|
request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
|
||||||
|
client.TokenLock.Unlock()
|
||||||
|
}
|
||||||
|
if contentType != "" {
|
||||||
|
request.Header.Set("Content-Type", contentType)
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err = client.HTTPClient.Do(request)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode < 400 {
|
||||||
|
return response.Body, response.ContentLength, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode == 404 {
|
||||||
|
buffer := new(bytes.Buffer)
|
||||||
|
buffer.ReadFrom(response.Body)
|
||||||
|
response.Body.Close()
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: buffer.String()}
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode == 400 {
|
||||||
|
defer response.Body.Close()
|
||||||
|
|
||||||
|
e := &ACDError{
|
||||||
|
Status: response.StatusCode,
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := json.NewDecoder(response.Body).Decode(e); err == nil {
|
||||||
|
return nil, 0, e
|
||||||
|
} else {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Bad input parameter"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
response.Body.Close()
|
||||||
|
|
||||||
|
if response.StatusCode == 401 {
|
||||||
|
|
||||||
|
if url == ACDRefreshTokenURL {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Unauthorized"}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.RefreshToken()
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
} else if response.StatusCode == 403 {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Forbidden"}
|
||||||
|
} else if response.StatusCode == 404 {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Resource not found"}
|
||||||
|
} else if response.StatusCode == 409 {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Conflict"}
|
||||||
|
} else if response.StatusCode == 411 {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Length required"}
|
||||||
|
} else if response.StatusCode == 412 {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Precondition failed"}
|
||||||
|
} else if response.StatusCode == 429 || response.StatusCode == 500 {
|
||||||
|
reason := "Too many requests"
|
||||||
|
if response.StatusCode == 500 {
|
||||||
|
reason = "Internal server error"
|
||||||
|
}
|
||||||
|
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
|
||||||
|
LOG_INFO("ACD_RETRY", "%s; retry after %d milliseconds", reason, retryAfter)
|
||||||
|
time.Sleep(retryAfter * time.Millisecond)
|
||||||
|
backoff *= 2
|
||||||
|
continue
|
||||||
|
} else if response.StatusCode == 503 {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Service unavailable"}
|
||||||
|
} else {
|
||||||
|
return nil, 0, ACDError{Status: response.StatusCode, Message: "Unknown error"}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil, 0, fmt.Errorf("Maximum number of retries reached")
|
||||||
|
}
|
||||||
|
|
||||||
|
// RefreshToken exchanges the current OAuth token for a fresh one via the
// token service and persists the refreshed token to the token file.
// The token lock serializes concurrent refreshes across threads.
func (client *ACDClient) RefreshToken() (err error) {

	client.TokenLock.Lock()
	defer client.TokenLock.Unlock()

	// POST the existing token to the refresh endpoint; the response body
	// carries the refreshed token in JSON form.
	readCloser, _, err := client.call(ACDRefreshTokenURL, "POST", client.Token, "")
	if err != nil {
		return err
	}

	defer readCloser.Close()

	// Decode the response in place, overwriting the current token.
	if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
		return err
	}

	description, err := json.Marshal(client.Token)
	if err != nil {
		return err
	}

	// Persist the refreshed token so later runs can reuse it without
	// re-authorizing.
	err = ioutil.WriteFile(client.TokenFile, description, 0644)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// ACDGetEndpointOutput is the response of the account endpoint query; it
// carries the per-account base URLs used for all subsequent API calls.
type ACDGetEndpointOutput struct {
	CustomerExists bool   `json:"customerExists"`
	ContentURL     string `json:"contentUrl"`  // base URL for file content operations
	MetadataURL    string `json:"metadataUrl"` // base URL for metadata operations
}
|
||||||
|
|
||||||
|
func (client *ACDClient) GetEndpoint() (err error) {
|
||||||
|
|
||||||
|
readCloser, _, err := client.call("https://drive.amazonaws.com/drive/v1/account/endpoint", "GET", 0, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := &ACDGetEndpointOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
client.ContentURL = output.ContentURL
|
||||||
|
client.MetadataURL = output.MetadataURL
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ACDEntry describes a single ACD node (file or folder).
type ACDEntry struct {
	Name string `json:"name"`
	ID   string `json:"id"`
	Size int64  `json:"size"`
	Kind string `json:"kind"` // "FILE" or "FOLDER"
}
|
||||||
|
|
||||||
|
// ACDListEntriesOutput is the response of a children-listing request.
type ACDListEntriesOutput struct {
	Count     int        `json:"count"`
	NextToken string     `json:"nextToken"` // continuation token; empty on the last page
	Entries   []ACDEntry `json:"data"`
}
|
||||||
|
|
||||||
|
func (client *ACDClient) ListEntries(parentID string, listFiles bool, listDirectories bool) ([]ACDEntry, error) {
|
||||||
|
|
||||||
|
startToken := ""
|
||||||
|
|
||||||
|
entries := []ACDEntry{}
|
||||||
|
|
||||||
|
for {
|
||||||
|
|
||||||
|
url := client.MetadataURL + "nodes/" + parentID + "/children?"
|
||||||
|
|
||||||
|
if listFiles && !listDirectories {
|
||||||
|
url += "filters=kind:FILE&"
|
||||||
|
} else if !listFiles && listDirectories {
|
||||||
|
url += "filters=kind:FOLDER&"
|
||||||
|
}
|
||||||
|
|
||||||
|
if startToken != "" {
|
||||||
|
url += "startToken=" + startToken + "&"
|
||||||
|
}
|
||||||
|
|
||||||
|
if client.TestMode {
|
||||||
|
url += "limit=8"
|
||||||
|
} else {
|
||||||
|
url += "limit=200"
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "GET", 0, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := &ACDListEntriesOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
entries = append(entries, output.Entries...)
|
||||||
|
|
||||||
|
startToken = output.NextToken
|
||||||
|
if startToken == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListByName looks up a single child named 'name' under the directory
// 'parentID', returning its id, whether it is a folder, and its size.
// An empty 'parentID' instead queries for the account's root folder.
// A missing entry is reported as an empty id with a nil error.
func (client *ACDClient) ListByName(parentID string, name string) (string, bool, int64, error) {

	url := client.MetadataURL + "nodes"

	if parentID == "" {
		// Root lookup; 'name' is ignored in this case.
		// NOTE(review): 'Kind' is capitalized here while other queries use
		// lowercase 'kind:' — confirm the server treats the filter key
		// case-insensitively.
		url += "?filters=Kind:FOLDER+AND+isRoot:true"
	} else {
		url += "/" + parentID + "/children?filters=name:" + name
	}

	readCloser, _, err := client.call(url, "GET", 0, "")
	if err != nil {
		return "", false, 0, err
	}

	defer readCloser.Close()

	output := &ACDListEntriesOutput{}

	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
		return "", false, 0, err
	}

	// No match: signal "not found" with an empty id rather than an error.
	if len(output.Entries) == 0 {
		return "", false, 0, nil
	}

	// Only the first match is considered.
	return output.Entries[0].ID, output.Entries[0].Kind == "FOLDER", output.Entries[0].Size, nil
}
|
||||||
|
|
||||||
|
func (client *ACDClient) DownloadFile(fileID string) (io.ReadCloser, int64, error) {
|
||||||
|
|
||||||
|
url := client.ContentURL + "nodes/" + fileID + "/content"
|
||||||
|
|
||||||
|
return client.call(url, "GET", 0, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *ACDClient) UploadFile(parentID string, name string, content []byte, rateLimit int) (fileID string, err error) {
|
||||||
|
|
||||||
|
url := client.ContentURL + "nodes?suppress=deduplication"
|
||||||
|
|
||||||
|
body := &bytes.Buffer{}
|
||||||
|
writer := multipart.NewWriter(body)
|
||||||
|
|
||||||
|
metadata := make(map[string]interface{})
|
||||||
|
metadata["name"] = name
|
||||||
|
metadata["kind"] = "FILE"
|
||||||
|
metadata["parents"] = []string{parentID}
|
||||||
|
|
||||||
|
metadataJSON, err := json.Marshal(metadata)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = writer.WriteField("metadata", string(metadataJSON))
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
part, err := writer.CreateFormFile("content", name)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = part.Write(content)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
writer.Close()
|
||||||
|
|
||||||
|
var input interface{}
|
||||||
|
input = body
|
||||||
|
if rateLimit > 0 {
|
||||||
|
input = CreateRateLimitedReader(body.Bytes(), rateLimit)
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "POST", input, writer.FormDataContentType())
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
entry := ACDEntry{}
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return entry.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *ACDClient) DeleteFile(fileID string) error {
|
||||||
|
|
||||||
|
url := client.MetadataURL + "trash/" + fileID
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "PUT", 0, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *ACDClient) MoveFile(fileID string, fromParentID string, toParentID string) error {
|
||||||
|
|
||||||
|
url := client.MetadataURL + "nodes/" + toParentID + "/children"
|
||||||
|
|
||||||
|
parameters := make(map[string]string)
|
||||||
|
parameters["fromParent"] = fromParentID
|
||||||
|
parameters["childId"] = fileID
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "POST", parameters, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *ACDClient) CreateDirectory(parentID string, name string) (string, error) {
|
||||||
|
|
||||||
|
url := client.MetadataURL + "nodes"
|
||||||
|
|
||||||
|
parameters := make(map[string]interface{})
|
||||||
|
parameters["name"] = name
|
||||||
|
parameters["kind"] = "FOLDER"
|
||||||
|
parameters["parents"] = []string{parentID}
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "POST", parameters, "")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
entry := ACDEntry{}
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
return entry.ID, nil
|
||||||
|
}
|
||||||
153
src/duplicacy_acdclient_test.go
Normal file
153
src/duplicacy_acdclient_test.go
Normal file
@@ -0,0 +1,153 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestACDClient is an integration test exercising the full ACD client life
// cycle (directory lookup/creation, upload, list, move, download, delete)
// against the live service.  It requires a valid OAuth token in
// 'acd-token.json' in the working directory and network access; it is not a
// self-contained unit test.
func TestACDClient(t *testing.T) {

	acdClient, err := NewACDClient("acd-token.json")
	if err != nil {
		t.Errorf("Failed to create the ACD client: %v", err)
		return
	}

	// Small page sizes so the pagination path in ListEntries is exercised.
	acdClient.TestMode = true

	// An empty parent id asks for the account's root folder.
	rootID, _, _, err := acdClient.ListByName("", "")
	if err != nil {
		t.Errorf("Failed to get the root node: %v", err)
		return
	}

	if rootID == "" {
		t.Errorf("No root node")
		return
	}

	// Ensure the /test, /test/test1 and /test/test2 directories exist,
	// creating any that are missing.
	testID, _, _, err := acdClient.ListByName(rootID, "test")
	if err != nil {
		t.Errorf("Failed to list the test directory: %v", err)
		return
	}
	if testID == "" {
		testID, err = acdClient.CreateDirectory(rootID, "test")
		if err != nil {
			t.Errorf("Failed to create the test directory: %v", err)
			return
		}
	}

	test1ID, _, _, err := acdClient.ListByName(testID, "test1")
	if err != nil {
		t.Errorf("Failed to list the test1 directory: %v", err)
		return
	}
	if test1ID == "" {
		test1ID, err = acdClient.CreateDirectory(testID, "test1")
		if err != nil {
			t.Errorf("Failed to create the test1 directory: %v", err)
			return
		}
	}

	test2ID, _, _, err := acdClient.ListByName(testID, "test2")
	if err != nil {
		t.Errorf("Failed to list the test2 directory: %v", err)
		return
	}
	if test2ID == "" {
		test2ID, err = acdClient.CreateDirectory(testID, "test2")
		if err != nil {
			t.Errorf("Failed to create the test2 directory: %v", err)
			return
		}
	}

	fmt.Printf("test1: %s, test2: %s\n", test1ID, test2ID)

	numberOfFiles := 20
	maxFileSize := 64 * 1024

	// Upload randomly sized files whose names are the hex SHA-256 of their
	// content, so downloads can be verified by re-hashing.
	for i := 0; i < numberOfFiles; i++ {
		content := make([]byte, rand.Int()%maxFileSize+1)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		hasher := sha256.New()
		hasher.Write(content)
		filename := hex.EncodeToString(hasher.Sum(nil))

		fmt.Printf("file: %s\n", filename)

		_, err = acdClient.UploadFile(test1ID, filename, content, 100)
		if err != nil {
			// The 409-tolerant check is deliberately disabled (commented
			// out); any upload error currently fails the test.
			/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
				t.Errorf("Failed to upload the file %s: %v", filename, err)
				return
			}
		}
	}

	entries, err := acdClient.ListEntries(test1ID, true, false)
	if err != nil {
		t.Errorf("Error list randomly generated files: %v", err)
		return
	}

	// Move every uploaded file from test1 into test2.
	for _, entry := range entries {
		err = acdClient.MoveFile(entry.ID, test1ID, test2ID)
		if err != nil {
			t.Errorf("Failed to move %s: %v", entry.Name, err)
			return
		}
	}

	entries, err = acdClient.ListEntries(test2ID, true, false)
	if err != nil {
		t.Errorf("Error list randomly generated files: %v", err)
		return
	}

	// Download each file and verify its content hash matches its name.
	for _, entry := range entries {
		readCloser, _, err := acdClient.DownloadFile(entry.ID)
		if err != nil {
			t.Errorf("Error downloading file %s: %v", entry.Name, err)
			return
		}

		hasher := sha256.New()
		io.Copy(hasher, readCloser)
		hash := hex.EncodeToString(hasher.Sum(nil))

		if hash != entry.Name {
			t.Errorf("File %s, hash %s", entry.Name, hash)
		}

		readCloser.Close()
	}

	// Clean up: trash everything that was uploaded.
	for _, entry := range entries {

		err = acdClient.DeleteFile(entry.ID)
		if err != nil {
			t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
			return
		}
	}

}
|
||||||
453
src/duplicacy_acdstorage.go
Normal file
453
src/duplicacy_acdstorage.go
Normal file
@@ -0,0 +1,453 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ACDStorage implements a duplicacy storage backend on top of Amazon Cloud
// Drive, translating storage-relative paths into ACD node ids.
type ACDStorage struct {
	StorageBase

	client          *ACDClient        // low-level ACD REST client
	idCache         map[string]string // maps storage-relative paths to ACD node ids
	idCacheLock     *sync.Mutex       // guards idCache
	numberOfThreads int               // used to split the global rate limits per thread
}
|
||||||
|
|
||||||
|
// CreateACDStorage creates an ACD storage object rooted at 'storagePath',
// using the OAuth token stored in 'tokenFile'.  'threads' is the number of
// concurrent transfer threads, used to divide the rate limits.
func CreateACDStorage(tokenFile string, storagePath string, threads int) (storage *ACDStorage, err error) {

	client, err := NewACDClient(tokenFile)
	if err != nil {
		return nil, err
	}

	storage = &ACDStorage{
		client:          client,
		idCache:         make(map[string]string),
		idCacheLock:     &sync.Mutex{},
		numberOfThreads: threads,
	}

	// Resolve the id of the storage root without creating it.
	// NOTE(review): with 'createDirectories' false this can return an empty
	// id with a nil error when 'storagePath' does not exist — confirm
	// callers handle an empty root id.
	storagePathID, err := storage.getIDFromPath(0, storagePath, false)
	if err != nil {
		return nil, err
	}

	// Set 'storagePath' as the root of the storage and clean up the id cache accordingly
	storage.idCache = make(map[string]string)
	storage.idCache[""] = storagePathID

	for _, dir := range []string{"chunks", "fossils", "snapshots"} {
		dirID, isDir, _, err := client.ListByName(storagePathID, dir)
		if err != nil {
			return nil, err
		}
		if dirID == "" {
			// NOTE(review): 'err' is necessarily nil here, so this inner
			// check is dead code — it looks like a directory-creation call
			// was removed.  As written, a missing directory is cached with
			// an empty id; confirm this is intended.
			if err != nil {
				return nil, err
			}
		} else if !isDir {
			return nil, fmt.Errorf("%s is not a directory", storagePath+"/"+dir)
		}
		storage.idCache[dir] = dirID
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
|
||||||
|
|
||||||
|
func (storage *ACDStorage) getPathID(path string) string {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
pathID := storage.idCache[path]
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
return pathID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *ACDStorage) findPathID(path string) (string, bool) {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
pathID, ok := storage.idCache[path]
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
return pathID, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *ACDStorage) savePathID(path string, pathID string) {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
storage.idCache[path] = pathID
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *ACDStorage) deletePathID(path string) {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
delete(storage.idCache, path)
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// convertFilePath converts the path for a fossil in the form of 'chunks/id.fsl' to 'fossils/id'. This is because
|
||||||
|
// ACD doesn't support file renaming. Instead, it only allows one file to be moved from one directory to another.
|
||||||
|
// By adding a layer of path conversion we're pretending that we can rename between 'chunks/id' and 'chunks/id.fsl'
|
||||||
|
func (storage *ACDStorage) convertFilePath(filePath string) string {
|
||||||
|
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||||
|
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||||
|
}
|
||||||
|
return filePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIDFromPath returns the id of the given path.  If 'createDirectories' is
// true, create the given path and all its parent directories if they don't
// exist.  Note that if 'createDirectories' is false, it may return an empty
// 'fileID' (with a nil error) if the file doesn't exist.
func (storage *ACDStorage) getIDFromPath(threadIndex int, filePath string, createDirectories bool) (fileID string, err error) {

	// Fast path: the full path is already cached.  (The ':=' here shadows
	// the named return 'fileID' inside the 'if'.)
	if fileID, ok := storage.findPathID(filePath); ok {
		return fileID, nil
	}

	// Resolve the storage root ("" maps to the root node) on first use.
	parentID, ok := storage.findPathID("")
	if !ok {
		parentID, _, _, err = storage.client.ListByName("", "")
		if err != nil {
			return "", err
		}
		storage.savePathID("", parentID)
	}

	// Walk the path one component at a time, resolving (and optionally
	// creating) each level.
	names := strings.Split(filePath, "/")
	current := ""
	for i, name := range names {

		current = path.Join(current, name)
		fileID, ok := storage.findPathID(current)
		if ok {
			parentID = fileID
			continue
		}
		isDir := false
		fileID, isDir, _, err = storage.client.ListByName(parentID, name)
		if err != nil {
			return "", err
		}
		if fileID == "" {
			if !createDirectories {
				// Not found and not asked to create: empty id, nil error.
				return "", nil
			}
			// Create the current directory
			fileID, err = storage.client.CreateDirectory(parentID, name)
			if err != nil {
				// Check if the directory has been created by another thread
				if e, ok := err.(ACDError); !ok || e.Status != 409 {
					return "", fmt.Errorf("Failed to create directory '%s': %v", current, err)
				}
				// A 409 means the directory may have already been created by
				// another thread.  Poll for up to 10 seconds until we see it.
				// (This inner 'i' shadows the outer loop index; the outer 'i'
				// used below is unaffected.)
				for i := 0; i < 10; i++ {
					var createErr error
					fileID, isDir, _, createErr = storage.client.ListByName(parentID, name)
					if createErr != nil {
						return "", createErr
					}
					if fileID == "" {
						time.Sleep(time.Second)
					} else {
						break
					}
				}
				if fileID == "" {
					return "", fmt.Errorf("All attempts to create directory '%s' failed: %v", current, err)
				}
			} else {
				isDir = true
			}
		} else {
			// NOTE(review): only ids discovered via ListByName are cached
			// here; ids of directories created just above are not saved —
			// confirm whether that is intentional.
			storage.savePathID(current, fileID)
		}
		// Every component except the last must be a directory.
		if i != len(names)-1 && !isDir {
			return "", fmt.Errorf("Path '%s' is not a directory", current)
		}
		parentID = fileID
	}

	return parentID, nil
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
	var err error

	// Normalize away any trailing slashes.
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	if dir == "snapshots" {

		// Listing "snapshots" returns only its subdirectories (snapshot ids).
		entries, err := storage.client.ListEntries(storage.getPathID(dir), false, true)
		if err != nil {
			return nil, nil, err
		}

		subDirs := []string{}

		for _, entry := range entries {
			// NOTE(review): the cache key here is the bare entry name, not
			// "snapshots/"+entry.Name as used when reading the cache below —
			// confirm this is intended.
			storage.savePathID(entry.Name, entry.ID)
			subDirs = append(subDirs, entry.Name+"/")
		}
		return subDirs, nil, nil
	} else if strings.HasPrefix(dir, "snapshots/") {
		// Listing a specific snapshot directory returns its files.
		name := dir[len("snapshots/"):]
		pathID, ok := storage.findPathID(dir)
		if !ok {
			pathID, _, _, err = storage.client.ListByName(storage.getPathID("snapshots"), name)
			if err != nil {
				return nil, nil, err
			}
			if pathID == "" {
				// Missing snapshot directory: empty result, nil error.
				return nil, nil, nil
			}
			storage.savePathID(dir, pathID)
		}

		entries, err := storage.client.ListEntries(pathID, true, false)
		if err != nil {
			return nil, nil, err
		}

		files := []string{}

		for _, entry := range entries {
			storage.savePathID(dir+"/"+entry.Name, entry.ID)
			files = append(files, entry.Name)
		}
		return files, nil, nil
	} else {
		// Any other directory: walk the 'chunks' and 'fossils' trees
		// breadth-first ('parents' grows as subdirectories are found).
		// Fossils are reported as 'chunks'-relative names with a '.fsl'
		// suffix, matching the renaming scheme in convertFilePath.
		files := []string{}
		sizes := []int64{}
		parents := []string{"chunks", "fossils"}
		for i := 0; i < len(parents); i++ {
			parent := parents[i]
			pathID, ok := storage.findPathID(parent)
			if !ok {
				continue
			}
			entries, err := storage.client.ListEntries(pathID, true, true)
			if err != nil {
				return nil, nil, err
			}
			for _, entry := range entries {
				if entry.Kind != "FOLDER" {
					name := entry.Name
					if strings.HasPrefix(parent, "fossils") {
						name = parent + "/" + name + ".fsl"
						name = name[len("fossils/"):]
					} else {
						name = parent + "/" + name
						name = name[len("chunks/"):]
					}
					files = append(files, name)
					sizes = append(sizes, entry.Size)
				} else {
					parents = append(parents, parent+"/"+entry.Name)
				}
				storage.savePathID(parent+"/"+entry.Name, entry.ID)
			}
		}
		return files, sizes, nil
	}

}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *ACDStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
filePath = storage.convertFilePath(filePath)
|
||||||
|
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if fileID == "" {
|
||||||
|
LOG_TRACE("ACD_STORAGE", "File '%s' to be deleted does not exist", filePath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.client.DeleteFile(fileID)
|
||||||
|
if e, ok := err.(ACDError); ok && e.Status == 409 {
|
||||||
|
LOG_DEBUG("ACD_DELETE", "Ignore 409 conflict error")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.  Because ACD only supports moving a node
// between directories (not renaming it), both paths are first run through
// convertFilePath so that a 'chunks/<id>' ↔ 'chunks/<id>.fsl' rename becomes
// a move between the 'chunks' and 'fossils' trees.
func (storage *ACDStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	from = storage.convertFilePath(from)
	to = storage.convertFilePath(to)

	// The source id must already be cached (by an earlier list or upload).
	fileID, ok := storage.findPathID(from)
	if !ok {
		return fmt.Errorf("Attempting to rename file %s with unknown id", from)
	}

	fromParent := path.Dir(from)
	fromParentID, err := storage.getIDFromPath(threadIndex, fromParent, false)
	if err != nil {
		return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", fromParent, err)
	}
	if fromParentID == "" {
		return fmt.Errorf("The parent directory '%s' does not exist", fromParent)
	}

	// The destination parent is created if missing.
	toParent := path.Dir(to)
	toParentID, err := storage.getIDFromPath(threadIndex, toParent, true)
	if err != nil {
		return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", toParent, err)
	}

	err = storage.client.MoveFile(fileID, fromParentID, toParentID)
	if err != nil {
		// A 409 conflict is tolerated (e.g. the move already happened).
		if e, ok := err.(ACDError); ok && e.Status == 409 {
			LOG_DEBUG("ACD_MOVE", "Ignore 409 conflict error")
		} else {
			return err
		}
	}

	// Re-key the cache entry from the old path to the new one.
	storage.savePathID(to, storage.getPathID(from))
	storage.deletePathID(from)

	return nil
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
|
||||||
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
parentPath := path.Dir(dir)
|
||||||
|
if parentPath == "." {
|
||||||
|
parentPath = ""
|
||||||
|
}
|
||||||
|
parentID, ok := storage.findPathID(parentPath)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Path directory '%s' has unknown id", parentPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
name := path.Base(dir)
|
||||||
|
dirID, err := storage.client.CreateDirectory(parentID, name)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(ACDError); ok && e.Status == 409 {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
storage.savePathID(dir, dirID)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at
// 'filePath': whether it exists, whether it is a directory, and its size.
func (storage *ACDStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	// Normalize away any trailing slashes.
	for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
		filePath = filePath[:len(filePath)-1]
	}

	// Map fossil paths into the 'fossils/' tree.
	filePath = storage.convertFilePath(filePath)

	parentPath := path.Dir(filePath)
	if parentPath == "." {
		parentPath = ""
	}
	parentID, err := storage.getIDFromPath(threadIndex, parentPath, false)
	if err != nil {
		return false, false, 0, err
	}
	if parentID == "" {
		// Missing parent directory implies the file does not exist.
		return false, false, 0, nil
	}

	name := path.Base(filePath)
	// 'isDir', 'size' and 'err' are the named returns; ':=' only declares
	// the new 'fileID' here.
	fileID, isDir, size, err := storage.client.ListByName(parentID, name)
	if err != nil {
		return false, false, 0, err
	}
	if fileID == "" {
		return false, false, 0, nil
	}

	storage.savePathID(filePath, fileID)
	return true, isDir, size, nil
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if fileID == "" {
|
||||||
|
return fmt.Errorf("File path '%s' does not exist", filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, err := storage.client.DownloadFile(fileID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
parent := path.Dir(filePath)
|
||||||
|
if parent == "." {
|
||||||
|
parent = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
parentID, err := storage.getIDFromPath(threadIndex, parent, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if parentID == "" {
|
||||||
|
return fmt.Errorf("File path '%s' does not exist", parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
fileID, err := storage.client.UploadFile(parentID, path.Base(filePath), content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||||
|
if err == nil {
|
||||||
|
storage.savePathID(filePath, fileID)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if e, ok := err.(ACDError); ok && e.Status == 409 {
|
||||||
|
LOG_TRACE("ACD_UPLOAD", "File %s already exists", filePath)
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCacheNeeded reports that a local snapshot cache is needed for this
// storage, to avoid downloading/uploading chunks too often when managing
// snapshots.
func (storage *ACDStorage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented reports that the 'MoveFile' method is implemented.
func (storage *ACDStorage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent reports that the storage can guarantee strong
// consistency.
func (storage *ACDStorage) IsStrongConsistent() bool { return true }

// IsFastListing reports that the storage supports fast listing of file
// names.
func (storage *ACDStorage) IsFastListing() bool { return true }

// EnableTestMode enables the test mode; nothing is needed for this storage.
func (storage *ACDStorage) EnableTestMode() {}
|
||||||
201
src/duplicacy_azurestorage.go
Normal file
201
src/duplicacy_azurestorage.go
Normal file
@@ -0,0 +1,201 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gilbertchen/azure-sdk-for-go/storage"
|
||||||
|
)
|
||||||
|
|
||||||
|
// AzureStorage implements a duplicacy storage backend on top of an Azure
// blob container.
type AzureStorage struct {
	StorageBase

	containers []*storage.Container // one container reference per transfer thread
}
|
||||||
|
|
||||||
|
// CreateAzureStorage creates an Azure storage backend for the container
// 'containerName', building one client/container reference per thread so
// threads don't share a client.  It fails if the container does not exist.
// NOTE(review): 'containers[0]' below panics when 'threads' is 0 — confirm
// callers always pass a positive thread count.
func CreateAzureStorage(accountName string, accountKey string,
	containerName string, threads int) (azureStorage *AzureStorage, err error) {

	var containers []*storage.Container
	for i := 0; i < threads; i++ {

		client, err := storage.NewBasicClient(accountName, accountKey)

		if err != nil {
			return nil, err
		}

		blobService := client.GetBlobService()
		container := blobService.GetContainerReference(containerName)
		containers = append(containers, container)
	}

	// Verify the container exists using the first client.
	exist, err := containers[0].Exists()
	if err != nil {
		return nil, err
	}

	if !exist {
		return nil, fmt.Errorf("container %s does not exist", containerName)
	}

	azureStorage = &AzureStorage{
		containers: containers,
	}

	azureStorage.DerivedStorage = azureStorage
	azureStorage.SetDefaultNestingLevels([]int{0}, 0)
	return
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
|
func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
|
||||||
|
type ListBlobsParameters struct {
|
||||||
|
Prefix string
|
||||||
|
Delimiter string
|
||||||
|
Marker string
|
||||||
|
Include string
|
||||||
|
MaxResults uint
|
||||||
|
Timeout uint
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(dir) > 0 && dir[len(dir)-1] != '/' {
|
||||||
|
dir += "/"
|
||||||
|
}
|
||||||
|
dirLength := len(dir)
|
||||||
|
|
||||||
|
parameters := storage.ListBlobsParameters{
|
||||||
|
Prefix: dir,
|
||||||
|
Delimiter: "",
|
||||||
|
}
|
||||||
|
|
||||||
|
subDirs := make(map[string]bool)
|
||||||
|
|
||||||
|
for {
|
||||||
|
|
||||||
|
results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if dir == "snapshots/" {
|
||||||
|
for _, blob := range results.Blobs {
|
||||||
|
name := strings.Split(blob.Name[dirLength:], "/")[0]
|
||||||
|
subDirs[name+"/"] = true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, blob := range results.Blobs {
|
||||||
|
files = append(files, blob.Name[dirLength:])
|
||||||
|
sizes = append(sizes, blob.Properties.ContentLength)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if results.NextMarker == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
parameters.Marker = results.NextMarker
|
||||||
|
}
|
||||||
|
|
||||||
|
if dir == "snapshots/" {
|
||||||
|
|
||||||
|
for subDir := range subDirs {
|
||||||
|
files = append(files, subDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
_, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
source := storage.containers[threadIndex].GetBlobReference(from)
|
||||||
|
destination := storage.containers[threadIndex].GetBlobReference(to)
|
||||||
|
err = destination.Copy(source.GetURL(), nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return storage.DeleteFile(threadIndex, from)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory. Azure blob storage has no real
// directories (names are flat with '/' separators), so this is a no-op.
func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||||
|
err = blob.GetProperties(nil)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "404") {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, false, blob.Properties.ContentLength, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.containers))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
|
tries := 0
|
||||||
|
|
||||||
|
for {
|
||||||
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
|
||||||
|
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||||
|
err = blob.CreateBlockBlobFromReader(reader, nil)
|
||||||
|
|
||||||
|
if err == nil || !strings.Contains(err.Error(), "write: broken pipe") || tries >= 3 {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("AZURE_RETRY", "Connection unexpectedly terminated: %v; retrying", err)
|
||||||
|
tries++
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCacheNeeded reports whether a local snapshot cache is needed for the
// storage to avoid downloading/uploading chunks too often when managing
// snapshots.
func (storage *AzureStorage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented reports that the 'MoveFile' method is implemented.
func (storage *AzureStorage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent reports that the storage can guarantee strong
// consistency.
func (storage *AzureStorage) IsStrongConsistent() bool { return true }

// IsFastListing reports that the storage supports fast listing of file
// names.
func (storage *AzureStorage) IsFastListing() bool { return true }

// EnableTestMode enables the test mode; no special behavior is needed for
// Azure.
func (storage *AzureStorage) EnableTestMode() {}
|
||||||
642
src/duplicacy_b2client.go
Normal file
642
src/duplicacy_b2client.go
Normal file
@@ -0,0 +1,642 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"fmt"
|
||||||
|
"bytes"
|
||||||
|
"time"
|
||||||
|
"sync"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"net/url"
|
||||||
|
"net/http"
|
||||||
|
"math/rand"
|
||||||
|
"io/ioutil"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"encoding/base64"
|
||||||
|
)
|
||||||
|
|
||||||
|
// B2Error describes an error payload returned by the B2 API.
type B2Error struct {
	Status  int    // HTTP status code of the failed request
	Code    string // machine-readable B2 error code
	Message string // human-readable description from the service
}

// Error renders the error as "<status> <message>", satisfying the error
// interface.
func (e *B2Error) Error() string {
	return fmt.Sprintf("%d %s", e.Status, e.Message)
}
|
||||||
|
|
||||||
|
// B2UploadArgument pairs an upload URL with its matching authorization
// token, as returned by b2_get_upload_url.
type B2UploadArgument struct {
	URL   string
	Token string
}

// B2AuthorizationURL is the endpoint for b2_authorize_account. It is a
// variable (not a constant) so tests can point it elsewhere.
var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account"
|
||||||
|
|
||||||
|
// B2Client is a minimal client for the Backblaze B2 API supporting
// multi-threaded uploads/downloads with retry and re-authorization.
type B2Client struct {
	HTTPClient *http.Client

	AccountID        string // account id returned by b2_authorize_account (may differ from the key id)
	ApplicationKeyID string
	ApplicationKey   string
	BucketName       string
	BucketID         string
	StorageDir       string // path prefix inside the bucket; "" or ends with '/'

	// Lock guards the mutable authorization state below
	// (AuthorizationToken, APIURL, DownloadURL, LastAuthorizationTime).
	Lock               sync.Mutex
	AuthorizationToken string
	APIURL             string
	DownloadURL        string
	IsAuthorized       bool

	// Per-thread upload endpoints, indexed by threadIndex; cleared on
	// upload failure so a fresh URL is requested on retry.
	UploadURLs   []string
	UploadTokens []string

	Threads        int
	MaximumRetries int
	TestMode       bool // when true, X-Bz-Test-Mode headers are sent to exercise failure paths

	// Unix time of the last successful authorization; used to throttle
	// re-authorization attempts.
	LastAuthorizationTime int64
}
|
||||||
|
|
||||||
|
// B2Escape URL-encodes the given path but keeps the slashes intact: each
// '/'-separated component is query-escaped individually and the components
// are rejoined with '/'.
func B2Escape(path string) string {
	parts := strings.Split(path, "/")
	for i, part := range parts {
		parts[i] = url.QueryEscape(part)
	}
	return strings.Join(parts, "/")
}
|
||||||
|
|
||||||
|
func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
|
||||||
|
|
||||||
|
for storageDir != "" && storageDir[0] == '/' {
|
||||||
|
storageDir = storageDir[1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
|
||||||
|
storageDir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
maximumRetries := 15
|
||||||
|
if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
|
||||||
|
maximumRetries, _ = strconv.Atoi(value)
|
||||||
|
LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &B2Client{
|
||||||
|
HTTPClient: http.DefaultClient,
|
||||||
|
ApplicationKeyID: applicationKeyID,
|
||||||
|
ApplicationKey: applicationKey,
|
||||||
|
DownloadURL: downloadURL,
|
||||||
|
StorageDir: storageDir,
|
||||||
|
UploadURLs: make([]string, threads),
|
||||||
|
UploadTokens: make([]string, threads),
|
||||||
|
Threads: threads,
|
||||||
|
MaximumRetries: maximumRetries,
|
||||||
|
}
|
||||||
|
return client
|
||||||
|
}
|
||||||
|
|
||||||
|
// getAPIURL returns the current API base URL under the client lock, since
// AuthorizeAccount may update it concurrently.
func (client *B2Client) getAPIURL() string {
	client.Lock.Lock()
	defer client.Lock.Unlock()
	return client.APIURL
}
|
||||||
|
|
||||||
|
// getDownloadURL returns the current download base URL under the client
// lock, since AuthorizeAccount may update it concurrently.
func (client *B2Client) getDownloadURL() string {
	client.Lock.Lock()
	defer client.Lock.Unlock()
	return client.DownloadURL
}
|
||||||
|
|
||||||
|
// retry implements the backoff policy for failed B2 API calls. It sleeps
// and returns the updated retry counter, or 0 when the caller should give
// up. If the server supplied a positive Retry-After header, that delay is
// honored and the counter resets to 1; otherwise a randomized exponential
// backoff (capped at 64 seconds) is applied.
func (client *B2Client) retry(retries int, response *http.Response) int {
	if response != nil {
		// Honor a server-provided Retry-After (seconds) if present.
		if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
			retryAfter, _ := strconv.Atoi(backoffList[0])
			if retryAfter >= 1 {
				time.Sleep(time.Duration(retryAfter) * time.Second)
				return 1
			}
		}
	}

	if retries >= client.MaximumRetries+1 {
		// Exhausted; signal the caller to stop retrying.
		return 0
	}
	retries++
	// Exponential backoff: 2^retries, capped at 64.
	delay := 1 << uint(retries)
	if delay > 64 {
		delay = 64
	}
	// Jitter: uniform in [delay/2, delay). Note the float32 is truncated
	// to whole seconds by the Duration conversion.
	delayInSeconds := (rand.Float32() + 1.0) * float32(delay) / 2.0

	time.Sleep(time.Duration(delayInSeconds) * time.Second)
	return retries
}
|
||||||
|
|
||||||
|
// call issues one B2 API request and retries it according to the backoff
// policy until it succeeds or retries are exhausted. The type of 'input'
// selects the request kind:
//   - int: empty body (used for GET/HEAD download requests)
//   - []byte or *RateLimitedReader: an upload; the request is redirected to
//     the per-thread upload URL/token (fetched on demand)
//   - anything else: JSON-encoded API call body
//
// On success it returns the (still open) response body, the response
// headers, and the content length; the caller owns closing the body.
func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
	io.ReadCloser, http.Header, int64, error) {

	var response *http.Response

	retries := 0
	for {
		var inputReader io.Reader
		isUpload := false

		// Build the request body; see the function comment for how the
		// input type is interpreted.
		switch input.(type) {
		default:
			jsonInput, err := json.Marshal(input)
			if err != nil {
				return nil, nil, 0, err
			}
			inputReader = bytes.NewReader(jsonInput)
		case int:
			inputReader = bytes.NewReader([]byte(""))
		case []byte:
			isUpload = true
			inputReader = bytes.NewReader(input.([]byte))
		case *RateLimitedReader:
			isUpload = true
			rateLimitedReader := input.(*RateLimitedReader)
			// Reset so a retried upload re-reads from the start.
			rateLimitedReader.Reset()
			inputReader = rateLimitedReader
		}

		if isUpload {
			// Lazily obtain a per-thread upload URL and token.
			if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
				err := client.getUploadURL(threadIndex)
				if err != nil {
					return nil, nil, 0, err
				}
			}
			requestURL = client.UploadURLs[threadIndex]
		}

		request, err := http.NewRequest(method, requestURL, inputReader)
		if err != nil {
			return nil, nil, 0, err
		}

		// Pick the appropriate Authorization header for the request kind.
		if requestURL == B2AuthorizationURL {
			request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.ApplicationKeyID+":"+client.ApplicationKey)))
		} else if isUpload {
			request.ContentLength, _ = strconv.ParseInt(requestHeaders["Content-Length"], 10, 64)
			request.Header.Set("Authorization", client.UploadTokens[threadIndex])
		} else {
			client.Lock.Lock()
			request.Header.Set("Authorization", client.AuthorizationToken)
			client.Lock.Unlock()
		}

		if requestHeaders != nil {
			for key, value := range requestHeaders {
				request.Header.Set(key, value)
			}
		}

		// In test mode, randomly request server-side failure injection.
		if client.TestMode {
			r := rand.Float32()
			if r < 0.5 && isUpload {
				request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
			} else if r < 0.75 {
				request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
			} else {
				request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
			}
		}

		response, err = client.HTTPClient.Do(request)
		if err != nil {

			// Don't retry when the first authorization request fails
			if requestURL == B2AuthorizationURL && !client.IsAuthorized {
				return nil, nil, 0, err
			}

			LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s' returned an error: %v", threadIndex, requestURL, err)

			retries = client.retry(retries, response)
			if retries <= 0 {
				return nil, nil, 0, err
			}

			// Clear the upload url to request a new one on retry
			if isUpload {
				client.UploadURLs[threadIndex] = ""
				client.UploadTokens[threadIndex] = ""
			}
			continue

		}

		if response.StatusCode < 300 {
			// Success: hand the open body to the caller.
			return response.Body, response.Header, response.ContentLength, nil
		}

		// Decode the error payload (best effort) for logging.
		e := &B2Error{}
		if err := json.NewDecoder(response.Body).Decode(e); err != nil {
			LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned status code %d", threadIndex, method, requestURL, response.StatusCode)
		} else {
			LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned %d %s", threadIndex, method, requestURL, response.StatusCode, e.Message)
		}

		response.Body.Close()

		if response.StatusCode == 401 {
			if requestURL == B2AuthorizationURL {
				return nil, nil, 0, fmt.Errorf("Authorization failure")
			}

			// Attempt authorization again. If authorization is actually not done, run the random backoff
			_, allowed := client.AuthorizeAccount(threadIndex)
			if allowed {
				continue
			}
		} else if response.StatusCode == 403 {
			// Usage cap exceeded; fatal except in test mode where it is
			// injected deliberately.
			if !client.TestMode {
				return nil, nil, 0, fmt.Errorf("B2 cap exceeded")
			}
			continue
		} else if response.StatusCode == 404 {
			// HEAD on a missing file: report "not found" via nil body, nil
			// error.
			if http.MethodHead == method {
				return nil, nil, 0, nil
			}
		} else if response.StatusCode == 416 {
			if http.MethodHead == method {
				// 416 Requested Range Not Satisfiable
				return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
			}
		}

		retries = client.retry(retries, response)
		if retries <= 0 {
			return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
		}

		// Invalidate the per-thread upload endpoint before retrying.
		if isUpload {
			client.UploadURLs[threadIndex] = ""
			client.UploadTokens[threadIndex] = ""
		}
	}

}
|
||||||
|
|
||||||
|
// B2AuthorizeAccountOutput is the JSON response body of
// b2_authorize_account.
type B2AuthorizeAccountOutput struct {
	AccountID          string
	AuthorizationToken string
	APIURL             string
	DownloadURL        string
}
|
||||||
|
|
||||||
|
// AuthorizeAccount performs b2_authorize_account and stores the resulting
// token and URLs on the client. 'allowed' is false when the call was
// skipped because a successful authorization happened within the last 30
// seconds (callers then fall back to backoff instead of re-authorizing).
// NOTE(review): the (err, bool) result order is unidiomatic Go but is part
// of the established interface used by callers.
func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bool) {
	client.Lock.Lock()
	defer client.Lock.Unlock()

	// Don't authorize if the previous one was done less than 30 seconds ago
	if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix()-30 {
		return nil, false
	}

	readCloser, _, _, err := client.call(threadIndex, B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
	if err != nil {
		return err, true
	}

	defer readCloser.Close()

	output := &B2AuthorizeAccountOutput{}

	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
		return err, true
	}

	// The account id may be different from the application key id so we're getting the account id from the returned
	// json object here, which is needed by the b2_list_buckets call.
	client.AccountID = output.AccountID

	client.AuthorizationToken = output.AuthorizationToken
	client.APIURL = output.APIURL
	// A caller-supplied download URL (e.g. a CDN front) takes precedence.
	if client.DownloadURL == "" {
		client.DownloadURL = output.DownloadURL
	}
	LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
	client.IsAuthorized = true

	client.LastAuthorizationTime = time.Now().Unix()

	return nil, true
}
|
||||||
|
|
||||||
|
// ListBucketOutput is one bucket entry in the JSON response of
// b2_list_buckets.
type ListBucketOutput struct {
	AccountID  string
	BucketID   string
	BucketName string
	BucketType string
}
|
||||||
|
|
||||||
|
func (client *B2Client) FindBucket(bucketName string) (err error) {
|
||||||
|
|
||||||
|
input := make(map[string]string)
|
||||||
|
input["accountId"] = client.AccountID
|
||||||
|
input["bucketName"] = bucketName
|
||||||
|
|
||||||
|
url := client.getAPIURL() + "/b2api/v1/b2_list_buckets"
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(0, url, http.MethodPost, nil, input)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := make(map[string][]ListBucketOutput, 0)
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, bucket := range output["buckets"] {
|
||||||
|
if bucket.BucketName == bucketName {
|
||||||
|
client.BucketName = bucket.BucketName
|
||||||
|
client.BucketID = bucket.BucketID
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if client.BucketID == "" {
|
||||||
|
return fmt.Errorf("Bucket %s not found", bucketName)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// B2Entry is one file entry in a B2 listing response.
type B2Entry struct {
	FileID          string
	FileName        string
	Action          string // "upload" for a live file, "hide" for a hide marker
	Size            int64
	UploadTimestamp int64
}

// B2ListFileNamesOutput is the JSON response body of b2_list_file_names /
// b2_list_file_versions; NextFileName/NextFileId drive pagination.
type B2ListFileNamesOutput struct {
	Files        []*B2Entry
	NextFileName string
	NextFileId   string
}
|
||||||
|
|
||||||
|
// ListFileNames lists files under client.StorageDir whose names start with
// startFileName. With singleFile set, only that exact file is considered;
// with includeVersions set, all versions are returned via
// b2_list_file_versions. The special single-file/no-versions combination is
// served by a ranged HEAD request against the download URL, and the B2Entry
// is reconstructed from the response headers. Returned FileNames have the
// StorageDir prefix stripped.
func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {

	// Page size: small values in single-file or test mode to exercise
	// pagination.
	maxFileCount := 1000
	if singleFile {
		if includeVersions {
			maxFileCount = 4
			if client.TestMode {
				maxFileCount = 1
			}
		} else {
			maxFileCount = 1
		}
	} else if client.TestMode {
		maxFileCount = 10
	}

	input := make(map[string]interface{})
	input["bucketId"] = client.BucketID
	input["startFileName"] = client.StorageDir + startFileName
	input["maxFileCount"] = maxFileCount
	input["prefix"] = client.StorageDir

	// Pagination loop; exits when the response carries no NextFileName.
	// NOTE(review): the deferred Close below runs at function return, not
	// per iteration, so bodies from earlier pages stay open until the
	// listing finishes — confirm this is acceptable for long listings.
	for {
		apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
		requestHeaders := map[string]string{}
		requestMethod := http.MethodPost
		var requestInput interface{}
		requestInput = input
		if includeVersions {
			apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
		} else if singleFile {
			// handle a single file with no versions as a special case to download the last byte of the file
			apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+startFileName)
			// requesting byte -1 works for empty files where 0-0 fails with a 416 error
			requestHeaders["Range"] = "bytes=-1"
			// HEAD request
			requestMethod = http.MethodHead
			requestInput = 0
		}
		var readCloser io.ReadCloser
		var responseHeader http.Header
		var err error
		readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
		if err != nil {
			return nil, err
		}

		// readCloser may be nil for a HEAD on a missing file (404).
		if readCloser != nil {
			defer readCloser.Close()
		}

		output := B2ListFileNamesOutput{}

		if singleFile && !includeVersions {
			// Reconstruct the entry from the HEAD response headers.
			if responseHeader == nil {
				LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
				return []*B2Entry{}, nil
			}
			requiredHeaders := []string{
				"x-bz-file-id",
				"x-bz-file-name",
			}
			missingKeys := []string{}
			for _, headerKey := range requiredHeaders {
				if "" == responseHeader.Get(headerKey) {
					missingKeys = append(missingKeys, headerKey)
				}
			}
			if len(missingKeys) > 0 {
				return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
			}
			// construct the B2Entry from the response headers of the download request
			fileID := responseHeader.Get("x-bz-file-id")
			fileName := responseHeader.Get("x-bz-file-name")
			unescapedFileName, err := url.QueryUnescape(fileName)
			if err == nil {
				fileName = unescapedFileName
			} else {
				LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
			}
			fileAction := "upload"
			// byte range that is returned: "bytes #-#/#
			rangeString := responseHeader.Get("Content-Range")
			// total file size; 1 if file has content, 0 if it's empty
			lengthString := responseHeader.Get("Content-Length")
			var fileSize int64
			if "" != rangeString {
				// Total size is the part after '/' in the Content-Range.
				fileSize, _ = strconv.ParseInt(rangeString[strings.Index(rangeString, "/")+1:], 0, 64)
			} else if "" != lengthString {
				// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
				fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
				if fileSize != 0 {
					return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
				}
			} else {
				return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
			}
			fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)

			return []*B2Entry{{fileID, fileName[len(client.StorageDir):], fileAction, fileSize, fileUploadTimestamp}}, nil
		}

		if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
			return nil, err
		}

		// Drain the body so the connection can be reused.
		ioutil.ReadAll(readCloser)

		for _, file := range output.Files {
			// Strip the storage-dir prefix before matching/returning.
			file.FileName = file.FileName[len(client.StorageDir):]
			if singleFile {
				if file.FileName == startFileName {
					files = append(files, file)
					if !includeVersions {
						output.NextFileName = ""
						break
					}
				} else {
					// Past the requested name; stop paging.
					output.NextFileName = ""
					break
				}
			} else {
				if strings.HasPrefix(file.FileName, startFileName) {
					files = append(files, file)
				} else {
					// Past the requested prefix; stop paging.
					output.NextFileName = ""
					break
				}
			}
		}

		if len(output.NextFileName) == 0 {
			break
		}

		input["startFileName"] = output.NextFileName
		if includeVersions {
			input["startFileId"] = output.NextFileId
		}
	}

	return files, nil
}
|
||||||
|
|
||||||
|
func (client *B2Client) DeleteFile(threadIndex int, fileName string, fileID string) (err error) {
|
||||||
|
|
||||||
|
input := make(map[string]string)
|
||||||
|
input["fileName"] = client.StorageDir + fileName
|
||||||
|
input["fileId"] = fileID
|
||||||
|
|
||||||
|
url := client.getAPIURL() + "/b2api/v1/b2_delete_file_version"
|
||||||
|
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// B2HideFileOutput is the JSON response body of b2_hide_file; FileID
// identifies the newly created hide marker.
type B2HideFileOutput struct {
	FileID string
}
|
||||||
|
|
||||||
|
func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID string, err error) {
|
||||||
|
|
||||||
|
input := make(map[string]string)
|
||||||
|
input["bucketId"] = client.BucketID
|
||||||
|
input["fileName"] = client.StorageDir + fileName
|
||||||
|
|
||||||
|
url := client.getAPIURL() + "/b2api/v1/b2_hide_file"
|
||||||
|
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := &B2HideFileOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return output.FileID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
|
||||||
|
|
||||||
|
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
|
||||||
|
|
||||||
|
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
|
||||||
|
return readCloser, len, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// B2GetUploadArgumentOutput is the JSON response body of b2_get_upload_url.
type B2GetUploadArgumentOutput struct {
	BucketID           string
	UploadURL          string
	AuthorizationToken string
}
|
||||||
|
|
||||||
|
func (client *B2Client) getUploadURL(threadIndex int) error {
|
||||||
|
input := make(map[string]string)
|
||||||
|
input["bucketId"] = client.BucketID
|
||||||
|
|
||||||
|
url := client.getAPIURL() + "/b2api/v1/b2_get_upload_url"
|
||||||
|
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := &B2GetUploadArgumentOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
client.UploadURLs[threadIndex] = output.UploadURL
|
||||||
|
client.UploadTokens[threadIndex] = output.AuthorizationToken
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *B2Client) UploadFile(threadIndex int, filePath string, content []byte, rateLimit int) (err error) {
|
||||||
|
|
||||||
|
hasher := sha1.New()
|
||||||
|
hasher.Write(content)
|
||||||
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
|
headers := make(map[string]string)
|
||||||
|
headers["X-Bz-File-Name"] = B2Escape(client.StorageDir + filePath)
|
||||||
|
headers["Content-Length"] = fmt.Sprintf("%d", len(content))
|
||||||
|
headers["Content-Type"] = "application/octet-stream"
|
||||||
|
headers["X-Bz-Content-Sha1"] = hash
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(threadIndex, "", http.MethodPost, headers, CreateRateLimitedReader(content, rateLimit))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
133
src/duplicacy_b2client_test.go
Normal file
133
src/duplicacy_b2client_test.go
Normal file
@@ -0,0 +1,133 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createB2ClientForTest(t *testing.T) (*B2Client, string) {
|
||||||
|
config, err := ioutil.ReadFile("test_storage.conf")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to read config file: %v", err)
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
storages := make(map[string]map[string]string)
|
||||||
|
|
||||||
|
err = json.Unmarshal(config, &storages)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to parse config file: %v", err)
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
b2, found := storages["b2"]
|
||||||
|
if !found {
|
||||||
|
t.Errorf("Failed to find b2 config")
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestB2Client is an integration test against a real B2 bucket (configured
// in test_storage.conf): it cleans the test directory, uploads 20 random
// files named by their SHA-256, downloads each and verifies the hash, then
// deletes them. Test mode is enabled to exercise the retry paths.
func TestB2Client(t *testing.T) {

	b2Client, bucket := createB2ClientForTest(t)
	if b2Client == nil {
		return
	}

	b2Client.TestMode = true

	err, _ := b2Client.AuthorizeAccount(0)
	if err != nil {
		t.Errorf("Failed to authorize the b2 account: %v", err)
		return
	}

	err = b2Client.FindBucket(bucket)
	if err != nil {
		t.Errorf("Failed to find bucket '%s': %v", bucket, err)
		return
	}

	testDirectory := "b2client_test/"

	// Remove any leftovers from previous runs.
	files, err := b2Client.ListFileNames(0, testDirectory, false, false)
	if err != nil {
		t.Errorf("Failed to list files: %v", err)
		return
	}

	for _, file := range files {
		err = b2Client.DeleteFile(0, file.FileName, file.FileID)
		if err != nil {
			t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
		}
	}

	// Upload 20 random blobs, each named by the hex SHA-256 of its content.
	maxSize := 10000
	for i := 0; i < 20; i++ {
		size := rand.Int()%maxSize + 1
		content := make([]byte, size)
		_, err := crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		hash := sha256.Sum256(content)
		name := hex.EncodeToString(hash[:])

		err = b2Client.UploadFile(0, testDirectory+name, content, 100)
		if err != nil {
			t.Errorf("Error uploading file '%s': %v", name, err)
			return
		}
	}

	// Download every file and verify its content hashes back to its name.
	files, err = b2Client.ListFileNames(0, testDirectory, false, false)
	if err != nil {
		t.Errorf("Failed to list files: %v", err)
		return
	}

	for _, file := range files {

		readCloser, _, err := b2Client.DownloadFile(0, file.FileName)
		if err != nil {
			t.Errorf("Error downloading file '%s': %v", file.FileName, err)
			return
		}

		// NOTE(review): deferred inside the loop, so bodies stay open
		// until the test function returns — confirm acceptable here.
		defer readCloser.Close()

		hasher := sha256.New()
		_, err = io.Copy(hasher, readCloser)

		hash := hex.EncodeToString(hasher.Sum(nil))

		if testDirectory+hash != file.FileName {
			t.Errorf("File %s has hash %s", file.FileName, hash)
		}

	}

	// Clean up.
	for _, file := range files {
		err = b2Client.DeleteFile(0, file.FileName, file.FileID)
		if err != nil {
			t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
		}
	}
}
|
||||||
239
src/duplicacy_b2storage.go
Normal file
239
src/duplicacy_b2storage.go
Normal file
@@ -0,0 +1,239 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type B2Storage struct {
|
||||||
|
StorageBase
|
||||||
|
|
||||||
|
client *B2Client
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateB2Storage creates a B2 storage object.
|
||||||
|
func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
|
||||||
|
|
||||||
|
client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
|
||||||
|
|
||||||
|
err, _ = client.AuthorizeAccount(0)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.FindBucket(bucket)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &B2Storage{
|
||||||
|
client: client,
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
|
func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
length := len(dir) + 1
|
||||||
|
|
||||||
|
includeVersions := false
|
||||||
|
if dir == "chunks" {
|
||||||
|
includeVersions = true
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := storage.client.ListFileNames(threadIndex, dir, false, includeVersions)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if dir == "snapshots" {
|
||||||
|
|
||||||
|
subDirs := make(map[string]bool)
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
name := entry.FileName[length:]
|
||||||
|
subDir := strings.Split(name, "/")[0]
|
||||||
|
subDirs[subDir+"/"] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
for subDir := range subDirs {
|
||||||
|
files = append(files, subDir)
|
||||||
|
}
|
||||||
|
} else if dir == "chunks" {
|
||||||
|
lastFile := ""
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.FileName == lastFile {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
lastFile = entry.FileName
|
||||||
|
if entry.Action == "hide" {
|
||||||
|
files = append(files, entry.FileName[length:]+".fsl")
|
||||||
|
} else {
|
||||||
|
files = append(files, entry.FileName[length:])
|
||||||
|
}
|
||||||
|
sizes = append(sizes, entry.Size)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, entry := range entries {
|
||||||
|
files = append(files, entry.FileName[length:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
|
||||||
|
if strings.HasSuffix(filePath, ".fsl") {
|
||||||
|
filePath = filePath[:len(filePath)-len(".fsl")]
|
||||||
|
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
toBeDeleted := false
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.FileName != filePath || (!toBeDeleted && entry.Action != "hide") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
toBeDeleted = true
|
||||||
|
|
||||||
|
err = storage.client.DeleteFile(threadIndex, filePath, entry.FileID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
} else {
|
||||||
|
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, false)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(entries) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
|
filePath := ""
|
||||||
|
|
||||||
|
if strings.HasSuffix(from, ".fsl") {
|
||||||
|
filePath = to
|
||||||
|
if from != to+".fsl" {
|
||||||
|
filePath = ""
|
||||||
|
}
|
||||||
|
} else if strings.HasSuffix(to, ".fsl") {
|
||||||
|
filePath = from
|
||||||
|
if to != from+".fsl" {
|
||||||
|
filePath = ""
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if filePath == "" {
|
||||||
|
LOG_FATAL("STORAGE_MOVE", "Moving file '%s' to '%s' is not supported", from, to)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if filePath == from {
|
||||||
|
_, err = storage.client.HideFile(threadIndex, from)
|
||||||
|
return err
|
||||||
|
} else {
|
||||||
|
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if len(entries) == 0 || entries[0].FileName != filePath || entries[0].Action != "hide" {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *B2Storage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
isFossil := false
|
||||||
|
if strings.HasSuffix(filePath, ".fsl") {
|
||||||
|
isFossil = true
|
||||||
|
filePath = filePath[:len(filePath)-len(".fsl")]
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, isFossil)
|
||||||
|
if err != nil {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(entries) == 0 || entries[0].FileName != filePath {
|
||||||
|
return false, false, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if isFossil {
|
||||||
|
if entries[0].Action == "hide" {
|
||||||
|
return true, false, entries[0].Size, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true, false, entries[0].Size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
|
||||||
|
readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.client.Threads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
|
// managing snapshots.
|
||||||
|
func (storage *B2Storage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// If the 'MoveFile' method is implemented.
|
||||||
|
func (storage *B2Storage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// If the storage can guarantee strong consistency.
|
||||||
|
func (storage *B2Storage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
|
// If the storage supports fast listing of files names.
|
||||||
|
func (storage *B2Storage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
|
// Enable the test mode.
|
||||||
|
func (storage *B2Storage) EnableTestMode() {
|
||||||
|
storage.client.TestMode = true
|
||||||
|
}
|
||||||
1844
src/duplicacy_backupmanager.go
Normal file
1844
src/duplicacy_backupmanager.go
Normal file
File diff suppressed because it is too large
Load Diff
723
src/duplicacy_backupmanager_test.go
Normal file
723
src/duplicacy_backupmanager_test.go
Normal file
@@ -0,0 +1,723 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"runtime/debug"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createRandomFile(path string, maxSize int) {
|
||||||
|
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
size := maxSize/2 + rand.Int()%(maxSize/2)
|
||||||
|
|
||||||
|
buffer := make([]byte, 32*1024)
|
||||||
|
for size > 0 {
|
||||||
|
bytes := size
|
||||||
|
if bytes > cap(buffer) {
|
||||||
|
bytes = cap(buffer)
|
||||||
|
}
|
||||||
|
crypto_rand.Read(buffer[:bytes])
|
||||||
|
bytes, err = file.Write(buffer[:bytes])
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
size -= bytes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func modifyFile(path string, portion float32) {
|
||||||
|
|
||||||
|
stat, err := os.Stat(path)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("MODIFY_FILE", "Can't stat the file %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
modifiedTime := stat.ModTime()
|
||||||
|
|
||||||
|
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("MODIFY_FILE", "Can't open %s for writing: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if file != nil {
|
||||||
|
file.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
size, err := file.Seek(0, 2)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("MODIFY_FILE", "Can't seek to the end of the file %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
length := int(float32(size) * portion)
|
||||||
|
start := rand.Int() % (int(size) - length)
|
||||||
|
|
||||||
|
_, err = file.Seek(int64(start), 0)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("MODIFY_FILE", "Can't seek to the offset %d: %v", start, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer := make([]byte, length)
|
||||||
|
crypto_rand.Read(buffer)
|
||||||
|
|
||||||
|
_, err = file.Write(buffer)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("MODIFY_FILE", "Failed to write to %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
file.Close()
|
||||||
|
file = nil
|
||||||
|
|
||||||
|
// Add 2 seconds to the modified time for the changes to be detectable in quick mode.
|
||||||
|
modifiedTime = modifiedTime.Add(time.Second * 2)
|
||||||
|
err = os.Chtimes(path, modifiedTime, modifiedTime)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("MODIFY_FILE", "Failed to change the modification time of %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
|
||||||
|
stat, err := os.Stat(path)
|
||||||
|
if exists {
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("%s does not exist: %v", path, err)
|
||||||
|
} else if isDir {
|
||||||
|
if !stat.Mode().IsDir() {
|
||||||
|
t.Errorf("%s is not a directory", path)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if stat.Mode().IsDir() {
|
||||||
|
t.Errorf("%s is not a file", path)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if err == nil || !os.IsNotExist(err) {
|
||||||
|
t.Errorf("%s may exist: %v", path, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func truncateFile(path string) {
|
||||||
|
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("TRUNCATE_FILE", "Can't open %s for writing: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
oldSize, err := file.Seek(0, 2)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("TRUNCATE_FILE", "Can't seek to the end of the file %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
newSize := rand.Int63() % oldSize
|
||||||
|
|
||||||
|
err = file.Truncate(newSize)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("TRUNCATE_FILE", "Can't truncate the file %s to size %d: %v", path, newSize, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getFileHash(path string) (hash string) {
|
||||||
|
|
||||||
|
file, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("FILE_HASH", "Can't open %s for reading: %v", path, err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
_, err = io.Copy(hasher, file)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("FILE_HASH", "Can't read file %s: %v", path, err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
func assertRestoreFailures(t *testing.T, failedFiles int, expectedFailedFiles int) {
|
||||||
|
if failedFiles != expectedFailedFiles {
|
||||||
|
t.Errorf("Failed to restore %d instead of %d file(s)", failedFiles, expectedFailedFiles)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestBackupManager(t *testing.T) {
|
||||||
|
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
setTestingT(t)
|
||||||
|
SetLoggingLevel(INFO)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
switch e := r.(type) {
|
||||||
|
case Exception:
|
||||||
|
t.Errorf("%s %s", e.LogID, e.Message)
|
||||||
|
debug.PrintStack()
|
||||||
|
default:
|
||||||
|
t.Errorf("%v", e)
|
||||||
|
debug.PrintStack()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
testDir := path.Join(os.TempDir(), "duplicacy_test")
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
os.MkdirAll(testDir, 0700)
|
||||||
|
|
||||||
|
os.Mkdir(testDir+"/repository1", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1/dir1", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository2", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
|
||||||
|
|
||||||
|
maxFileSize := 1000000
|
||||||
|
//maxFileSize := 200000
|
||||||
|
|
||||||
|
createRandomFile(testDir+"/repository1/file1", maxFileSize)
|
||||||
|
createRandomFile(testDir+"/repository1/file2", maxFileSize)
|
||||||
|
createRandomFile(testDir+"/repository1/dir1/file3", maxFileSize)
|
||||||
|
|
||||||
|
threads := 1
|
||||||
|
|
||||||
|
storage, err := loadStorage(testDir+"/storage", threads)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create storage: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
delay := 0
|
||||||
|
if _, ok := storage.(*ACDStorage); ok {
|
||||||
|
delay = 1
|
||||||
|
}
|
||||||
|
if _, ok := storage.(*OneDriveStorage); ok {
|
||||||
|
delay = 5
|
||||||
|
}
|
||||||
|
|
||||||
|
password := "duplicacy"
|
||||||
|
|
||||||
|
cleanStorage(storage)
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
dataShards := 0
|
||||||
|
parityShards := 0
|
||||||
|
if testErasureCoding {
|
||||||
|
dataShards = 5
|
||||||
|
parityShards = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
if testFixedChunkSize {
|
||||||
|
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
|
||||||
|
t.Errorf("Failed to initialize the storage")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "", dataShards, parityShards) {
|
||||||
|
t.Errorf("Failed to initialize the storage")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
|
||||||
|
backupManager.SetupSnapshotCache("default")
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
|
||||||
|
t.Errorf("File %s does not exist", f)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
modifyFile(testDir+"/repository1/file1", 0.1)
|
||||||
|
modifyFile(testDir+"/repository1/file2", 0.2)
|
||||||
|
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Truncate file2 and add a few empty directories
|
||||||
|
truncateFile(testDir + "/repository1/file2")
|
||||||
|
os.Mkdir(testDir+"/repository1/dir2", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1/dir4", 0700)
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false, 0, false)
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
// Create some directories and files under repository2 that will be deleted during restore
|
||||||
|
os.Mkdir(testDir+"/repository2/dir5", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository2/dir5/dir6", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository2/dir7", 0700)
|
||||||
|
createRandomFile(testDir+"/repository2/file4", 100)
|
||||||
|
createRandomFile(testDir+"/repository2/dir5/file5", 100)
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// These files/dirs should not exist because deleteMode == true
|
||||||
|
checkExistence(t, testDir+"/repository2/dir5", false, false)
|
||||||
|
checkExistence(t, testDir+"/repository2/dir5/dir6", false, false)
|
||||||
|
checkExistence(t, testDir+"/repository2/dir7", false, false)
|
||||||
|
checkExistence(t, testDir+"/repository2/file4", false, false)
|
||||||
|
checkExistence(t, testDir+"/repository2/dir5/file5", false, false)
|
||||||
|
|
||||||
|
// These empty dirs should exist
|
||||||
|
checkExistence(t, testDir+"/repository2/dir2", true, true)
|
||||||
|
checkExistence(t, testDir+"/repository2/dir2/dir3", true, true)
|
||||||
|
checkExistence(t, testDir+"/repository2/dir4", true, true)
|
||||||
|
|
||||||
|
// Remove file2 and dir1/file3 and restore them from revision 3
|
||||||
|
os.Remove(testDir + "/repository1/file2")
|
||||||
|
os.Remove(testDir + "/repository1/dir1/file3")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
failedFiles = backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"} /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
numberOfSnapshots := backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||||
|
if numberOfSnapshots != 3 {
|
||||||
|
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
|
||||||
|
}
|
||||||
|
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
|
||||||
|
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||||
|
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
|
||||||
|
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
|
||||||
|
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||||
|
if numberOfSnapshots != 2 {
|
||||||
|
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
|
||||||
|
}
|
||||||
|
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
|
||||||
|
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||||
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
|
||||||
|
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
|
||||||
|
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
|
||||||
|
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||||
|
if numberOfSnapshots != 3 {
|
||||||
|
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
|
||||||
|
}
|
||||||
|
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
|
||||||
|
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||||
|
|
||||||
|
/*buf := make([]byte, 1<<16)
|
||||||
|
runtime.Stack(buf, true)
|
||||||
|
fmt.Printf("%s", buf)*/
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create file with random file with certain seed
|
||||||
|
func createRandomFileSeeded(path string, maxSize int, seed int64) {
|
||||||
|
rand.Seed(seed)
|
||||||
|
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
size := maxSize/2 + rand.Int()%(maxSize/2)
|
||||||
|
|
||||||
|
buffer := make([]byte, 32*1024)
|
||||||
|
for size > 0 {
|
||||||
|
bytes := size
|
||||||
|
if bytes > cap(buffer) {
|
||||||
|
bytes = cap(buffer)
|
||||||
|
}
|
||||||
|
rand.Read(buffer[:bytes])
|
||||||
|
bytes, err = file.Write(buffer[:bytes])
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
size -= bytes
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func corruptFile(path string, start int, length int, seed int64) {
|
||||||
|
rand.Seed(seed)
|
||||||
|
|
||||||
|
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CORRUPT_FILE", "Can't open %s for writing: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if file != nil {
|
||||||
|
file.Close()
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
_, err = file.Seek(int64(start), 0)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CORRUPT_FILE", "Can't seek to the offset %d: %v", start, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer := make([]byte, length)
|
||||||
|
rand.Read(buffer)
|
||||||
|
|
||||||
|
_, err = file.Write(buffer)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CORRUPT_FILE", "Failed to write to %s: %v", path, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestPersistRestore(t *testing.T) {
|
||||||
|
// We want deterministic output here so we can test the expected files are corrupted by missing or corrupt chunks
|
||||||
|
// There use rand functions with fixed seed, and known keys
|
||||||
|
|
||||||
|
setTestingT(t)
|
||||||
|
SetLoggingLevel(INFO)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
switch e := r.(type) {
|
||||||
|
case Exception:
|
||||||
|
t.Errorf("%s %s", e.LogID, e.Message)
|
||||||
|
debug.PrintStack()
|
||||||
|
default:
|
||||||
|
t.Errorf("%v", e)
|
||||||
|
debug.PrintStack()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
testDir := path.Join(os.TempDir(), "duplicacy_test")
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
os.MkdirAll(testDir, 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1/dir1", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository2", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository3", 0700)
|
||||||
|
os.Mkdir(testDir+"/repository3/.duplicacy", 0700)
|
||||||
|
|
||||||
|
maxFileSize := 1000000
|
||||||
|
//maxFileSize := 200000
|
||||||
|
|
||||||
|
createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize,1)
|
||||||
|
createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize,2)
|
||||||
|
createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize,3)
|
||||||
|
|
||||||
|
threads := 1
|
||||||
|
|
||||||
|
password := "duplicacy"
|
||||||
|
|
||||||
|
// We want deterministic output, plus ability to test encrypted storage
|
||||||
|
// So make unencrypted storage with default keys, and encrypted as bit-identical copy of this but with password
|
||||||
|
unencStorage, err := loadStorage(testDir+"/unenc_storage", threads)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create storage: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
delay := 0
|
||||||
|
if _, ok := unencStorage.(*ACDStorage); ok {
|
||||||
|
delay = 1
|
||||||
|
}
|
||||||
|
if _, ok := unencStorage.(*OneDriveStorage); ok {
|
||||||
|
delay = 5
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
cleanStorage(unencStorage)
|
||||||
|
|
||||||
|
if !ConfigStorage(unencStorage, 16384, 100, 64*1024, 256*1024, 16*1024, "", nil, false, "", 0, 0) {
|
||||||
|
t.Errorf("Failed to initialize the unencrypted storage")
|
||||||
|
}
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
unencConfig, _, err := DownloadConfig(unencStorage, "")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to download storage config: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make encrypted storage
|
||||||
|
storage, err := loadStorage(testDir+"/enc_storage", threads)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create encrypted storage: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
cleanStorage(storage)
|
||||||
|
|
||||||
|
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, unencConfig, true, "", 0, 0) {
|
||||||
|
t.Errorf("Failed to initialize the encrypted storage")
|
||||||
|
}
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
// do unencrypted backup
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
unencBackupManager := CreateBackupManager("host1", unencStorage, testDir, "", "", "", false)
|
||||||
|
unencBackupManager.SetupSnapshotCache("default")
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
|
||||||
|
// do encrypted backup
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
encBackupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
|
||||||
|
encBackupManager.SetupSnapshotCache("default")
|
||||||
|
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
|
encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
|
||||||
|
// check snapshots
|
||||||
|
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||||
|
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||||
|
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||||
|
|
||||||
|
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||||
|
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||||
|
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||||
|
|
||||||
|
// check functions
|
||||||
|
checkAllUncorrupted := func(cmpRepository string) {
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
|
||||||
|
t.Errorf("File %s does not exist", f)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkMissingFile := func(cmpRepository string, expectMissing string) {
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
_, err := os.Stat(testDir + cmpRepository + "/" + f)
|
||||||
|
if err==nil {
|
||||||
|
if f==expectMissing {
|
||||||
|
t.Errorf("File %s exists, expected to be missing", f)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
if f!=expectMissing {
|
||||||
|
t.Errorf("File %s does not exist", f)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
|
||||||
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
|
if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
|
||||||
|
t.Errorf("File %s does not exist", f)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
|
||||||
|
if (f==expectCorrupted) {
|
||||||
|
if hash1 == hash2 {
|
||||||
|
t.Errorf("File %s has same hashes, expected to be corrupted: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
|
||||||
|
} else {
|
||||||
|
if hash1 != hash2 {
|
||||||
|
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// test restore all uncorrupted to repository3
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
|
||||||
|
failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository3")
|
||||||
|
|
||||||
|
// test for corrupt files and -persist
|
||||||
|
// corrupt a chunk
|
||||||
|
chunkToCorrupt1 := "/4d/538e5dfd2b08e782bfeb56d1360fb5d7eb9d8c4b2531cc2fca79efbaec910c"
|
||||||
|
// this should affect file1
|
||||||
|
chunkToCorrupt2 := "/2b/f953a766d0196ce026ae259e76e3c186a0e4bcd3ce10f1571d17f86f0a5497"
|
||||||
|
// this should affect dir1/file3
|
||||||
|
|
||||||
|
for i := 0; i < 2; i++ {
|
||||||
|
if i==0 {
|
||||||
|
// test corrupt chunks
|
||||||
|
corruptFile(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1, 128, 128, 4)
|
||||||
|
corruptFile(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2, 128, 128, 4)
|
||||||
|
} else {
|
||||||
|
// test missing chunks
|
||||||
|
os.Remove(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1)
|
||||||
|
os.Remove(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2)
|
||||||
|
}
|
||||||
|
|
||||||
|
// check snapshots with --persist (allowFailures == true)
|
||||||
|
// this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
|
||||||
|
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||||
|
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||||
|
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
|
||||||
|
|
||||||
|
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||||
|
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||||
|
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
|
||||||
|
|
||||||
|
|
||||||
|
// test restore corrupted, inPlace = true, corrupted files will have hash failures
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file1 to be corrupted
|
||||||
|
checkCorruptedFile("/repository2", "file1")
|
||||||
|
|
||||||
|
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file3 to be corrupted
|
||||||
|
checkCorruptedFile("/repository2", "dir1/file3")
|
||||||
|
|
||||||
|
//SetLoggingLevel(DEBUG)
|
||||||
|
// test restore corrupted, inPlace = false, corrupted files will be missing
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file1 to be corrupted
|
||||||
|
checkMissingFile("/repository2", "file1")
|
||||||
|
|
||||||
|
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file3 to be corrupted
|
||||||
|
checkMissingFile("/repository2", "dir1/file3")
|
||||||
|
|
||||||
|
// test restore corrupted files from different backups, inPlace = true
|
||||||
|
// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
|
||||||
|
// the latter will not touch the existing file3 with correct hash
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository2")
|
||||||
|
|
||||||
|
// restore to repository3, with overwrite and allowFailures (true/false), quickMode = false (use hashes)
|
||||||
|
// should always succeed as uncorrupted files already exist with correct hash, so these will be ignored
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository3")
|
||||||
|
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository3")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
235
src/duplicacy_benchmark.go
Normal file
235
src/duplicacy_benchmark.go
Normal file
@@ -0,0 +1,235 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compression bool, encryption bool, annotation string) {
|
||||||
|
|
||||||
|
config := CreateConfig()
|
||||||
|
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
|
||||||
|
config.AverageChunkSize = chunkSize
|
||||||
|
config.MaximumChunkSize = chunkSize * 4
|
||||||
|
config.MinimumChunkSize = chunkSize / 4
|
||||||
|
config.ChunkSeed = []byte("duplicacy")
|
||||||
|
|
||||||
|
config.HashKey = DEFAULT_KEY
|
||||||
|
config.IDKey = DEFAULT_KEY
|
||||||
|
|
||||||
|
maker := CreateChunkMaker(config, false)
|
||||||
|
|
||||||
|
startTime := float64(time.Now().UnixNano()) / 1e9
|
||||||
|
numberOfChunks := 0
|
||||||
|
reader.Seek(0, os.SEEK_SET)
|
||||||
|
maker.ForEachChunk(reader,
|
||||||
|
func(chunk *Chunk, final bool) {
|
||||||
|
if compression {
|
||||||
|
key := ""
|
||||||
|
if encryption {
|
||||||
|
key = "0123456789abcdef0123456789abcdef"
|
||||||
|
}
|
||||||
|
err := chunk.Encrypt([]byte(key), "", false)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
config.PutChunk(chunk)
|
||||||
|
numberOfChunks++
|
||||||
|
},
|
||||||
|
func(size int64, hash string) (io.Reader, bool) {
|
||||||
|
return nil, false
|
||||||
|
})
|
||||||
|
|
||||||
|
runningTime := float64(time.Now().UnixNano())/1e9 - startTime
|
||||||
|
speed := int64(float64(fileSize) / runningTime)
|
||||||
|
LOG_INFO("BENCHMARK_SPLIT", "Split %s bytes into %d chunks %s in %.2fs: %s/s", PrettySize(fileSize), numberOfChunks, annotation,
|
||||||
|
runningTime, PrettySize(speed))
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkRun(threads int, chunkCount int, job func(threadIndex int, chunkIndex int)) {
|
||||||
|
indexChannel := make(chan int, chunkCount)
|
||||||
|
stopChannel := make(chan int, threads)
|
||||||
|
finishChannel := make(chan int, threads)
|
||||||
|
|
||||||
|
// Start the uploading goroutines
|
||||||
|
for i := 0; i < threads; i++ {
|
||||||
|
go func(threadIndex int) {
|
||||||
|
defer CatchLogException()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case chunkIndex := <-indexChannel:
|
||||||
|
job(threadIndex, chunkIndex)
|
||||||
|
finishChannel <- 0
|
||||||
|
case <-stopChannel:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < chunkCount; i++ {
|
||||||
|
indexChannel <- i
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < chunkCount; i++ {
|
||||||
|
<-finishChannel
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < threads; i++ {
|
||||||
|
stopChannel <- 0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark(localDirectory string, storage Storage, fileSize int64, chunkSize int, chunkCount int, uploadThreads int, downloadThreads int) bool {
|
||||||
|
|
||||||
|
filename := filepath.Join(localDirectory, "benchmark.dat")
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
os.Remove(filename)
|
||||||
|
}()
|
||||||
|
|
||||||
|
LOG_INFO("BENCHMARK_GENERATE", "Generating %s byte random data in memory", PrettySize(fileSize))
|
||||||
|
data := make([]byte, fileSize)
|
||||||
|
_, err := rand.Read(data)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_RAND", "Failed to generate random data: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime := float64(time.Now().UnixNano()) / 1e9
|
||||||
|
LOG_INFO("BENCHMARK_WRITE", "Writing random data to local disk")
|
||||||
|
err = ioutil.WriteFile(filename, data, 0600)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_WRITE", "Failed to write the random data: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
runningTime := float64(time.Now().UnixNano())/1e9 - startTime
|
||||||
|
speed := int64(float64(fileSize) / runningTime)
|
||||||
|
LOG_INFO("BENCHMARK_WRITE", "Wrote %s bytes in %.2fs: %s/s", PrettySize(fileSize), runningTime, PrettySize(speed))
|
||||||
|
|
||||||
|
startTime = float64(time.Now().UnixNano()) / 1e9
|
||||||
|
LOG_INFO("BENCHMARK_READ", "Reading the random data from local disk")
|
||||||
|
file, err := os.Open(filename)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_OPEN", "Failed to open the random data file: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
segment := make([]byte, 1024*1024)
|
||||||
|
for err == nil {
|
||||||
|
_, err = file.Read(segment)
|
||||||
|
}
|
||||||
|
if err != io.EOF {
|
||||||
|
LOG_ERROR("BENCHMARK_OPEN", "Failed to read the random data file: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
file.Close()
|
||||||
|
runningTime = float64(time.Now().UnixNano())/1e9 - startTime
|
||||||
|
speed = int64(float64(fileSize) / runningTime)
|
||||||
|
LOG_INFO("BENCHMARK_READ", "Read %s bytes in %.2fs: %s/s", PrettySize(fileSize), runningTime, PrettySize(speed))
|
||||||
|
|
||||||
|
buffer := bytes.NewReader(data)
|
||||||
|
benchmarkSplit(buffer, fileSize, chunkSize, false, false, "without compression/encryption")
|
||||||
|
benchmarkSplit(buffer, fileSize, chunkSize, true, false, "with compression but without encryption")
|
||||||
|
benchmarkSplit(buffer, fileSize, chunkSize, true, true, "with compression and encryption")
|
||||||
|
|
||||||
|
storage.CreateDirectory(0, "benchmark")
|
||||||
|
existingFiles, _, err := storage.ListFiles(0, "benchmark/")
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_LIST", "Failed to list the benchmark directory: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var existingChunks []string
|
||||||
|
for _, f := range existingFiles {
|
||||||
|
if len(f) > 0 && f[len(f)-1] != '/' {
|
||||||
|
existingChunks = append(existingChunks, "benchmark/"+f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(existingChunks) > 0 {
|
||||||
|
LOG_INFO("BENCHMARK_DELETE", "Deleting %d temporary files from previous benchmark runs", len(existingChunks))
|
||||||
|
benchmarkRun(uploadThreads, len(existingChunks), func(threadIndex int, chunkIndex int) {
|
||||||
|
storage.DeleteFile(threadIndex, existingChunks[chunkIndex])
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
chunks := make([][]byte, chunkCount)
|
||||||
|
chunkHashes := make([]string, chunkCount)
|
||||||
|
LOG_INFO("BENCHMARK_GENERATE", "Generating %d chunks", chunkCount)
|
||||||
|
for i := 0; i < chunkCount; i++ {
|
||||||
|
chunks[i] = make([]byte, chunkSize)
|
||||||
|
_, err = rand.Read(chunks[i])
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_RAND", "Failed to generate random data: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
hashInBytes := sha256.Sum256(chunks[i])
|
||||||
|
chunkHashes[i] = hex.EncodeToString(hashInBytes[:])
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
startTime = float64(time.Now().UnixNano()) / 1e9
|
||||||
|
benchmarkRun(uploadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
|
||||||
|
err := storage.UploadFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex), chunks[chunkIndex])
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_UPLOAD", "Failed to upload the chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
runningTime = float64(time.Now().UnixNano())/1e9 - startTime
|
||||||
|
speed = int64(float64(chunkSize*chunkCount) / runningTime)
|
||||||
|
LOG_INFO("BENCHMARK_UPLOAD", "Uploaded %s bytes in %.2fs: %s/s", PrettySize(int64(chunkSize*chunkCount)), runningTime, PrettySize(speed))
|
||||||
|
|
||||||
|
config := CreateConfig()
|
||||||
|
|
||||||
|
startTime = float64(time.Now().UnixNano()) / 1e9
|
||||||
|
hashError := false
|
||||||
|
benchmarkRun(downloadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
|
||||||
|
chunk := config.GetChunk()
|
||||||
|
chunk.Reset(false)
|
||||||
|
err := storage.DownloadFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex), chunk)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BENCHMARK_DOWNLOAD", "Failed to download the chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hashInBytes := sha256.Sum256(chunk.GetBytes())
|
||||||
|
hash := hex.EncodeToString(hashInBytes[:])
|
||||||
|
if hash != chunkHashes[chunkIndex] {
|
||||||
|
LOG_WARN("BENCHMARK_HASH", "Chunk %d has mismatched hashes: %s != %s", chunkIndex, chunkHashes[chunkIndex], hash)
|
||||||
|
hashError = true
|
||||||
|
}
|
||||||
|
|
||||||
|
config.PutChunk(chunk)
|
||||||
|
})
|
||||||
|
|
||||||
|
runningTime = float64(time.Now().UnixNano())/1e9 - startTime
|
||||||
|
speed = int64(float64(chunkSize*chunkCount) / runningTime)
|
||||||
|
LOG_INFO("BENCHMARK_DOWNLOAD", "Downloaded %s bytes in %.2fs: %s/s", PrettySize(int64(chunkSize*chunkCount)), runningTime, PrettySize(speed))
|
||||||
|
|
||||||
|
if !hashError {
|
||||||
|
benchmarkRun(uploadThreads, chunkCount, func(threadIndex int, chunkIndex int) {
|
||||||
|
storage.DeleteFile(threadIndex, fmt.Sprintf("benchmark/chunk%d", chunkIndex))
|
||||||
|
})
|
||||||
|
LOG_INFO("BENCHMARK_DELETE", "Deleted %d temporary files from the storage", chunkCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
625
src/duplicacy_chunk.go
Normal file
625
src/duplicacy_chunk.go
Normal file
@@ -0,0 +1,625 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"compress/zlib"
|
||||||
|
"crypto/aes"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/cipher"
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/binary"
|
||||||
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
|
"github.com/bkaradzic/go-lz4"
|
||||||
|
"github.com/minio/highwayhash"
|
||||||
|
"github.com/klauspost/reedsolomon"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
|
||||||
|
// we maintain a pool of previously used buffers.
|
||||||
|
var chunkBufferPool chan *bytes.Buffer = make(chan *bytes.Buffer, runtime.NumCPU()*16)
|
||||||
|
|
||||||
|
func AllocateChunkBuffer() (buffer *bytes.Buffer) {
|
||||||
|
select {
|
||||||
|
case buffer = <-chunkBufferPool:
|
||||||
|
default:
|
||||||
|
buffer = new(bytes.Buffer)
|
||||||
|
}
|
||||||
|
return buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
func ReleaseChunkBuffer(buffer *bytes.Buffer) {
|
||||||
|
select {
|
||||||
|
case chunkBufferPool <- buffer:
|
||||||
|
default:
|
||||||
|
LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk buffer due to a full pool")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Chunk is the object being passed between the chunk maker, the chunk uploader, and chunk downloader. It can be
|
||||||
|
// read and written like a bytes.Buffer, and provides convenient functions to calculate the hash and id of the chunk.
|
||||||
|
// Chunk is the object being passed between the chunk maker, the chunk uploader, and chunk downloader. It can be
// read and written like a bytes.Buffer, and provides convenient functions to calculate the hash and id of the chunk.
type Chunk struct {
	buffer *bytes.Buffer // Where the actual data is stored. It may be nil for hash-only chunks, where chunks
	// are only used to compute the hashes

	size int // The size of data stored. This field is needed if buffer is nil

	hasher hash.Hash // Keeps track of the hash of data stored in the buffer. It may be nil, since sometimes
	// it isn't necessary to compute the hash, for instance, when the encrypted data is being
	// read into the primary buffer

	hash []byte // The hash of the chunk data. It is always in the binary format
	id   string // The id of the chunk data (used as the file name for saving the chunk); always in hex format

	config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
	// by the config

	isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
	// encryption, where a snapshot chunk is not encrypted by RSA

	isBroken bool // Indicates the chunk did not download correctly. This is only used for -persist (allowFailures) mode
}
|
||||||
|
|
||||||
|
// Magic word to identify a duplicacy format encrypted file, plus a version number.
|
||||||
|
var ENCRYPTION_BANNER = "duplicacy\000"
|
||||||
|
|
||||||
|
// RSA encrypted chunks start with "duplicacy\002"
|
||||||
|
var ENCRYPTION_VERSION_RSA byte = 2
|
||||||
|
|
||||||
|
var ERASURE_CODING_BANNER = "duplicacy\003"
|
||||||
|
|
||||||
|
// CreateChunk creates a new chunk.
|
||||||
|
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
|
||||||
|
|
||||||
|
var buffer *bytes.Buffer
|
||||||
|
|
||||||
|
if bufferNeeded {
|
||||||
|
buffer = AllocateChunkBuffer()
|
||||||
|
buffer.Reset()
|
||||||
|
if buffer.Cap() < config.MaximumChunkSize {
|
||||||
|
buffer.Grow(config.MaximumChunkSize - buffer.Cap())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Chunk{
|
||||||
|
buffer: buffer,
|
||||||
|
config: config,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetLength returns the length of available data
|
||||||
|
func (chunk *Chunk) GetLength() int {
|
||||||
|
if chunk.buffer != nil {
|
||||||
|
return len(chunk.buffer.Bytes())
|
||||||
|
} else {
|
||||||
|
return chunk.size
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBytes returns data available in this chunk
|
||||||
|
// GetBytes returns data available in this chunk.
// NOTE(review): buffer may be nil for hash-only chunks (see CreateChunk);
// callers are expected to use this only on chunks created with a buffer.
func (chunk *Chunk) GetBytes() []byte {
	return chunk.buffer.Bytes()
}
|
||||||
|
|
||||||
|
// Reset makes the chunk reusable by clearing the existing data in the buffers. 'hashNeeded' indicates whether the
|
||||||
|
// hash of the new data to be read is needed. If the data to be read in is encrypted, there is no need to
|
||||||
|
// calculate the hash so hashNeeded should be 'false'.
|
||||||
|
func (chunk *Chunk) Reset(hashNeeded bool) {
|
||||||
|
if chunk.buffer != nil {
|
||||||
|
chunk.buffer.Reset()
|
||||||
|
}
|
||||||
|
if hashNeeded {
|
||||||
|
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
|
||||||
|
} else {
|
||||||
|
chunk.hasher = nil
|
||||||
|
}
|
||||||
|
chunk.hash = nil
|
||||||
|
chunk.id = ""
|
||||||
|
chunk.size = 0
|
||||||
|
chunk.isSnapshot = false
|
||||||
|
chunk.isBroken = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Write implements the Writer interface.
|
||||||
|
func (chunk *Chunk) Write(p []byte) (int, error) {
|
||||||
|
|
||||||
|
// buffer may be nil, when the chunk is used for computing the hash only.
|
||||||
|
if chunk.buffer == nil {
|
||||||
|
chunk.size += len(p)
|
||||||
|
} else {
|
||||||
|
chunk.buffer.Write(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// hasher may be nil, when the chunk is used to stored encrypted content
|
||||||
|
if chunk.hasher != nil {
|
||||||
|
chunk.hasher.Write(p)
|
||||||
|
}
|
||||||
|
return len(p), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetHash returns the chunk hash.
|
||||||
|
// GetHash returns the chunk hash (binary, not hex), finalizing the running
// hasher and caching the result on first use.  Requires that a hasher was
// installed via Reset(true) before the data was written.
func (chunk *Chunk) GetHash() string {
	if len(chunk.hash) == 0 {
		chunk.hash = chunk.hasher.Sum(nil)
	}

	return string(chunk.hash)
}
|
||||||
|
|
||||||
|
// GetID returns the chunk id.
|
||||||
|
func (chunk *Chunk) GetID() string {
|
||||||
|
if len(chunk.id) == 0 {
|
||||||
|
if len(chunk.hash) == 0 {
|
||||||
|
chunk.hash = chunk.hasher.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := chunk.config.NewKeyedHasher(chunk.config.IDKey)
|
||||||
|
hasher.Write([]byte(chunk.hash))
|
||||||
|
chunk.id = hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
return chunk.id
|
||||||
|
}
|
||||||
|
|
||||||
|
func (chunk *Chunk) VerifyID() {
|
||||||
|
hasher := chunk.config.NewKeyedHasher(chunk.config.HashKey)
|
||||||
|
hasher.Write(chunk.buffer.Bytes())
|
||||||
|
hash := hasher.Sum(nil)
|
||||||
|
hasher = chunk.config.NewKeyedHasher(chunk.config.IDKey)
|
||||||
|
hasher.Write([]byte(hash))
|
||||||
|
chunkID := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
if chunkID != chunk.GetID() {
|
||||||
|
LOG_ERROR("CHUNK_ID", "The chunk id should be %s instead of %s, length: %d", chunkID, chunk.GetID(), len(chunk.buffer.Bytes()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
|
||||||
|
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
|
||||||
|
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {
|
||||||
|
|
||||||
|
var aesBlock cipher.Block
|
||||||
|
var gcm cipher.AEAD
|
||||||
|
var nonce []byte
|
||||||
|
var offset int
|
||||||
|
|
||||||
|
encryptedBuffer := AllocateChunkBuffer()
|
||||||
|
encryptedBuffer.Reset()
|
||||||
|
defer func() {
|
||||||
|
ReleaseChunkBuffer(encryptedBuffer)
|
||||||
|
}()
|
||||||
|
|
||||||
|
if len(encryptionKey) > 0 {
|
||||||
|
|
||||||
|
key := encryptionKey
|
||||||
|
usingRSA := false
|
||||||
|
// Enable RSA encryption only when the chunk is not a snapshot chunk
|
||||||
|
if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
|
||||||
|
randomKey := make([]byte, 32)
|
||||||
|
_, err := rand.Read(randomKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key = randomKey
|
||||||
|
usingRSA = true
|
||||||
|
} else if len(derivationKey) > 0 {
|
||||||
|
hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
|
||||||
|
hasher.Write(encryptionKey)
|
||||||
|
key = hasher.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
aesBlock, err = aes.NewCipher(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
gcm, err = cipher.NewGCM(aesBlock)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start with the magic number and the version number.
|
||||||
|
if usingRSA {
|
||||||
|
// RSA encryption starts "duplicacy\002"
|
||||||
|
encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
|
||||||
|
encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
|
||||||
|
|
||||||
|
// Then the encrypted key
|
||||||
|
encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPublicKey, key, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
|
||||||
|
encryptedBuffer.Write(encryptedKey)
|
||||||
|
} else {
|
||||||
|
encryptedBuffer.Write([]byte(ENCRYPTION_BANNER))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Followed by the nonce
|
||||||
|
nonce = make([]byte, gcm.NonceSize())
|
||||||
|
_, err := rand.Read(nonce)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
encryptedBuffer.Write(nonce)
|
||||||
|
offset = encryptedBuffer.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// offset is either 0 or the length of banner + nonce
|
||||||
|
|
||||||
|
if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
|
||||||
|
deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
|
||||||
|
deflater.Write(chunk.buffer.Bytes())
|
||||||
|
deflater.Close()
|
||||||
|
} else if chunk.config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
|
||||||
|
encryptedBuffer.Write([]byte("LZ4 "))
|
||||||
|
// Make sure we have enough space in encryptedBuffer
|
||||||
|
availableLength := encryptedBuffer.Cap() - len(encryptedBuffer.Bytes())
|
||||||
|
maximumLength := lz4.CompressBound(len(chunk.buffer.Bytes()))
|
||||||
|
if availableLength < maximumLength {
|
||||||
|
encryptedBuffer.Grow(maximumLength - availableLength)
|
||||||
|
}
|
||||||
|
written, err := lz4.Encode(encryptedBuffer.Bytes()[offset+4:], chunk.buffer.Bytes())
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("LZ4 compression error: %v", err)
|
||||||
|
}
|
||||||
|
// written is actually encryptedBuffer[offset + 4:], but we need to move the write pointer
|
||||||
|
// and this seems to be the only way
|
||||||
|
encryptedBuffer.Write(written)
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(encryptionKey) > 0 {
|
||||||
|
|
||||||
|
// PKCS7 is used. The sizes of compressed chunks leak information about the original chunks so we want the padding sizes
|
||||||
|
// to be the maximum allowed by PKCS7
|
||||||
|
dataLength := encryptedBuffer.Len() - offset
|
||||||
|
paddingLength := 256 - dataLength%256
|
||||||
|
|
||||||
|
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
|
||||||
|
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
|
||||||
|
|
||||||
|
// The encrypted data will be appended to the duplicacy banner and the once.
|
||||||
|
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
|
||||||
|
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
|
||||||
|
|
||||||
|
encryptedBuffer.Truncate(len(encryptedBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
if chunk.config.DataShards == 0 || chunk.config.ParityShards == 0 {
|
||||||
|
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start erasure coding
|
||||||
|
encoder, err := reedsolomon.New(chunk.config.DataShards, chunk.config.ParityShards)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
chunkSize := len(encryptedBuffer.Bytes())
|
||||||
|
shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
|
||||||
|
// Append zeros to make the last shard to have the same size as other
|
||||||
|
encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
|
||||||
|
// Grow the buffer for parity shards
|
||||||
|
encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
|
||||||
|
// Now create one slice for each shard, reusing the data in the buffer
|
||||||
|
data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
|
||||||
|
for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
|
||||||
|
data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
|
||||||
|
}
|
||||||
|
// This populates the parity shard
|
||||||
|
encoder.Encode(data)
|
||||||
|
|
||||||
|
// Prepare the chunk to be uploaded
|
||||||
|
chunk.buffer.Reset()
|
||||||
|
// First the banner
|
||||||
|
chunk.buffer.Write([]byte(ERASURE_CODING_BANNER))
|
||||||
|
// Then the header which includes the chunk size, data/parity and a 2-byte checksum
|
||||||
|
header := make([]byte, 14)
|
||||||
|
binary.LittleEndian.PutUint64(header[0:], uint64(chunkSize))
|
||||||
|
binary.LittleEndian.PutUint16(header[8:], uint16(chunk.config.DataShards))
|
||||||
|
binary.LittleEndian.PutUint16(header[10:], uint16(chunk.config.ParityShards))
|
||||||
|
header[12] = header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10]
|
||||||
|
header[13] = header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11]
|
||||||
|
chunk.buffer.Write(header)
|
||||||
|
// Calculate the highway hash for each shard
|
||||||
|
hashKey := make([]byte, 32)
|
||||||
|
for _, part := range data {
|
||||||
|
hasher, err := highwayhash.New(hashKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = hasher.Write(part)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
chunk.buffer.Write(hasher.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Copy the data
|
||||||
|
for _, part := range data {
|
||||||
|
chunk.buffer.Write(part)
|
||||||
|
}
|
||||||
|
// Append the header again for redundancy
|
||||||
|
chunk.buffer.Write(header)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
// derive the key used to encrypt/decrypt files and chunks.
//
// DecryptWithHMACSHA256, when true, makes Decrypt derive the per-chunk key with
// HMAC-SHA256 rather than the config's keyed hasher.
var DecryptWithHMACSHA256 = false

// init enables the HMAC-SHA256 fallback when the DUPLICACY_DECRYPT_WITH_HMACSHA256
// environment variable is present and set to anything other than "0".
func init() {
	if value, found := os.LookupEnv("DUPLICACY_DECRYPT_WITH_HMACSHA256"); found && value != "0" {
		DecryptWithHMACSHA256 = true
	}
}
|
||||||
|
|
||||||
|
// Decrypt decrypts the encrypted data stored in the chunk buffer. If derivationKey is not nil, the actual
|
||||||
|
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
|
||||||
|
func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err error) {
|
||||||
|
|
||||||
|
var offset int
|
||||||
|
|
||||||
|
encryptedBuffer := AllocateChunkBuffer()
|
||||||
|
encryptedBuffer.Reset()
|
||||||
|
defer func() {
|
||||||
|
ReleaseChunkBuffer(encryptedBuffer)
|
||||||
|
}()
|
||||||
|
|
||||||
|
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||||
|
bannerLength := len(ENCRYPTION_BANNER)
|
||||||
|
|
||||||
|
if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {
|
||||||
|
|
||||||
|
// The chunk was encoded with erasure coding
|
||||||
|
if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
|
||||||
|
return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
|
||||||
|
}
|
||||||
|
// Check the header checksum
|
||||||
|
header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
|
||||||
|
if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
|
||||||
|
header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
|
||||||
|
return fmt.Errorf("Erasure coding header corrupted (%x)", header)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read the parameters
|
||||||
|
chunkSize := int(binary.LittleEndian.Uint64(header[0:8]))
|
||||||
|
dataShards := int(binary.LittleEndian.Uint16(header[8:10]))
|
||||||
|
parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
|
||||||
|
shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
|
||||||
|
// This is the length the chunk file should have
|
||||||
|
expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
|
||||||
|
// The minimum length that can be recovered from
|
||||||
|
minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
|
||||||
|
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
|
||||||
|
if len(encryptedBuffer.Bytes()) > expectedLength {
|
||||||
|
LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
|
||||||
|
} else if len(encryptedBuffer.Bytes()) == expectedLength {
|
||||||
|
// Correct size; fall through
|
||||||
|
} else if len(encryptedBuffer.Bytes()) > minimumLength {
|
||||||
|
LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
|
||||||
|
} else {
|
||||||
|
return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Where the hashes start
|
||||||
|
hashOffset := bannerLength + len(header)
|
||||||
|
// Where the data start
|
||||||
|
dataOffset := hashOffset + (dataShards + parityShards) * 32
|
||||||
|
|
||||||
|
data := make([][]byte, dataShards + parityShards)
|
||||||
|
recoveryNeeded := false
|
||||||
|
hashKey := make([]byte, 32)
|
||||||
|
availableShards := 0
|
||||||
|
for i := 0; i < dataShards + parityShards; i++ {
|
||||||
|
start := dataOffset + i * shardSize
|
||||||
|
if start + shardSize > len(encryptedBuffer.Bytes()) {
|
||||||
|
// the current shard is incomplete
|
||||||
|
break
|
||||||
|
}
|
||||||
|
// Now verify the hash
|
||||||
|
hasher, err := highwayhash.New(hashKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
|
||||||
|
if i < dataShards {
|
||||||
|
recoveryNeeded = true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// The shard is good
|
||||||
|
data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
|
||||||
|
availableShards++
|
||||||
|
if availableShards >= dataShards {
|
||||||
|
// We have enough shards to recover; skip the remaining shards
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !recoveryNeeded {
|
||||||
|
// Remove the padding zeros from the last shard
|
||||||
|
encryptedBuffer.Truncate(dataOffset + chunkSize)
|
||||||
|
// Skip the header and hashes
|
||||||
|
encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
|
||||||
|
} else {
|
||||||
|
if availableShards < dataShards {
|
||||||
|
return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Show the validity of shards using a string of * and -
|
||||||
|
slots := ""
|
||||||
|
for _, part := range data {
|
||||||
|
if len(part) != 0 {
|
||||||
|
slots += "*"
|
||||||
|
} else {
|
||||||
|
slots += "-"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
|
||||||
|
encoder, err := reedsolomon.New(dataShards, parityShards)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = encoder.Reconstruct(data)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
|
||||||
|
buffer := AllocateChunkBuffer()
|
||||||
|
buffer.Reset()
|
||||||
|
for i := 0; i < dataShards; i++ {
|
||||||
|
buffer.Write(data[i])
|
||||||
|
}
|
||||||
|
buffer.Truncate(chunkSize)
|
||||||
|
|
||||||
|
ReleaseChunkBuffer(encryptedBuffer)
|
||||||
|
encryptedBuffer = buffer
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(encryptionKey) > 0 {
|
||||||
|
|
||||||
|
key := encryptionKey
|
||||||
|
|
||||||
|
if len(derivationKey) > 0 {
|
||||||
|
var hasher hash.Hash
|
||||||
|
if DecryptWithHMACSHA256 {
|
||||||
|
hasher = hmac.New(sha256.New, []byte(derivationKey))
|
||||||
|
} else {
|
||||||
|
hasher = chunk.config.NewKeyedHasher([]byte(derivationKey))
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher.Write(encryptionKey)
|
||||||
|
key = hasher.Sum(nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
|
||||||
|
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
||||||
|
}
|
||||||
|
|
||||||
|
if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
|
||||||
|
return fmt.Errorf("The storage doesn't seem to be encrypted")
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
|
||||||
|
if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
|
||||||
|
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
|
||||||
|
}
|
||||||
|
|
||||||
|
if encryptionVersion == ENCRYPTION_VERSION_RSA {
|
||||||
|
if chunk.config.rsaPrivateKey == nil {
|
||||||
|
LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
|
||||||
|
return fmt.Errorf("An RSA private key is required to decrypt the chunk")
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
|
||||||
|
|
||||||
|
if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
|
||||||
|
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
|
||||||
|
bannerLength += 2 + int(encryptedKeyLength)
|
||||||
|
|
||||||
|
decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
key = decryptedKey
|
||||||
|
}
|
||||||
|
|
||||||
|
aesBlock, err := aes.NewCipher(key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
gcm, err := cipher.NewGCM(aesBlock)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
offset = bannerLength + gcm.NonceSize()
|
||||||
|
nonce := encryptedBuffer.Bytes()[bannerLength:offset]
|
||||||
|
|
||||||
|
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
|
||||||
|
encryptedBuffer.Bytes()[offset:], nil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
|
||||||
|
if paddingLength == 0 {
|
||||||
|
paddingLength = 256
|
||||||
|
}
|
||||||
|
if len(decryptedBytes) <= paddingLength {
|
||||||
|
return fmt.Errorf("Incorrect padding length %d out of %d bytes", paddingLength, len(decryptedBytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < paddingLength; i++ {
|
||||||
|
padding := decryptedBytes[len(decryptedBytes)-1-i]
|
||||||
|
if padding != byte(paddingLength) {
|
||||||
|
return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
|
||||||
|
decryptedBytes[len(decryptedBytes)-paddingLength:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedBuffer.Truncate(len(decryptedBytes) - paddingLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedBuffer.Read(encryptedBuffer.Bytes()[:offset])
|
||||||
|
|
||||||
|
compressed := encryptedBuffer.Bytes()
|
||||||
|
if len(compressed) > 4 && string(compressed[:4]) == "LZ4 " {
|
||||||
|
chunk.buffer.Reset()
|
||||||
|
decompressed, err := lz4.Decode(chunk.buffer.Bytes(), encryptedBuffer.Bytes()[4:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
chunk.buffer.Write(decompressed)
|
||||||
|
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
|
||||||
|
chunk.hasher.Write(decompressed)
|
||||||
|
chunk.hash = nil
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
inflater, err := zlib.NewReader(encryptedBuffer)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer inflater.Close()
|
||||||
|
|
||||||
|
chunk.buffer.Reset()
|
||||||
|
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
|
||||||
|
chunk.hash = nil
|
||||||
|
|
||||||
|
if _, err = io.Copy(chunk, inflater); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
136
src/duplicacy_chunk_test.go
Normal file
136
src/duplicacy_chunk_test.go
Normal file
@@ -0,0 +1,136 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"crypto/rsa"
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestErasureCoding(t *testing.T) {
|
||||||
|
key := []byte("duplicacydefault")
|
||||||
|
|
||||||
|
config := CreateConfig()
|
||||||
|
config.HashKey = key
|
||||||
|
config.IDKey = key
|
||||||
|
config.MinimumChunkSize = 100
|
||||||
|
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
|
||||||
|
config.DataShards = 5
|
||||||
|
config.ParityShards = 2
|
||||||
|
|
||||||
|
chunk := CreateChunk(config, true)
|
||||||
|
chunk.Reset(true)
|
||||||
|
data := make([]byte, 100)
|
||||||
|
for i := 0; i < len(data); i++ {
|
||||||
|
data[i] = byte(i)
|
||||||
|
}
|
||||||
|
chunk.Write(data)
|
||||||
|
err := chunk.Encrypt([]byte(""), "", false)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to encrypt the test data: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedData := make([]byte, chunk.GetLength())
|
||||||
|
copy(encryptedData, chunk.GetBytes())
|
||||||
|
|
||||||
|
crypto_rand.Read(encryptedData[280:300])
|
||||||
|
|
||||||
|
chunk.Reset(false)
|
||||||
|
chunk.Write(encryptedData)
|
||||||
|
err = chunk.Decrypt([]byte(""), "")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to decrypt the data: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestChunkBasic round-trips randomly sized random data through Encrypt/Decrypt 500
// times and verifies that the hash, the ID and the content survive unchanged.
// testRSAEncryption and testErasureCoding are package-level switches defined elsewhere
// in the test package (presumably set via test flags — confirm) that enable the RSA
// and erasure-coding variants of the round trip.
func TestChunkBasic(t *testing.T) {

	key := []byte("duplicacydefault")

	config := CreateConfig()
	config.HashKey = key
	config.IDKey = key
	config.MinimumChunkSize = 100
	config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
	maxSize := 1000000

	if testRSAEncryption {
		// Exercise the RSA-wrapped key path with a freshly generated keypair
		privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
		if err != nil {
			t.Errorf("Failed to generate a random private key: %v", err)
		}
		config.rsaPrivateKey = privateKey
		config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
	}

	if testErasureCoding {
		config.DataShards = 5
		config.ParityShards = 2
	}

	for i := 0; i < 500; i++ {

		size := rand.Int() % maxSize

		plainData := make([]byte, size)
		crypto_rand.Read(plainData)
		chunk := CreateChunk(config, true)
		chunk.Reset(true)
		chunk.Write(plainData)

		// Capture the plaintext hash and ID before encryption for later comparison
		hash := chunk.GetHash()
		id := chunk.GetID()

		err := chunk.Encrypt(key, "", false)
		if err != nil {
			t.Errorf("Failed to encrypt the data: %v", err)
			continue
		}

		encryptedData := make([]byte, chunk.GetLength())
		copy(encryptedData, chunk.GetBytes())

		if testErasureCoding {
			// Corrupt up to one shard's worth of bytes, starting past the
			// banner/header/hashes region (24 bytes + 7 hashes of 32 bytes),
			// so Decrypt must exercise the recovery path.
			offset := 24 + 32*7
			start := rand.Int()%(len(encryptedData)-offset) + offset
			length := (len(encryptedData) - offset) / 7
			if start+length > len(encryptedData) {
				length = len(encryptedData) - start
			}
			crypto_rand.Read(encryptedData[start : start+length])
		}

		chunk.Reset(false)
		chunk.Write(encryptedData)
		err = chunk.Decrypt(key, "")
		if err != nil {
			t.Errorf("Failed to decrypt the data: %v", err)
			continue
		}

		decryptedData := chunk.GetBytes()

		if hash != chunk.GetHash() {
			t.Errorf("Original hash: %x, decrypted hash: %x", hash, chunk.GetHash())
		}

		if id != chunk.GetID() {
			t.Errorf("Original id: %s, decrypted hash: %s", id, chunk.GetID())
		}

		if bytes.Compare(plainData, decryptedData) != 0 {
			t.Logf("Original length: %d, decrypted length: %d", len(plainData), len(decryptedData))
			t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
		}

	}

}
|
||||||
518
src/duplicacy_chunkdownloader.go
Normal file
518
src/duplicacy_chunkdownloader.go
Normal file
@@ -0,0 +1,518 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChunkDownloadTask encapsulates the information needed to download one chunk.
type ChunkDownloadTask struct {
	chunk         *Chunk // The chunk that will be downloaded; initially nil
	chunkIndex    int    // The index of this chunk in the chunk list
	chunkHash     string // The chunk hash
	chunkLength   int    // The length of the chunk; may be zero
	needed        bool   // Whether this chunk can be skipped if a local copy exists
	isDownloading bool   // 'true' means the chunk has been downloaded or is being downloaded
}
|
||||||
|
|
||||||
|
// ChunkDownloadCompletion represents the notification sent back by a downloading
// goroutine when a chunk has been downloaded.
type ChunkDownloadCompletion struct {
	chunkIndex int    // The index of this chunk in the chunk list
	chunk      *Chunk // The chunk that has been downloaded
}
|
||||||
|
|
||||||
|
// ChunkDownloader is capable of performing multi-threaded downloading. Chunks to be downloaded are first organized
// as a list of ChunkDownloadTasks, with only the chunkHash field initialized. When a chunk is needed, the
// corresponding ChunkDownloadTask is sent to the downloading goroutines. Once a chunk is downloaded, it will be
// inserted in the completed task list.
type ChunkDownloader struct {
	totalChunkSize      int64 // Total chunk size
	downloadedChunkSize int64 // Downloaded chunk size

	config         *Config      // Associated config
	storage        Storage      // Download from this storage
	snapshotCache  *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
	showStatistics bool         // Show a stats log for each chunk if true
	threads        int          // Number of threads
	allowFailures  bool         // Whether to fail fast on download error, or continue

	taskList       []ChunkDownloadTask // The list of chunks to be downloaded
	completedTasks map[int]bool        // Store downloaded chunks
	lastChunkIndex int                 // a monotonically increasing number indicating the last chunk to be downloaded

	taskQueue         chan ChunkDownloadTask       // Downloading goroutines are waiting on this channel for input
	stopChannel       chan bool                    // Used to stop the downloading goroutines
	completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading

	startTime                 int64 // The time it starts downloading
	numberOfDownloadedChunks  int   // The number of chunks that have been downloaded
	numberOfDownloadingChunks int   // The number of chunks still being downloaded
	numberOfActiveChunks      int   // The number of chunks that is being downloaded or has been downloaded but not reclaimed

	NumberOfFailedChunks int // The number of chunks that can't be downloaded
}
|
||||||
|
|
||||||
|
// CreateChunkDownloader constructs a ChunkDownloader and immediately launches
// 'threads' worker goroutines. Each worker loops pulling tasks from taskQueue and
// downloading them until it receives a signal on stopChannel (sent by Stop).
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkDownloader {
	downloader := &ChunkDownloader{
		config:         config,
		storage:        storage,
		snapshotCache:  snapshotCache,
		showStatistics: showStatistics,
		threads:        threads,
		allowFailures:  allowFailures,

		taskList:       nil,
		completedTasks: make(map[int]bool),
		lastChunkIndex: 0,

		// Buffered so up to 'threads' tasks can be queued without blocking the producer
		taskQueue:         make(chan ChunkDownloadTask, threads),
		stopChannel:       make(chan bool),
		completionChannel: make(chan ChunkDownloadCompletion),

		startTime: time.Now().Unix(),
	}

	// Start the downloading goroutines
	for i := 0; i < downloader.threads; i++ {
		go func(threadIndex int) {
			defer CatchLogException()
			for {
				select {
				case task := <-downloader.taskQueue:
					downloader.Download(threadIndex, task)
				case <-downloader.stopChannel:
					return
				}
			}
		}(i)
	}

	return downloader
}
|
||||||
|
|
||||||
|
// AddFiles adds chunks needed by the specified files to the download list.
//
// Side effect: each file's StartChunk/EndChunk are rewritten so that afterwards they
// index into downloader.taskList instead of snapshot.ChunkHashes.
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry) {

	downloader.taskList = nil
	lastChunkIndex := -1
	maximumChunks := 0 // largest chunk span of any single file; computed but not used here
	downloader.totalChunkSize = 0
	for _, file := range files {
		// Zero-length files reference no chunk data
		if file.Size == 0 {
			continue
		}
		for i := file.StartChunk; i <= file.EndChunk; i++ {
			if lastChunkIndex != i {
				// First time this snapshot chunk is seen: create a task for it
				task := ChunkDownloadTask{
					chunkIndex:  len(downloader.taskList),
					chunkHash:   snapshot.ChunkHashes[i],
					chunkLength: snapshot.ChunkLengths[i],
					needed:      false,
				}
				downloader.taskList = append(downloader.taskList, task)
				downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
			} else {
				// The same chunk index repeats, i.e. adjacent files share this chunk.
				// NOTE(review): 'needed' is only set on shared chunks here — confirm the
				// intended interaction with Prefetch's task.needed check.
				downloader.taskList[len(downloader.taskList)-1].needed = true
			}
			lastChunkIndex = i
		}
		// Remap the file's chunk range to indices in taskList
		file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
		file.EndChunk = len(downloader.taskList) - 1
		if file.EndChunk-file.StartChunk > maximumChunks {
			maximumChunks = file.EndChunk - file.StartChunk
		}
	}
}
|
||||||
|
|
||||||
|
// AddChunk appends a single chunk to the download list and, if a download slot is
// free, dispatches it to the workers immediately. It returns the task index of the
// new chunk, which can later be passed to WaitForChunk.
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {

	task := ChunkDownloadTask{
		chunkIndex:    len(downloader.taskList),
		chunkHash:     chunkHash,
		chunkLength:   0,
		needed:        true,
		isDownloading: false,
	}
	downloader.taskList = append(downloader.taskList, task)
	// Dispatch right away if the number of in-flight/unreclaimed chunks is below the limit
	if downloader.numberOfActiveChunks < downloader.threads {
		downloader.taskQueue <- task
		downloader.numberOfDownloadingChunks++
		downloader.numberOfActiveChunks++
		downloader.taskList[len(downloader.taskList)-1].isDownloading = true
	}
	return len(downloader.taskList) - 1
}
|
||||||
|
|
||||||
|
// Prefetch adds up to 'threads' chunks needed by a file to the download list.
func (downloader *ChunkDownloader) Prefetch(file *Entry) {

	// Any chunks before the first chunk of this file are not needed any more, so they can be reclaimed.
	downloader.Reclaim(file.StartChunk)

	for i := file.StartChunk; i <= file.EndChunk; i++ {
		task := &downloader.taskList[i]
		if task.needed {
			if !task.isDownloading {
				// Stop prefetching once every download slot is occupied
				if downloader.numberOfActiveChunks >= downloader.threads {
					return
				}

				LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
					downloader.config.GetChunkIDFromHash(task.chunkHash))
				downloader.taskQueue <- *task
				task.isDownloading = true
				downloader.numberOfDownloadingChunks++
				downloader.numberOfActiveChunks++
			}
		} else {
			LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
				downloader.config.GetChunkIDFromHash(task.chunkHash))
		}
	}
}
|
||||||
|
|
||||||
|
// Reclaim releases to the chunk pool every downloaded chunk with an index below
// 'chunkIndex' and advances lastChunkIndex to 'chunkIndex'. It is a no-op when
// lastChunkIndex is already at or past 'chunkIndex'.
func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {

	if downloader.lastChunkIndex >= chunkIndex {
		return
	}

	// Return completed chunks below chunkIndex to the pool
	for i := range downloader.completedTasks {
		if i < chunkIndex && downloader.taskList[i].chunk != nil {
			downloader.config.PutChunk(downloader.taskList[i].chunk)
			downloader.taskList[i].chunk = nil
			delete(downloader.completedTasks, i)
			downloader.numberOfActiveChunks--
		}
	}

	for i := downloader.lastChunkIndex; i < chunkIndex; i++ {
		// These chunks are never downloaded if 'isDownloading' is false; note that 'isDownloading' isn't reset to
		// false after a chunk has been downloaded
		if !downloader.taskList[i].isDownloading {
			// Skipped chunks no longer count towards the total byte count.
			// NOTE(review): totalChunkSize is updated atomically here but written
			// plainly in AddFiles — confirm which goroutines read it concurrently.
			atomic.AddInt64(&downloader.totalChunkSize, -int64(downloader.taskList[i].chunkLength))
		}
	}
	downloader.lastChunkIndex = chunkIndex
}
|
||||||
|
|
||||||
|
// Return the chunk last downloaded and its hash
|
||||||
|
func (downloader *ChunkDownloader) GetLastDownloadedChunk() (chunk *Chunk, chunkHash string) {
|
||||||
|
if downloader.lastChunkIndex >= len(downloader.taskList) {
|
||||||
|
return nil, ""
|
||||||
|
}
|
||||||
|
|
||||||
|
task := downloader.taskList[downloader.lastChunkIndex]
|
||||||
|
return task.chunk, task.chunkHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitForChunk waits until the specified chunk is ready and returns it. Along the
// way it reclaims chunks before 'chunkIndex', dispatches the requested chunk if it
// hasn't been queued yet, and prefetches following chunks up to the thread limit.
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {

	// Reclaim any chunk not needed
	downloader.Reclaim(chunkIndex)

	// If we haven't started downloading the specified chunk, download it now
	if !downloader.taskList[chunkIndex].isDownloading {
		LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
			downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
		downloader.taskQueue <- downloader.taskList[chunkIndex]
		downloader.taskList[chunkIndex].isDownloading = true
		downloader.numberOfDownloadingChunks++
		downloader.numberOfActiveChunks++
	}

	// We also need to look ahead and prefetch other chunks as many as permitted by the number of threads
	for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
		if downloader.numberOfActiveChunks >= downloader.threads {
			break
		}
		task := &downloader.taskList[i]
		// Stop at the first chunk that is not needed
		if !task.needed {
			break
		}

		if !task.isDownloading {
			LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
			downloader.taskQueue <- *task
			task.isDownloading = true
			downloader.numberOfDownloadingChunks++
			downloader.numberOfActiveChunks++
		}
	}

	// Now wait until the chunk to be downloaded appears in the completed tasks,
	// recording every completion event that arrives in the meantime
	for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
		completion := <-downloader.completionChannel
		downloader.completedTasks[completion.chunkIndex] = true
		downloader.taskList[completion.chunkIndex].chunk = completion.chunk
		downloader.numberOfDownloadedChunks++
		downloader.numberOfDownloadingChunks--
		if completion.chunk.isBroken {
			downloader.NumberOfFailedChunks++
		}
	}
	return downloader.taskList[chunkIndex].chunk
}
|
||||||
|
|
||||||
|
// WaitForCompletion drains the remaining work: it feeds all not-yet-dispatched tasks
// to the workers and waits until every in-flight download has completed, returning
// each finished chunk to the pool as it arrives.
func (downloader *ChunkDownloader) WaitForCompletion() {

	// Tasks in completedTasks have not been counted by numberOfActiveChunks
	downloader.numberOfActiveChunks -= len(downloader.completedTasks)

	// find the completed task with the largest index; we'll start from the next index
	for index := range downloader.completedTasks {
		if downloader.lastChunkIndex < index {
			downloader.lastChunkIndex = index
		}
	}

	// Looping until there isn't a download task in progress
	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex+1 < len(downloader.taskList) {

		// Wait for a completion event first
		if downloader.numberOfActiveChunks > 0 {
			completion := <-downloader.completionChannel
			downloader.config.PutChunk(completion.chunk)
			downloader.numberOfActiveChunks--
			downloader.numberOfDownloadedChunks++
			downloader.numberOfDownloadingChunks--
			if completion.chunk.isBroken {
				downloader.NumberOfFailedChunks++
			}
		}

		// Pass the tasks one by one to the download queue
		if downloader.lastChunkIndex+1 < len(downloader.taskList) {
			task := &downloader.taskList[downloader.lastChunkIndex+1]
			// Already dispatched earlier; just advance past it
			if task.isDownloading {
				downloader.lastChunkIndex++
				continue
			}
			downloader.taskQueue <- *task
			task.isDownloading = true
			downloader.numberOfDownloadingChunks++
			downloader.numberOfActiveChunks++
			downloader.lastChunkIndex++
		}
	}
}
|
||||||
|
|
||||||
|
// Stop terminates all downloading goroutines
|
||||||
|
func (downloader *ChunkDownloader) Stop() {
|
||||||
|
for downloader.numberOfDownloadingChunks > 0 {
|
||||||
|
completion := <-downloader.completionChannel
|
||||||
|
downloader.completedTasks[completion.chunkIndex] = true
|
||||||
|
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||||
|
downloader.numberOfDownloadedChunks++
|
||||||
|
downloader.numberOfDownloadingChunks--
|
||||||
|
if completion.chunk.isBroken {
|
||||||
|
downloader.NumberOfFailedChunks++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := range downloader.completedTasks {
|
||||||
|
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
||||||
|
downloader.taskList[i].chunk = nil
|
||||||
|
downloader.numberOfActiveChunks--
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < downloader.threads; i++ {
|
||||||
|
downloader.stopChannel <- true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Download downloads a chunk from the storage.
|
||||||
|
func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadTask) bool {
|
||||||
|
|
||||||
|
cachedPath := ""
|
||||||
|
chunk := downloader.config.GetChunk()
|
||||||
|
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
|
||||||
|
|
||||||
|
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
|
||||||
|
|
||||||
|
var exist bool
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// Reset the chunk with a hasher -- we're reading from the cache where chunk are not encrypted or compressed
|
||||||
|
chunk.Reset(true)
|
||||||
|
|
||||||
|
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
|
||||||
|
} else if exist {
|
||||||
|
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
|
||||||
|
} else {
|
||||||
|
actualChunkID := chunk.GetID()
|
||||||
|
if actualChunkID != chunkID {
|
||||||
|
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
|
||||||
|
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
|
||||||
|
} else {
|
||||||
|
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
|
||||||
|
|
||||||
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
|
||||||
|
// will be set up before the encryption
|
||||||
|
chunk.Reset(false)
|
||||||
|
|
||||||
|
// If failures are allowed, complete the task properly
|
||||||
|
completeFailedChunk := func(chunk *Chunk) {
|
||||||
|
if downloader.allowFailures {
|
||||||
|
chunk.isBroken = true
|
||||||
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const MaxDownloadAttempts = 3
|
||||||
|
for downloadAttempt := 0; ; downloadAttempt++ {
|
||||||
|
|
||||||
|
// Find the chunk by ID first.
|
||||||
|
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
// No chunk is found. Have to find it in the fossil pool again.
|
||||||
|
fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
|
||||||
|
if err != nil {
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
|
||||||
|
retry := false
|
||||||
|
|
||||||
|
// Retry for Hubic or WebDAV as it may return 404 even when the chunk exists
|
||||||
|
if _, ok := downloader.storage.(*HubicStorage); ok {
|
||||||
|
retry = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := downloader.storage.(*WebDAVStorage); ok {
|
||||||
|
retry = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if retry && downloadAttempt < MaxDownloadAttempts {
|
||||||
|
LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
||||||
|
if err != nil {
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
||||||
|
} else {
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// We can't download the fossil directly. We have to turn it back into a regular chunk and try
|
||||||
|
// downloading again.
|
||||||
|
err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
|
||||||
|
if err != nil {
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
|
||||||
|
if err != nil {
|
||||||
|
_, isHubic := downloader.storage.(*HubicStorage)
|
||||||
|
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
|
||||||
|
if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
|
||||||
|
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
|
||||||
|
chunk.Reset(false)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
||||||
|
if err != nil {
|
||||||
|
if downloadAttempt < MaxDownloadAttempts {
|
||||||
|
LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
|
||||||
|
chunk.Reset(false)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actualChunkID := chunk.GetID()
|
||||||
|
if actualChunkID != chunkID {
|
||||||
|
if downloadAttempt < MaxDownloadAttempts {
|
||||||
|
LOG_WARN("DOWNLOAD_RETRY", "The chunk %s has a hash id of %s; retrying", chunkID, actualChunkID)
|
||||||
|
chunk.Reset(false)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
completeFailedChunk(chunk)
|
||||||
|
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(cachedPath) > 0 {
|
||||||
|
// Save a copy to the local snapshot cache
|
||||||
|
err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
|
||||||
|
|
||||||
|
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
|
||||||
|
|
||||||
|
now := time.Now().Unix()
|
||||||
|
if now <= downloader.startTime {
|
||||||
|
now = downloader.startTime + 1
|
||||||
|
}
|
||||||
|
speed := downloadedChunkSize / (now - downloader.startTime)
|
||||||
|
remainingTime := int64(0)
|
||||||
|
if speed > 0 {
|
||||||
|
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
|
||||||
|
}
|
||||||
|
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
||||||
|
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
||||||
|
task.chunkIndex+1, chunk.GetLength(),
|
||||||
|
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
|
||||||
|
} else {
|
||||||
|
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
|
||||||
|
}
|
||||||
|
|
||||||
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
|
return true
|
||||||
|
}
|
||||||
297
src/duplicacy_chunkmaker.go
Normal file
297
src/duplicacy_chunkmaker.go
Normal file
@@ -0,0 +1,297 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only use a circular buffer
|
||||||
|
// whose size is double the minimum chunk size.
|
||||||
|
type ChunkMaker struct {
|
||||||
|
maximumChunkSize int
|
||||||
|
minimumChunkSize int
|
||||||
|
bufferCapacity int
|
||||||
|
|
||||||
|
hashMask uint64
|
||||||
|
randomTable [256]uint64
|
||||||
|
|
||||||
|
buffer []byte
|
||||||
|
bufferSize int
|
||||||
|
bufferStart int
|
||||||
|
|
||||||
|
config *Config
|
||||||
|
|
||||||
|
hashOnly bool
|
||||||
|
hashOnlyChunk *Chunk
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateChunkMaker creates a chunk maker. 'randomSeed' is used to generate the character-to-integer table needed by
|
||||||
|
// buzhash.
|
||||||
|
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
|
||||||
|
size := 1
|
||||||
|
for size*2 <= config.AverageChunkSize {
|
||||||
|
size *= 2
|
||||||
|
}
|
||||||
|
|
||||||
|
if size != config.AverageChunkSize {
|
||||||
|
LOG_FATAL("CHUNK_SIZE", "Invalid average chunk size: %d is not a power of 2", config.AverageChunkSize)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
maker := &ChunkMaker{
|
||||||
|
hashMask: uint64(config.AverageChunkSize - 1),
|
||||||
|
maximumChunkSize: config.MaximumChunkSize,
|
||||||
|
minimumChunkSize: config.MinimumChunkSize,
|
||||||
|
bufferCapacity: 2 * config.MinimumChunkSize,
|
||||||
|
config: config,
|
||||||
|
hashOnly: hashOnly,
|
||||||
|
}
|
||||||
|
|
||||||
|
if hashOnly {
|
||||||
|
maker.hashOnlyChunk = CreateChunk(config, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
randomData := sha256.Sum256(config.ChunkSeed)
|
||||||
|
|
||||||
|
for i := 0; i < 64; i++ {
|
||||||
|
for j := 0; j < 4; j++ {
|
||||||
|
maker.randomTable[4*i+j] = binary.LittleEndian.Uint64(randomData[8*j : 8*j+8])
|
||||||
|
}
|
||||||
|
randomData = sha256.Sum256(randomData[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
maker.buffer = make([]byte, 2*config.MinimumChunkSize)
|
||||||
|
|
||||||
|
return maker
|
||||||
|
}
|
||||||
|
|
||||||
|
// rotateLeft rotates 'value' left by 'bits' positions; the shift count is
// taken modulo 64.
func rotateLeft(value uint64, bits uint) uint64 {
	shift := bits & 0x3f
	return (value << shift) | (value >> (64 - shift))
}
|
||||||
|
|
||||||
|
// rotateLeftByOne is the single-bit specialization of rotateLeft.
func rotateLeftByOne(value uint64) uint64 {
	return value<<1 | value>>63
}
|
||||||
|
|
||||||
|
func (maker *ChunkMaker) buzhashSum(sum uint64, data []byte) uint64 {
|
||||||
|
for i := 0; i < len(data); i++ {
|
||||||
|
sum = rotateLeftByOne(sum) ^ maker.randomTable[data[i]]
|
||||||
|
}
|
||||||
|
return sum
|
||||||
|
}
|
||||||
|
|
||||||
|
func (maker *ChunkMaker) buzhashUpdate(sum uint64, out byte, in byte, length int) uint64 {
|
||||||
|
return rotateLeftByOne(sum) ^ rotateLeft(maker.randomTable[out], uint(length)) ^ maker.randomTable[in]
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForEachChunk reads data from 'reader'. If EOF is encountered, it will call 'nextReader' to ask for next file. If
|
||||||
|
// 'nextReader' returns false, it will process remaining data in the buffer and then quit. When a chunk is identified,
|
||||||
|
// it will call 'endOfChunk' to return the chunk size and a boolean flag indicating if it is the last chunk.
|
||||||
|
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
|
||||||
|
nextReader func(size int64, hash string) (io.Reader, bool)) {
|
||||||
|
|
||||||
|
maker.bufferStart = 0
|
||||||
|
maker.bufferSize = 0
|
||||||
|
|
||||||
|
var minimumReached bool
|
||||||
|
var hashSum uint64
|
||||||
|
var chunk *Chunk
|
||||||
|
|
||||||
|
fileSize := int64(0)
|
||||||
|
fileHasher := maker.config.NewFileHasher()
|
||||||
|
|
||||||
|
// Start a new chunk.
|
||||||
|
startNewChunk := func() {
|
||||||
|
hashSum = 0
|
||||||
|
minimumReached = false
|
||||||
|
if maker.hashOnly {
|
||||||
|
chunk = maker.hashOnlyChunk
|
||||||
|
chunk.Reset(true)
|
||||||
|
} else {
|
||||||
|
chunk = maker.config.GetChunk()
|
||||||
|
chunk.Reset(true)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move data from the buffer to the chunk.
|
||||||
|
fill := func(count int) {
|
||||||
|
if maker.bufferStart+count < maker.bufferCapacity {
|
||||||
|
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
|
||||||
|
maker.bufferStart += count
|
||||||
|
maker.bufferSize -= count
|
||||||
|
} else {
|
||||||
|
chunk.Write(maker.buffer[maker.bufferStart:])
|
||||||
|
chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
|
||||||
|
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
|
||||||
|
maker.bufferSize -= count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
startNewChunk()
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
isEOF := false
|
||||||
|
|
||||||
|
if maker.minimumChunkSize == maker.maximumChunkSize {
|
||||||
|
|
||||||
|
if maker.bufferCapacity < maker.minimumChunkSize {
|
||||||
|
maker.buffer = make([]byte, maker.minimumChunkSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
maker.bufferStart = 0
|
||||||
|
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
|
||||||
|
count, err := reader.Read(maker.buffer[maker.bufferStart:maker.minimumChunkSize])
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF {
|
||||||
|
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
isEOF = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
maker.bufferStart += count
|
||||||
|
}
|
||||||
|
|
||||||
|
fileHasher.Write(maker.buffer[:maker.bufferStart])
|
||||||
|
fileSize += int64(maker.bufferStart)
|
||||||
|
chunk.Write(maker.buffer[:maker.bufferStart])
|
||||||
|
|
||||||
|
if isEOF {
|
||||||
|
var ok bool
|
||||||
|
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
|
||||||
|
if !ok {
|
||||||
|
endOfChunk(chunk, true)
|
||||||
|
return
|
||||||
|
} else {
|
||||||
|
endOfChunk(chunk, false)
|
||||||
|
startNewChunk()
|
||||||
|
fileSize = 0
|
||||||
|
fileHasher = maker.config.NewFileHasher()
|
||||||
|
isEOF = false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
endOfChunk(chunk, false)
|
||||||
|
startNewChunk()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
|
||||||
|
// If the buffer still has some space left and EOF is not seen, read more data.
|
||||||
|
for maker.bufferSize < maker.bufferCapacity && !isEOF {
|
||||||
|
start := maker.bufferStart + maker.bufferSize
|
||||||
|
count := maker.bufferCapacity - start
|
||||||
|
if start >= maker.bufferCapacity {
|
||||||
|
start -= maker.bufferCapacity
|
||||||
|
count = maker.bufferStart - start
|
||||||
|
}
|
||||||
|
|
||||||
|
count, err = reader.Read(maker.buffer[start : start+count])
|
||||||
|
|
||||||
|
if err != nil && err != io.EOF {
|
||||||
|
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
maker.bufferSize += count
|
||||||
|
fileHasher.Write(maker.buffer[start : start+count])
|
||||||
|
fileSize += int64(count)
|
||||||
|
|
||||||
|
// if EOF is seen, try to switch to next file and continue
|
||||||
|
if err == io.EOF {
|
||||||
|
var ok bool
|
||||||
|
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
|
||||||
|
if !ok {
|
||||||
|
isEOF = true
|
||||||
|
} else {
|
||||||
|
fileSize = 0
|
||||||
|
fileHasher = maker.config.NewFileHasher()
|
||||||
|
isEOF = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// No eough data to meet the minimum chunk size requirement, so just return as a chunk.
|
||||||
|
if maker.bufferSize < maker.minimumChunkSize {
|
||||||
|
fill(maker.bufferSize)
|
||||||
|
endOfChunk(chunk, true)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
|
||||||
|
if !minimumReached {
|
||||||
|
|
||||||
|
bytes := maker.minimumChunkSize
|
||||||
|
|
||||||
|
if maker.bufferStart+bytes < maker.bufferCapacity {
|
||||||
|
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
|
||||||
|
} else {
|
||||||
|
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
|
||||||
|
hashSum = maker.buzhashSum(hashSum,
|
||||||
|
maker.buffer[:bytes-(maker.bufferCapacity-maker.bufferStart)])
|
||||||
|
}
|
||||||
|
|
||||||
|
if (hashSum & maker.hashMask) == 0 {
|
||||||
|
// This is a minimum size chunk
|
||||||
|
fill(bytes)
|
||||||
|
endOfChunk(chunk, false)
|
||||||
|
startNewChunk()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
minimumReached = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now check the buzhash of the data in the buffer, shifting one byte at a time.
|
||||||
|
bytes := maker.bufferSize - maker.minimumChunkSize
|
||||||
|
isEOC := false
|
||||||
|
maxSize := maker.maximumChunkSize - chunk.GetLength()
|
||||||
|
for i := 0; i < maker.bufferSize-maker.minimumChunkSize; i++ {
|
||||||
|
out := maker.bufferStart + i
|
||||||
|
if out >= maker.bufferCapacity {
|
||||||
|
out -= maker.bufferCapacity
|
||||||
|
}
|
||||||
|
in := maker.bufferStart + i + maker.minimumChunkSize
|
||||||
|
if in >= maker.bufferCapacity {
|
||||||
|
in -= maker.bufferCapacity
|
||||||
|
}
|
||||||
|
|
||||||
|
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
|
||||||
|
if (hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
|
||||||
|
// A chunk is completed.
|
||||||
|
bytes = i + 1 + maker.minimumChunkSize
|
||||||
|
isEOC = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fill(bytes)
|
||||||
|
|
||||||
|
if isEOC {
|
||||||
|
if isEOF && maker.bufferSize == 0 {
|
||||||
|
endOfChunk(chunk, true)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
endOfChunk(chunk, false)
|
||||||
|
startNewChunk()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if isEOF {
|
||||||
|
fill(maker.bufferSize)
|
||||||
|
endOfChunk(chunk, true)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
131
src/duplicacy_chunkmaker_test.go
Normal file
131
src/duplicacy_chunkmaker_test.go
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"sort"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
|
||||||
|
bufferCapacity int) ([]string, int) {
|
||||||
|
|
||||||
|
config := CreateConfig()
|
||||||
|
|
||||||
|
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
|
||||||
|
config.AverageChunkSize = averageChunkSize
|
||||||
|
config.MaximumChunkSize = maxChunkSize
|
||||||
|
config.MinimumChunkSize = minChunkSize
|
||||||
|
config.ChunkSeed = []byte("duplicacy")
|
||||||
|
|
||||||
|
config.HashKey = DEFAULT_KEY
|
||||||
|
config.IDKey = DEFAULT_KEY
|
||||||
|
|
||||||
|
maker := CreateChunkMaker(config, false)
|
||||||
|
|
||||||
|
var chunks []string
|
||||||
|
totalChunkSize := 0
|
||||||
|
totalFileSize := int64(0)
|
||||||
|
|
||||||
|
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
|
||||||
|
|
||||||
|
buffers := make([]*bytes.Buffer, n)
|
||||||
|
sizes := make([]int, n)
|
||||||
|
sizes[0] = 0
|
||||||
|
for i := 1; i < n; i++ {
|
||||||
|
same := true
|
||||||
|
for same {
|
||||||
|
same = false
|
||||||
|
sizes[i] = rand.Int() % n
|
||||||
|
for j := 0; j < i; j++ {
|
||||||
|
if sizes[i] == sizes[j] {
|
||||||
|
same = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(sort.IntSlice(sizes))
|
||||||
|
|
||||||
|
for i := 0; i < n-1; i++ {
|
||||||
|
buffers[i] = bytes.NewBuffer(content[sizes[i]:sizes[i+1]])
|
||||||
|
}
|
||||||
|
buffers[n-1] = bytes.NewBuffer(content[sizes[n-1]:])
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
|
||||||
|
maker.ForEachChunk(buffers[0],
|
||||||
|
func(chunk *Chunk, final bool) {
|
||||||
|
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
|
||||||
|
chunks = append(chunks, chunk.GetHash())
|
||||||
|
totalChunkSize += chunk.GetLength()
|
||||||
|
},
|
||||||
|
func(size int64, hash string) (io.Reader, bool) {
|
||||||
|
totalFileSize += size
|
||||||
|
i++
|
||||||
|
if i >= len(buffers) {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
return buffers[i], true
|
||||||
|
})
|
||||||
|
|
||||||
|
if totalFileSize != int64(totalChunkSize) {
|
||||||
|
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
|
||||||
|
}
|
||||||
|
return chunks, totalChunkSize
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChunkMaker(t *testing.T) {
|
||||||
|
|
||||||
|
//sizes := [...] int { 64 }
|
||||||
|
sizes := [...]int{64, 256, 1024, 1024 * 10}
|
||||||
|
|
||||||
|
for _, size := range sizes {
|
||||||
|
|
||||||
|
content := make([]byte, size)
|
||||||
|
_, err := crypto_rand.Read(content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error generating random content: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
|
||||||
|
|
||||||
|
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
|
||||||
|
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
|
||||||
|
32, 48, 64, 128, 256, 512, 1024, 2048}
|
||||||
|
|
||||||
|
//capacities := [...]int { 32 }
|
||||||
|
|
||||||
|
for _, capacity := range capacities {
|
||||||
|
|
||||||
|
for _, n := range [...]int{6, 7, 8, 9, 10} {
|
||||||
|
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
|
||||||
|
|
||||||
|
if totalSize1 != totalSize2 {
|
||||||
|
t.Errorf("[size %d, capacity %d] total size is %d instead of %d",
|
||||||
|
size, capacity, totalSize2, totalSize1)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(chunkArray1) != len(chunkArray2) {
|
||||||
|
t.Errorf("[size %d, capacity %d] number of chunks is %d instead of %d",
|
||||||
|
size, capacity, len(chunkArray2), len(chunkArray1))
|
||||||
|
} else {
|
||||||
|
for i := 0; i < len(chunkArray1); i++ {
|
||||||
|
if chunkArray1[i] != chunkArray2[i] {
|
||||||
|
t.Errorf("[size %d, capacity %d, chunk %d] chunk is different", size, capacity, i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
209
src/duplicacy_chunkoperator.go
Normal file
209
src/duplicacy_chunkoperator.go
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// These are operations that ChunkOperator will perform.
const (
	ChunkOperationFind      = iota // Check that a chunk exists in the storage
	ChunkOperationDelete           // Permanently remove a chunk or fossil file
	ChunkOperationFossilize        // Rename a chunk file into a fossil
	ChunkOperationResurrect        // Turn a fossil back into a regular chunk
)
|
||||||
|
|
||||||
|
// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
type ChunkOperatorTask struct {
	operation int    // One of the ChunkOperation* constants
	chunkID   string // The chunk id
	filePath  string // The path of the chunk file; it may be empty
}
|
||||||
|
|
||||||
|
// ChunkOperator is capable of performing multi-threaded operations on chunks.
|
||||||
|
type ChunkOperator struct {
|
||||||
|
numberOfActiveTasks int64 // The number of chunks that are being operated on
|
||||||
|
storage Storage // This storage
|
||||||
|
threads int // Number of threads
|
||||||
|
taskQueue chan ChunkOperatorTask // Operating goroutines are waiting on this channel for input
|
||||||
|
stopChannel chan bool // Used to stop all the goroutines
|
||||||
|
|
||||||
|
fossils []string // For fossilize operation, the paths of the fossils are stored in this slice
|
||||||
|
fossilsLock *sync.Mutex // The lock for 'fossils'
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateChunkOperator creates a new ChunkOperator.
|
||||||
|
func CreateChunkOperator(storage Storage, threads int) *ChunkOperator {
|
||||||
|
operator := &ChunkOperator{
|
||||||
|
storage: storage,
|
||||||
|
threads: threads,
|
||||||
|
|
||||||
|
taskQueue: make(chan ChunkOperatorTask, threads*4),
|
||||||
|
stopChannel: make(chan bool),
|
||||||
|
|
||||||
|
fossils: make([]string, 0),
|
||||||
|
fossilsLock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
|
||||||
|
// Start the operator goroutines
|
||||||
|
for i := 0; i < operator.threads; i++ {
|
||||||
|
go func(threadIndex int) {
|
||||||
|
defer CatchLogException()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case task := <-operator.taskQueue:
|
||||||
|
operator.Run(threadIndex, task)
|
||||||
|
case <-operator.stopChannel:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
return operator
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) Stop() {
|
||||||
|
if atomic.LoadInt64(&operator.numberOfActiveTasks) < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for atomic.LoadInt64(&operator.numberOfActiveTasks) > 0 {
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
}
|
||||||
|
for i := 0; i < operator.threads; i++ {
|
||||||
|
operator.stopChannel <- false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assign -1 to numberOfActiveTasks so Stop() can be called multiple times
|
||||||
|
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) AddTask(operation int, chunkID string, filePath string) {
|
||||||
|
|
||||||
|
task := ChunkOperatorTask{
|
||||||
|
operation: operation,
|
||||||
|
chunkID: chunkID,
|
||||||
|
filePath: filePath,
|
||||||
|
}
|
||||||
|
operator.taskQueue <- task
|
||||||
|
atomic.AddInt64(&operator.numberOfActiveTasks, int64(1))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) Find(chunkID string) {
|
||||||
|
operator.AddTask(ChunkOperationFind, chunkID, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) Delete(chunkID string, filePath string) {
|
||||||
|
operator.AddTask(ChunkOperationDelete, chunkID, filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) Fossilize(chunkID string, filePath string) {
|
||||||
|
operator.AddTask(ChunkOperationFossilize, chunkID, filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) Resurrect(chunkID string, filePath string) {
|
||||||
|
operator.AddTask(ChunkOperationResurrect, chunkID, filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
|
||||||
|
defer func() {
|
||||||
|
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
|
||||||
|
}()
|
||||||
|
|
||||||
|
// task.filePath may be empty. If so, find the chunk first.
|
||||||
|
if task.operation == ChunkOperationDelete || task.operation == ChunkOperationFossilize {
|
||||||
|
if task.filePath == "" {
|
||||||
|
filePath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err)
|
||||||
|
return
|
||||||
|
} else if !exist {
|
||||||
|
if task.operation == ChunkOperationDelete {
|
||||||
|
LOG_WARN("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
fossilPath, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true)
|
||||||
|
if exist {
|
||||||
|
LOG_WARN("CHUNK_FOSSILIZE", "Chunk %s is already a fossil", task.chunkID)
|
||||||
|
operator.fossilsLock.Lock()
|
||||||
|
operator.fossils = append(operator.fossils, fossilPath)
|
||||||
|
operator.fossilsLock.Unlock()
|
||||||
|
} else {
|
||||||
|
LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
task.filePath = filePath
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if task.operation == ChunkOperationFind {
|
||||||
|
_, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err)
|
||||||
|
} else if !exist {
|
||||||
|
LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
|
||||||
|
} else {
|
||||||
|
LOG_DEBUG("CHUNK_FIND", "Chunk %s exists in the storage", task.chunkID)
|
||||||
|
}
|
||||||
|
} else if task.operation == ChunkOperationDelete {
|
||||||
|
err := operator.storage.DeleteFile(threadIndex, task.filePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("CHUNK_DELETE", "Failed to remove the file %s: %v", task.filePath, err)
|
||||||
|
} else {
|
||||||
|
if task.chunkID != "" {
|
||||||
|
LOG_INFO("CHUNK_DELETE", "The chunk %s has been permanently removed", task.chunkID)
|
||||||
|
} else {
|
||||||
|
LOG_INFO("CHUNK_DELETE", "Deleted file %s from the storage", task.filePath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else if task.operation == ChunkOperationFossilize {
|
||||||
|
|
||||||
|
fossilPath := task.filePath + ".fsl"
|
||||||
|
|
||||||
|
err := operator.storage.MoveFile(threadIndex, task.filePath, fossilPath)
|
||||||
|
if err != nil {
|
||||||
|
if _, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true); exist {
|
||||||
|
err := operator.storage.DeleteFile(threadIndex, task.filePath)
|
||||||
|
if err == nil {
|
||||||
|
LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s as the fossil already exists", task.chunkID)
|
||||||
|
}
|
||||||
|
operator.fossilsLock.Lock()
|
||||||
|
operator.fossils = append(operator.fossils, fossilPath)
|
||||||
|
operator.fossilsLock.Unlock()
|
||||||
|
} else {
|
||||||
|
LOG_ERROR("CHUNK_DELETE", "Failed to fossilize the chunk %s: %v", task.chunkID, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
LOG_TRACE("CHUNK_FOSSILIZE", "The chunk %s has been marked as a fossil", task.chunkID)
|
||||||
|
operator.fossilsLock.Lock()
|
||||||
|
operator.fossils = append(operator.fossils, fossilPath)
|
||||||
|
operator.fossilsLock.Unlock()
|
||||||
|
}
|
||||||
|
} else if task.operation == ChunkOperationResurrect {
|
||||||
|
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if exist {
|
||||||
|
operator.storage.DeleteFile(threadIndex, task.filePath)
|
||||||
|
LOG_INFO("FOSSIL_RESURRECT", "The chunk %s already exists", task.chunkID)
|
||||||
|
} else {
|
||||||
|
err := operator.storage.MoveFile(threadIndex, task.filePath, chunkPath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("FOSSIL_RESURRECT", "Failed to resurrect the chunk %s from the fossil %s: %v",
|
||||||
|
task.chunkID, task.filePath, err)
|
||||||
|
} else {
|
||||||
|
LOG_INFO("FOSSIL_RESURRECT", "The chunk %s has been resurrected", task.filePath)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
151
src/duplicacy_chunkuploader.go
Normal file
151
src/duplicacy_chunkuploader.go
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ChunkUploadTask represents a chunk to be uploaded.
type ChunkUploadTask struct {
	chunk      *Chunk // The chunk to upload
	chunkIndex int    // Caller-assigned index, passed through to the completion callback
}
|
||||||
|
|
||||||
|
// ChunkUploader uploads chunks to the storage using one or more uploading goroutines. Chunks are added
// by the call to StartChunk(), and then passed to the uploading goroutines. The completion function is
// called when the uploading is completed. Note that ChunkUploader does not release chunks to the
// chunk pool; instead that is left to the completion callback/caller.
// NOTE(review): the original comment was truncated after "instead" — confirm the intended wording.
type ChunkUploader struct {
	config        *Config              // Associated config
	storage       Storage              // Upload to this storage (original comment said "Download"; this type only uploads)
	snapshotCache *FileStorage         // Used as cache if not nil; usually for uploading snapshot chunks
	threads       int                  // Number of uploading goroutines
	taskQueue     chan ChunkUploadTask // Uploading goroutines are listening on this channel for upload jobs
	stopChannel   chan bool            // Used to terminate uploading goroutines

	numberOfUploadingTasks int32 // The number of uploading tasks; incremented by StartChunk, decremented by Upload

	// Uploading goroutines call this function after having uploaded (or skipped) chunks
	completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)
}
|
||||||
|
|
||||||
|
// CreateChunkUploader creates a chunk uploader.
|
||||||
|
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
|
||||||
|
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
|
||||||
|
uploader := &ChunkUploader{
|
||||||
|
config: config,
|
||||||
|
storage: storage,
|
||||||
|
snapshotCache: snapshotCache,
|
||||||
|
threads: threads,
|
||||||
|
taskQueue: make(chan ChunkUploadTask, 1),
|
||||||
|
stopChannel: make(chan bool),
|
||||||
|
completionFunc: completionFunc,
|
||||||
|
}
|
||||||
|
|
||||||
|
return uploader
|
||||||
|
}
|
||||||
|
|
||||||
|
// Starts starts uploading goroutines.
|
||||||
|
func (uploader *ChunkUploader) Start() {
|
||||||
|
for i := 0; i < uploader.threads; i++ {
|
||||||
|
go func(threadIndex int) {
|
||||||
|
defer CatchLogException()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case task := <-uploader.taskQueue:
|
||||||
|
uploader.Upload(threadIndex, task)
|
||||||
|
case <-uploader.stopChannel:
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}(i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// StartChunk sends a chunk to be uploaded to a waiting uploading goroutine. It may block if all uploading goroutines are busy.
|
||||||
|
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
|
||||||
|
atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
|
||||||
|
uploader.taskQueue <- ChunkUploadTask{
|
||||||
|
chunk: chunk,
|
||||||
|
chunkIndex: chunkIndex,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops all uploading goroutines. It first busy-waits (polling every
// 100ms) until all queued tasks have completed, then sends one stop signal
// per goroutine.
func (uploader *ChunkUploader) Stop() {
	// Drain: the counter is incremented by StartChunk and decremented by
	// Upload when a task finishes.
	for atomic.LoadInt32(&uploader.numberOfUploadingTasks) > 0 {
		time.Sleep(100 * time.Millisecond)
	}
	// Each goroutine exits after receiving exactly one value here.
	for i := 0; i < uploader.threads; i++ {
		uploader.stopChannel <- false
	}
}
|
||||||
|
|
||||||
|
// Upload is called by the uploading goroutines to perform the actual uploading.
// It optionally mirrors snapshot chunks into the local cache, skips chunks that
// already exist in the storage (dedup by name), encrypts, then uploads. Returns
// true only when the chunk was actually uploaded (or the upload was a dry run).
//
// NOTE(review): the LOG_ERROR early-return paths do not decrement
// numberOfUploadingTasks; presumably LOG_ERROR panics and the goroutine ends
// via CatchLogException — confirm, otherwise Stop() would wait forever.
func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) bool {

	chunk := task.chunk
	chunkSize := chunk.GetLength()
	chunkID := chunk.GetID()

	// For a snapshot chunk, verify that its chunk id is correct
	if uploader.snapshotCache != nil {
		chunk.VerifyID()
	}

	if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
		// Save a copy to the local snapshot cache. Failures here are only
		// warnings; the real upload below still proceeds.
		chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
		if err != nil {
			LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
		} else if exist {
			LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
		} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
			LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
		} else {
			LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
		}
	}

	// This returns the path the chunk file should be at.
	chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
	if err != nil {
		LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
		return false
	}

	if exist {
		// Chunk deduplication by name in effect here: nothing to upload.
		LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)

		uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
		atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
		return false
	}

	// Encrypt the chunk only after we know that it must be uploaded.
	err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
	if err != nil {
		LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
		return false
	}

	if !uploader.config.dryRun {
		err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
		if err != nil {
			LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
			return false
		}
		LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
	} else {
		LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
	}

	// chunk.GetLength() is now the encrypted (actually transferred) size.
	uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
	atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
	return true
}
|
||||||
128
src/duplicacy_chunkuploader_test.go
Normal file
128
src/duplicacy_chunkuploader_test.go
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"runtime/debug"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestUploaderAndDownloader round-trips randomly generated chunks through the
// configured test storage: upload all chunks, download them back, and verify
// the chunk IDs match. Storage chunks are removed at the end.
func TestUploaderAndDownloader(t *testing.T) {

	rand.Seed(time.Now().UnixNano())
	setTestingT(t)
	SetLoggingLevel(INFO)

	// Convert duplicacy log Exceptions (raised via panic) into test failures.
	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()

	// Start from a clean local test directory.
	testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	t.Logf("storage: %s", testStorageName)

	storage, err := loadStorage(testDir, 1)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}
	storage.EnableTestMode()
	storage.SetRateLimits(testRateLimit, testRateLimit)

	for _, dir := range []string{"chunks", "snapshots"} {
		err = storage.CreateDirectory(0, dir)
		if err != nil {
			t.Errorf("Failed to create directory %s: %v", dir, err)
			return
		}
	}

	numberOfChunks := 100
	maxChunkSize := 64 * 1024

	if testQuickMode {
		numberOfChunks = 10
	}

	var chunks []*Chunk

	config := CreateConfig()
	config.MinimumChunkSize = 100
	// Large enough pool that no chunk is dropped when returned.
	config.chunkPool = make(chan *Chunk, numberOfChunks*2)
	totalFileSize := 0

	// Generate chunks with random sizes and random contents.
	for i := 0; i < numberOfChunks; i++ {
		content := make([]byte, rand.Int()%maxChunkSize+1)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		chunk := CreateChunk(config, true)
		chunk.Reset(true)
		chunk.Write(content)
		chunks = append(chunks, chunk)

		t.Logf("Chunk: %s, size: %d", chunk.GetID(), chunk.GetLength())
		totalFileSize += chunk.GetLength()
	}

	completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
		t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
	}

	// completionFunc is assigned after construction so it can close over 'chunks'.
	chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil)
	chunkUploader.completionFunc = completionFunc
	chunkUploader.Start()

	for i, chunk := range chunks {
		chunkUploader.StartChunk(chunk, i)
	}

	chunkUploader.Stop()

	chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads, false)
	chunkDownloader.totalChunkSize = int64(totalFileSize)

	for _, chunk := range chunks {
		chunkDownloader.AddChunk(chunk.GetHash())
	}

	// Verify every downloaded chunk has the same ID as the uploaded one.
	for i, chunk := range chunks {
		downloaded := chunkDownloader.WaitForChunk(i)
		if downloaded.GetID() != chunk.GetID() {
			t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
		}
	}

	chunkDownloader.Stop()

	// Clean up the chunks left in the storage.
	for _, file := range listChunks(storage) {
		err = storage.DeleteFile(0, "chunks/"+file)
		if err != nil {
			t.Errorf("Failed to delete the file %s: %v", file, err)
			return
		}
	}

}
|
||||||
684
src/duplicacy_config.go
Normal file
684
src/duplicacy_config.go
Normal file
@@ -0,0 +1,684 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
	"bytes"
	"crypto/hmac"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"crypto/x509"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"encoding/pem"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"strings"
	"sync/atomic"

	blake2 "github.com/minio/blake2b-simd"
)
|
||||||
|
|
||||||
|
// If encryption is turned off, use this key for HMAC-SHA256 or chunk ID generation etc.
var DEFAULT_KEY = []byte("duplicacy")

// The new default compression level is 100. However, in the early versions we use the
// standard zlib levels of -1 to 9. This value also selects the hash algorithm
// elsewhere in this file (100 selects blake2b, otherwise SHA-256/HMAC-SHA256).
var DEFAULT_COMPRESSION_LEVEL = 100

// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_BANNER = "duplicacy\001"

// The length of the salt used in the new format, in bytes.
var CONFIG_SALT_LENGTH = 32

// The default iterations for key derivation (used by the old config format).
var CONFIG_DEFAULT_ITERATIONS = 16384
|
||||||
|
|
||||||
|
// Config holds the chunking parameters and key material for a storage. It is
// serialized (optionally encrypted) into the storage's "config" file.
type Config struct {
	CompressionLevel int `json:"compression-level"`
	AverageChunkSize int `json:"average-chunk-size"`
	MaximumChunkSize int `json:"max-chunk-size"`
	MinimumChunkSize int `json:"min-chunk-size"`

	ChunkSeed []byte `json:"chunk-seed"`

	FixedNesting bool `json:"fixed-nesting"`

	// Use HMAC-SHA256(hashKey, plaintext) as the chunk hash.
	// Use HMAC-SHA256(idKey, chunk hash) as the file name of the chunk
	// For chunks, use HMAC-SHA256(chunkKey, chunk hash) as the encryption key
	// For files, use HMAC-SHA256(fileKey, file path) as the encryption key

	// the HMAC-SHA256 key of the chunk data
	HashKey []byte `json:"-"`

	// used to generate an id from the chunk hash
	IDKey []byte `json:"-"`

	// for encrypting a chunk
	ChunkKey []byte `json:"-"`

	// for encrypting a non-chunk file
	FileKey []byte `json:"-"`

	// for erasure coding
	// NOTE(review): these struct tags use single quotes, which encoding/json
	// does not parse (go vet flags this) — the fields marshal under their Go
	// field names "DataShards"/"ParityShards". Changing the quotes now would
	// change the key names in existing config files; confirm compatibility
	// before fixing.
	DataShards   int `json:'data-shards'`
	ParityShards int `json:'parity-shards'`

	// for RSA encryption (unexported; marshaled via jsonableConfig)
	rsaPrivateKey *rsa.PrivateKey
	rsaPublicKey  *rsa.PublicKey

	chunkPool      chan *Chunk // pool of reusable chunk buffers
	numberOfChunks int32       // total chunks ever allocated (for the GetChunk warning)
	dryRun         bool        // when true, uploads are skipped
}
|
||||||
|
|
||||||
|
// Create an alias to avoid recursive calls on Config.MarshalJSON
type aliasedConfig Config

// jsonableConfig embeds the alias and shadows the binary key fields with
// hex-string equivalents so they can be round-tripped through JSON.
type jsonableConfig struct {
	*aliasedConfig
	ChunkSeed    string `json:"chunk-seed"`
	HashKey      string `json:"hash-key"`
	IDKey        string `json:"id-key"`
	ChunkKey     string `json:"chunk-key"`
	FileKey      string `json:"file-key"`
	RSAPublicKey string `json:"rsa-public-key"`
}
|
||||||
|
|
||||||
|
func (config *Config) MarshalJSON() ([]byte, error) {
|
||||||
|
|
||||||
|
publicKey := []byte {}
|
||||||
|
if config.rsaPublicKey != nil {
|
||||||
|
publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
|
||||||
|
}
|
||||||
|
return json.Marshal(&jsonableConfig{
|
||||||
|
aliasedConfig: (*aliasedConfig)(config),
|
||||||
|
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
|
||||||
|
HashKey: hex.EncodeToString(config.HashKey),
|
||||||
|
IDKey: hex.EncodeToString(config.IDKey),
|
||||||
|
ChunkKey: hex.EncodeToString(config.ChunkKey),
|
||||||
|
FileKey: hex.EncodeToString(config.FileKey),
|
||||||
|
RSAPublicKey: hex.EncodeToString(publicKey),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (config *Config) UnmarshalJSON(description []byte) (err error) {
|
||||||
|
|
||||||
|
aliased := &jsonableConfig{
|
||||||
|
aliasedConfig: (*aliasedConfig)(config),
|
||||||
|
}
|
||||||
|
|
||||||
|
if err = json.Unmarshal(description, &aliased); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.ChunkSeed, err = hex.DecodeString(aliased.ChunkSeed); err != nil {
|
||||||
|
return fmt.Errorf("Invalid representation of the chunk seed in the config")
|
||||||
|
}
|
||||||
|
if config.HashKey, err = hex.DecodeString(aliased.HashKey); err != nil {
|
||||||
|
return fmt.Errorf("Invalid representation of the hash key in the config")
|
||||||
|
}
|
||||||
|
if config.IDKey, err = hex.DecodeString(aliased.IDKey); err != nil {
|
||||||
|
return fmt.Errorf("Invalid representation of the id key in the config")
|
||||||
|
}
|
||||||
|
if config.ChunkKey, err = hex.DecodeString(aliased.ChunkKey); err != nil {
|
||||||
|
return fmt.Errorf("Invalid representation of the chunk key in the config")
|
||||||
|
}
|
||||||
|
if config.FileKey, err = hex.DecodeString(aliased.FileKey); err != nil {
|
||||||
|
return fmt.Errorf("Invalid representation of the file key in the config")
|
||||||
|
}
|
||||||
|
|
||||||
|
if publicKey, err := hex.DecodeString(aliased.RSAPublicKey); err != nil {
|
||||||
|
return fmt.Errorf("Invalid hex encoding of the RSA public key in the config")
|
||||||
|
} else if len(publicKey) > 0 {
|
||||||
|
parsedKey, err := x509.ParsePKIXPublicKey(publicKey)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Invalid RSA public key in the config: %v", err)
|
||||||
|
}
|
||||||
|
config.rsaPublicKey = parsedKey.(*rsa.PublicKey)
|
||||||
|
if config.rsaPublicKey == nil {
|
||||||
|
return fmt.Errorf("Unsupported public key type %s in the config", reflect.TypeOf(parsedKey))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
|
||||||
|
|
||||||
|
return config.CompressionLevel == otherConfig.CompressionLevel &&
|
||||||
|
config.AverageChunkSize == otherConfig.AverageChunkSize &&
|
||||||
|
config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
|
||||||
|
config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
|
||||||
|
bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
|
||||||
|
bytes.Equal(config.HashKey, otherConfig.HashKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Print logs the config parameters. Chunking parameters are logged at INFO;
// key material and encryption details only at TRACE.
func (config *Config) Print() {

	LOG_INFO("CONFIG_INFO", "Compression level: %d", config.CompressionLevel)
	LOG_INFO("CONFIG_INFO", "Average chunk size: %d", config.AverageChunkSize)
	LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
	LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
	LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)

	LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
	LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)

	if len(config.ChunkKey) > 0 {
		LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
	}

	if len(config.FileKey) > 0 {
		LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
	}

	if config.DataShards != 0 && config.ParityShards != 0 {
		LOG_TRACE("CONFIG_INFO", "Data shards: %d, parity shards: %d", config.DataShards, config.ParityShards)
	}

	if config.rsaPublicKey != nil {
		// Re-encode the public key as PEM for human-readable output.
		pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)

		publicKey := pem.EncodeToMemory(&pem.Block{
			Type:  "PUBLIC KEY",
			Bytes: pkisPublicKey,
		})

		LOG_TRACE("CONFIG_INFO", "RSA public key: %s", publicKey)
	}

}
|
||||||
|
|
||||||
|
// CreateConfigFromParameters builds a Config from the given chunking
// parameters. When isEncrypted is true, fresh random keys are generated;
// otherwise the fixed DEFAULT_KEY is used for the seed/hash/id keys and no
// encryption keys are set. When copyFrom is non-nil, the chunking parameters,
// seed, and hash key are copied from it so chunks are compatible across the
// two storages; bitCopy additionally copies the id/chunk/file keys so chunk
// files are bit-identical. Returns nil if random key generation fails.
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
	isEncrypted bool, copyFrom *Config, bitCopy bool) (config *Config) {

	config = &Config{
		CompressionLevel: compressionLevel,
		AverageChunkSize: averageChunkSize,
		MaximumChunkSize: maximumChunkSize,
		MinimumChunkSize: mininumChunkSize,
		FixedNesting:     true,
	}

	if isEncrypted {
		// Randomly generate keys: five 32-byte keys drawn from one buffer.
		keys := make([]byte, 32*5)
		_, err := rand.Read(keys)
		if err != nil {
			LOG_ERROR("CONFIG_KEY", "Failed to generate random keys: %v", err)
			return nil
		}

		config.ChunkSeed = keys[:32]
		config.HashKey = keys[32:64]
		config.IDKey = keys[64:96]
		config.ChunkKey = keys[96:128]
		config.FileKey = keys[128:]
	} else {
		config.ChunkSeed = DEFAULT_KEY
		config.HashKey = DEFAULT_KEY
		config.IDKey = DEFAULT_KEY
	}

	if copyFrom != nil {
		// Chunking must match the source storage for copy compatibility.
		config.CompressionLevel = copyFrom.CompressionLevel

		config.AverageChunkSize = copyFrom.AverageChunkSize
		config.MaximumChunkSize = copyFrom.MaximumChunkSize
		config.MinimumChunkSize = copyFrom.MinimumChunkSize

		config.ChunkSeed = copyFrom.ChunkSeed
		config.HashKey = copyFrom.HashKey

		if bitCopy {
			config.IDKey = copyFrom.IDKey
			config.ChunkKey = copyFrom.ChunkKey
			config.FileKey = copyFrom.FileKey
		}
	}

	config.chunkPool = make(chan *Chunk, runtime.NumCPU()*16)

	return config
}
|
||||||
|
|
||||||
|
func CreateConfig() (config *Config) {
|
||||||
|
return &Config{
|
||||||
|
HashKey: DEFAULT_KEY,
|
||||||
|
IDKey: DEFAULT_KEY,
|
||||||
|
CompressionLevel: DEFAULT_COMPRESSION_LEVEL,
|
||||||
|
chunkPool: make(chan *Chunk, runtime.NumCPU()*16),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChunk returns a chunk from the pool, allocating a new one if the pool is
// empty. A warning is logged once the number of allocations reaches the pool
// capacity (NumCPU*16), which may indicate chunks are not being returned.
func (config *Config) GetChunk() (chunk *Chunk) {
	select {
	case chunk = <-config.chunkPool:
	default:
		numberOfChunks := atomic.AddInt32(&config.numberOfChunks, 1)
		if numberOfChunks >= int32(runtime.NumCPU()*16) {
			LOG_WARN("CONFIG_CHUNK", "%d chunks have been allocated", numberOfChunks)
			// Opt-in stack dump to find the allocation site of a chunk leak.
			if _, found := os.LookupEnv("DUPLICACY_CHUNK_DEBUG"); found {
				debug.PrintStack()
			}
		}
		chunk = CreateChunk(config, true)
	}
	return chunk
}
|
||||||
|
|
||||||
|
func (config *Config) PutChunk(chunk *Chunk) {
|
||||||
|
|
||||||
|
if chunk == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case config.chunkPool <- chunk:
|
||||||
|
default:
|
||||||
|
LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk due to a full pool")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewKeyedHasher returns a keyed hasher: blake2b (32-byte digest) when the
// compression level is the new default, otherwise HMAC-SHA256, so hashes stay
// compatible with storages created by older versions.
func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
	if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
		hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
		if err != nil {
			// LOG_ERROR aborts via panic, so 'hasher' is not returned as nil here.
			LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
		}
		return hasher
	} else {
		return hmac.New(sha256.New, key)
	}
}
|
||||||
|
|
||||||
|
// SkipFileHash disables file hashing (NewFileHasher returns a DummyHasher).
// It is enabled via the DUPLICACY_SKIP_FILE_HASH environment variable.
var SkipFileHash = false

func init() {
	// Any non-empty value other than "0" enables the skip.
	if value, found := os.LookupEnv("DUPLICACY_SKIP_FILE_HASH"); found && value != "" && value != "0" {
		SkipFileHash = true
	}
}
|
||||||
|
|
||||||
|
// DummyHasher is a no-op hash.Hash used when SkipFileHash is true: it
// discards all input and always yields an empty digest.
type DummyHasher struct {
}

// Write discards p, reporting that every byte was consumed.
func (hasher *DummyHasher) Write(p []byte) (int, error) {
	return len(p), nil
}

// Sum ignores b and returns an empty digest.
func (hasher *DummyHasher) Sum(b []byte) []byte {
	return []byte{}
}

// Reset is a no-op since no state is kept.
func (hasher *DummyHasher) Reset() {
}

// Size reports a zero-length digest.
func (hasher *DummyHasher) Size() int {
	return 0
}

// BlockSize reports a zero block size.
func (hasher *DummyHasher) BlockSize() int {
	return 0
}
|
||||||
|
|
||||||
|
// NewFileHasher returns the hasher used for whole-file hashes: a DummyHasher
// when SkipFileHash is set, blake2b (32-byte digest) for the new default
// compression level, and SHA-256 for legacy compression levels.
func (config *Config) NewFileHasher() hash.Hash {
	if SkipFileHash {
		return &DummyHasher{}
	} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
		hasher, _ := blake2.New(&blake2.Config{Size: 32})
		return hasher
	} else {
		return sha256.New()
	}
}
|
||||||
|
|
||||||
|
// Calculate the file hash using the corresponding hasher
|
||||||
|
func (config *Config) ComputeFileHash(path string, buffer []byte) string {
|
||||||
|
|
||||||
|
file, err := os.Open(path)
|
||||||
|
if err != nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := config.NewFileHasher()
|
||||||
|
defer file.Close()
|
||||||
|
|
||||||
|
count := 1
|
||||||
|
for count > 0 {
|
||||||
|
count, err = file.Read(buffer)
|
||||||
|
hasher.Write(buffer[:count])
|
||||||
|
}
|
||||||
|
|
||||||
|
return hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetChunkIDFromHash creates a chunk id from the chunk hash. The chunk id will be used as the name of the chunk
|
||||||
|
// file, so it is publicly exposed. The chunk hash is the HMAC-SHA256 of what is contained in the chunk and should
|
||||||
|
// never be exposed.
|
||||||
|
func (config *Config) GetChunkIDFromHash(hash string) string {
|
||||||
|
hasher := config.NewKeyedHasher(config.IDKey)
|
||||||
|
hasher.Write([]byte(hash))
|
||||||
|
return hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadConfig fetches and decodes the storage's "config" file. It returns
// (nil, false, nil) when no config exists, and isEncrypted=true when the file
// is encrypted but cannot be decoded (e.g. missing password or bad banner).
// Both the old format (fixed salt/iterations) and the new format
// (CONFIG_BANNER + salt + iterations + ciphertext) are supported.
func DownloadConfig(storage Storage, password string) (config *Config, isEncrypted bool, err error) {
	// Although the default key is passed to the function call the key is not actually used since there is no need to
	// calculate the hash or id of the config file.
	configFile := CreateChunk(CreateConfig(), true)

	exist, _, _, err := storage.GetFileInfo(0, "config")
	if err != nil {
		return nil, false, err
	}

	if !exist {
		return nil, false, nil
	}

	err = storage.DownloadFile(0, "config", configFile)
	if err != nil {
		return nil, false, err
	}

	if len(configFile.GetBytes()) < len(ENCRYPTION_BANNER) {
		return nil, false, fmt.Errorf("The storage has an invalid config file")
	}

	// Both banners share the same prefix; matching it without a password means
	// the storage was initialized encrypted.
	if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)-1]) == ENCRYPTION_BANNER[:len(ENCRYPTION_BANNER)-1] && len(password) == 0 {
		return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
	}

	var masterKey []byte

	if len(password) > 0 {

		if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)]) == ENCRYPTION_BANNER {
			// This is the old config format with a static salt and a fixed number of iterations
			masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
			LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
		} else if string(configFile.GetBytes()[:len(CONFIG_BANNER)]) == CONFIG_BANNER {
			// This is the new config format with a random salt and a configurable number of iterations
			encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4

			// Extract the salt and the number of iterations
			saltStart := configFile.GetBytes()[len(CONFIG_BANNER):]
			iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
			LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
			masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))

			// Copy to a temporary buffer to replace the banner and remove the salt and the number of iterations
			var encrypted bytes.Buffer
			encrypted.Write([]byte(ENCRYPTION_BANNER))
			encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])

			configFile.Reset(false)
			configFile.Write(encrypted.Bytes())
			// Sanity check: the two banners have equal length, so only the
			// salt and iteration count should have been removed.
			if len(configFile.GetBytes()) != encryptedLength {
				LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
			}
		} else {
			return nil, true, fmt.Errorf("The config file has an invalid banner")
		}

		// Decrypt the config file. masterKey == nil means no encryption.
		err = configFile.Decrypt(masterKey, "")
		if err != nil {
			return nil, false, fmt.Errorf("Failed to retrieve the config file: %v", err)
		}
	}

	config = CreateConfig()

	err = json.Unmarshal(configFile.GetBytes(), config)
	if err != nil {
		return nil, false, fmt.Errorf("Failed to parse the config file: %v", err)
	}

	// Let the storage derive its chunk directory nesting from the config.
	storage.SetNestingLevels(config)

	return config, false, nil

}
|
||||||
|
|
||||||
|
// UploadConfig serializes 'config' and uploads it as the storage's "config"
// file, encrypting it with a key derived from 'password' (random salt,
// 'iterations' rounds) when a password is given. It also creates the
// "chunks" and "snapshots" subdirectories. Returns false on failure.
func UploadConfig(storage Storage, config *Config, password string, iterations int) bool {

	// This is the key to encrypt the config file.
	var masterKey []byte
	salt := make([]byte, CONFIG_SALT_LENGTH)

	if len(password) > 0 {

		if len(password) < 8 {
			LOG_ERROR("CONFIG_PASSWORD", "The password must be at least 8 characters")
			return false
		}

		_, err := rand.Read(salt)
		if err != nil {
			LOG_ERROR("CONFIG_KEY", "Failed to generate random salt: %v", err)
			return false
		}

		masterKey = GenerateKeyFromPassword(password, salt, iterations)
	}

	description, err := json.MarshalIndent(config, "", " ")
	if err != nil {
		LOG_ERROR("CONFIG_MARSHAL", "Failed to marshal the config: %v", err)
		return false
	}

	// Although the default key is passed to the function call the key is not actually used since there is no need to
	// calculate the hash or id of the config file.
	chunk := CreateChunk(CreateConfig(), true)
	chunk.Write(description)

	if len(password) > 0 {
		// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
		err = chunk.Encrypt(masterKey, "", true)
		if err != nil {
			LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
			return false
		}

		// The new encrypted format for config is CONFIG_BANNER + salt + #iterations + encrypted content
		encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4

		// Copy to a temporary buffer to replace the banner and add the salt and the number of iterations
		var encrypted bytes.Buffer
		encrypted.Write([]byte(CONFIG_BANNER))
		encrypted.Write(salt)
		binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
		encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_BANNER):])

		chunk.Reset(false)
		chunk.Write(encrypted.Bytes())
		// Sanity check: the banners have equal length, so the rewrite should
		// have grown the file by exactly salt + iteration-count bytes.
		if len(chunk.GetBytes()) != encryptedLength {
			LOG_ERROR("CONFIG_CREATE", "Encrypted config has %d bytes instead of expected %d bytes", len(chunk.GetBytes()), encryptedLength)
		}
	}

	err = storage.UploadFile(0, "config", chunk.GetBytes())
	if err != nil {
		LOG_ERROR("CONFIG_INIT", "Failed to configure the storage: %v", err)
		return false
	}

	if IsTracing() {
		config.Print()
	}

	for _, subDir := range []string{"chunks", "snapshots"} {
		err = storage.CreateDirectory(0, subDir)
		if err != nil {
			LOG_ERROR("CONFIG_MKDIR", "Failed to create storage subdirectory: %v", err)
		}
	}

	return true
}
|
||||||
|
|
||||||
|
// ConfigStorage makes the general storage space available for storing duplicacy format snapshots. In essence,
|
||||||
|
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
|
||||||
|
// is enabled.
|
||||||
|
func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
|
||||||
|
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string, dataShards int, parityShards int) bool {
|
||||||
|
|
||||||
|
exist, _, _, err := storage.GetFileInfo(0, "config")
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CONFIG_INIT", "Failed to check if there is an existing config file: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if exist {
|
||||||
|
LOG_INFO("CONFIG_EXIST", "The storage has already been configured")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
|
||||||
|
copyFrom, bitCopy)
|
||||||
|
if config == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if keyFile != "" {
|
||||||
|
config.loadRSAPublicKey(keyFile)
|
||||||
|
}
|
||||||
|
|
||||||
|
config.DataShards = dataShards
|
||||||
|
config.ParityShards = parityShards
|
||||||
|
|
||||||
|
return UploadConfig(storage, config, password, iterations)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (config *Config) loadRSAPublicKey(keyFile string) {
|
||||||
|
encodedKey := []byte(keyFile)
|
||||||
|
var err error
|
||||||
|
|
||||||
|
// keyFile may be the actually key, in which case we don't need to read from a file
|
||||||
|
if !strings.Contains(keyFile, "-----BEGIN") {
|
||||||
|
encodedKey, err = ioutil.ReadFile(keyFile)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
decodedKey, _ := pem.Decode(encodedKey)
|
||||||
|
if decodedKey == nil {
|
||||||
|
LOG_ERROR("RSA_PUBLIC", "unrecognized public key in %s", keyFile)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if decodedKey.Type != "PUBLIC KEY" {
|
||||||
|
LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", decodedKey.Type, keyFile)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
parsedKey, err := x509.ParsePKIXPublicKey(decodedKey.Bytes)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("RSA_PUBLIC", "Failed to parse the public key in %s: %v", keyFile, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := parsedKey.(*rsa.PublicKey)
|
||||||
|
if !ok {
|
||||||
|
LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
config.rsaPublicKey = key
|
||||||
|
}
|
||||||
|
|
||||||
|
// loadRSAPrivateKey loads the specified private key file for decrypting file chunks.
// 'keyFile' may be either a path to a PEM file or the PEM-encoded key itself;
// 'passphrase', when non-empty, is used to decrypt a legacy-encrypted PEM block.
// On any failure an error is logged and config.rsaPrivateKey is left unset.
func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {

	// A private key is only meaningful if the storage was RSA-encrypted.
	if config.rsaPublicKey == nil {
		LOG_ERROR("RSA_PUBLIC", "The storage was not encrypted by an RSA key")
		return
	}

	encodedKey := []byte(keyFile)
	var err error

	// keyFile may be the actual key, in which case we don't need to read from a file
	if !strings.Contains(keyFile, "-----BEGIN") {
		encodedKey, err = ioutil.ReadFile(keyFile)
		if err != nil {
			LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
			return
		}
	}

	decodedKey, _ := pem.Decode(encodedKey)
	if decodedKey == nil {
		LOG_ERROR("RSA_PRIVATE", "unrecognized private key in %s", keyFile)
		return
	}
	if decodedKey.Type != "RSA PRIVATE KEY" {
		LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", decodedKey.Type, keyFile)
		return
	}

	// NOTE(review): x509.DecryptPEMBlock only supports legacy PEM encryption and
	// is deprecated in newer Go releases; also, a decryption failure is not
	// checked here — it surfaces below as a parse error instead.
	var decodedKeyBytes []byte
	if passphrase != "" {
		decodedKeyBytes, err = x509.DecryptPEMBlock(decodedKey, []byte(passphrase))
	} else {
		decodedKeyBytes = decodedKey.Bytes
	}

	// Try PKCS#1 first, then fall back to PKCS#8.
	var parsedKey interface{}
	if parsedKey, err = x509.ParsePKCS1PrivateKey(decodedKeyBytes); err != nil {
		if parsedKey, err = x509.ParsePKCS8PrivateKey(decodedKeyBytes); err != nil {
			LOG_ERROR("RSA_PRIVATE", "Failed to parse the private key in %s: %v", keyFile, err)
			return
		}
	}

	key, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
		return
	}

	// Verify that the private key matches the stored public key by round-tripping
	// 32 bytes of random data through OAEP encrypt/decrypt.
	data := make([]byte, 32)
	_, err = rand.Read(data)
	if err != nil {
		LOG_ERROR("RSA_PRIVATE", "Failed to generate random data for testing the private key: %v", err)
		return
	}

	// Now test if the private key matches the public key
	encryptedData, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, config.rsaPublicKey, data, nil)
	if err != nil {
		LOG_ERROR("RSA_PRIVATE", "Failed to encrypt random data with the public key: %v", err)
		return
	}

	decryptedData, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, key, encryptedData, nil)
	if err != nil {
		LOG_ERROR("RSA_PRIVATE", "Incorrect private key: %v", err)
		return
	}

	if !bytes.Equal(data, decryptedData) {
		LOG_ERROR("RSA_PRIVATE", "Decrypted data do not match the original data")
		return
	}

	config.rsaPrivateKey = key
}
|
||||||
242
src/duplicacy_dropboxstorage.go
Normal file
242
src/duplicacy_dropboxstorage.go
Normal file
@@ -0,0 +1,242 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gilbertchen/go-dropbox"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DropboxStorage is a storage backend backed by the Dropbox API.
type DropboxStorage struct {
	StorageBase

	clients        []*dropbox.Files // One API client per worker thread, indexed by threadIndex
	minimumNesting int              // The minimum level of directories to dive into before searching for the chunk file.
	storageDir     string           // Absolute path (leading '/', no trailing slash) prefixed to every file path
}
|
||||||
|
|
||||||
|
// CreateDropboxStorage creates a dropbox storage object.
|
||||||
|
func CreateDropboxStorage(accessToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
|
||||||
|
|
||||||
|
var clients []*dropbox.Files
|
||||||
|
for i := 0; i < threads; i++ {
|
||||||
|
client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
|
||||||
|
clients = append(clients, client)
|
||||||
|
}
|
||||||
|
|
||||||
|
if storageDir == "" || storageDir[0] != '/' {
|
||||||
|
storageDir = "/" + storageDir
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(storageDir) > 1 && storageDir[len(storageDir)-1] == '/' {
|
||||||
|
storageDir = storageDir[:len(storageDir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &DropboxStorage{
|
||||||
|
clients: clients,
|
||||||
|
storageDir: storageDir,
|
||||||
|
minimumNesting: minimumNesting,
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.CreateDirectory(0, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Can't create storage directory: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{1}, 1)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively).
// Directory names are returned with a trailing '/'; sizes[i] corresponds to files[i].
func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {

	// Normalize 'dir' to an absolute path without a trailing slash.
	if dir != "" && dir[0] != '/' {
		dir = "/" + dir
	}

	if len(dir) > 1 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	input := &dropbox.ListFolderInput{
		Path:             storage.storageDir + dir,
		Recursive:        false,
		IncludeMediaInfo: false,
		IncludeDeleted:   false,
	}

	output, err := storage.clients[threadIndex].ListFolder(input)

	// Dropbox pages the listing; keep fetching continuations until HasMore is
	// false.  The error check sits at the top of the loop so it covers both the
	// initial ListFolder call and every ListFolderContinue call.
	for {

		if err != nil {
			return nil, nil, err
		}

		for _, entry := range output.Entries {
			name := entry.Name
			// Directories are marked with a trailing '/'.
			if entry.Tag == "folder" {
				name += "/"
			}
			files = append(files, name)
			sizes = append(sizes, int64(entry.Size))
		}

		if output.HasMore {
			output, err = storage.clients[threadIndex].ListFolderContinue(
				&dropbox.ListFolderContinueInput{Cursor: output.Cursor})

		} else {
			break
		}

	}

	return files, sizes, nil
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *DropboxStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
if filePath != "" && filePath[0] != '/' {
|
||||||
|
filePath = "/" + filePath
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &dropbox.DeleteInput{
|
||||||
|
Path: storage.storageDir + filePath,
|
||||||
|
}
|
||||||
|
_, err = storage.clients[threadIndex].Delete(input)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path_lookup/not_found/") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *DropboxStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
if from != "" && from[0] != '/' {
|
||||||
|
from = "/" + from
|
||||||
|
}
|
||||||
|
if to != "" && to[0] != '/' {
|
||||||
|
to = "/" + to
|
||||||
|
}
|
||||||
|
input := &dropbox.MoveInput{
|
||||||
|
FromPath: storage.storageDir + from,
|
||||||
|
ToPath: storage.storageDir + to,
|
||||||
|
}
|
||||||
|
_, err = storage.clients[threadIndex].Move(input)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *DropboxStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
if dir != "" && dir[0] != '/' {
|
||||||
|
dir = "/" + dir
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(dir) > 1 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &dropbox.CreateFolderInput{
|
||||||
|
Path: storage.storageDir + dir,
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = storage.clients[threadIndex].CreateFolder(input)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/conflict/") {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *DropboxStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
|
if filePath != "" && filePath[0] != '/' {
|
||||||
|
filePath = "/" + filePath
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &dropbox.GetMetadataInput{
|
||||||
|
Path: storage.storageDir + filePath,
|
||||||
|
IncludeMediaInfo: false,
|
||||||
|
}
|
||||||
|
|
||||||
|
output, err := storage.clients[threadIndex].GetMetadata(input)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/not_found/") {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, output.Tag == "folder", int64(output.Size), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

	// Make the path absolute within the storage directory.
	if filePath != "" && filePath[0] != '/' {
		filePath = "/" + filePath
	}

	input := &dropbox.DownloadInput{
		Path: storage.storageDir + filePath,
	}

	output, err := storage.clients[threadIndex].Download(input)
	if err != nil {
		return err
	}

	// Deferred calls run LIFO: any remaining body bytes are drained first, then
	// the body is closed — presumably so the HTTP connection can be reused.
	defer output.Body.Close()
	defer ioutil.ReadAll(output.Body)

	// The download rate limit is shared evenly among all client threads.
	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
	return err

}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
if filePath != "" && filePath[0] != '/' {
|
||||||
|
filePath = "/" + filePath
|
||||||
|
}
|
||||||
|
|
||||||
|
input := &dropbox.UploadInput{
|
||||||
|
Path: storage.storageDir + filePath,
|
||||||
|
Mode: dropbox.WriteModeOverwrite,
|
||||||
|
AutoRename: false,
|
||||||
|
Mute: true,
|
||||||
|
Reader: CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.clients)),
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = storage.clients[threadIndex].Upload(input)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCacheNeeded returns true: a local snapshot cache is needed for the storage
// to avoid downloading/uploading chunks too often when managing snapshots.
func (storage *DropboxStorage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented returns true: the 'MoveFile' method is implemented.
func (storage *DropboxStorage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent returns false: the storage cannot guarantee strong consistency.
func (storage *DropboxStorage) IsStrongConsistent() bool { return false }

// IsFastListing returns false: the storage does not support fast listing of file names.
func (storage *DropboxStorage) IsFastListing() bool { return false }

// EnableTestMode enables the test mode (a no-op for this storage).
func (storage *DropboxStorage) EnableTestMode() {}
|
||||||
599
src/duplicacy_entry.go
Normal file
599
src/duplicacy_entry.go
Normal file
@@ -0,0 +1,599 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/base64"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This is the hidden directory in the repository for storing various files.
var DUPLICACY_DIRECTORY = ".duplicacy"

// NOTE(review): same name as the directory; its use is not visible in this
// chunk — presumably a '.duplicacy' file alternative to the directory.
var DUPLICACY_FILE = ".duplicacy"

// Mask for file permission bits (permissions plus setuid/setgid/sticky).
var fileModeMask = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky

// Regex for matching 'StartChunk:StartOffset:EndChunk:EndOffset' as stored in
// the "content" field of a snapshot entry.
var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)
|
||||||
|
|
||||||
|
// Entry encapsulates information about a file or directory.
type Entry struct {
	Path string // Path relative to the repository root; directory paths end with '/'
	Size int64  // File size in bytes
	Time int64  // Modification time (Unix seconds)
	Mode uint32 // os.FileMode bits stored as uint32
	Link string // Symlink target; empty for non-symlinks
	Hash string // Content hash of the file

	UID int // Owner user id; -1 when unknown
	GID int // Owner group id; -1 when unknown

	// The file content occupies the byte range from (StartChunk, StartOffset)
	// to (EndChunk, EndOffset) in the snapshot's chunk sequence.
	StartChunk  int
	StartOffset int
	EndChunk    int
	EndOffset   int

	// Extended attributes, keyed by attribute name.
	Attributes map[string][]byte
}
|
||||||
|
|
||||||
|
// CreateEntry creates an entry from file properties.
|
||||||
|
func CreateEntry(path string, size int64, time int64, mode uint32) *Entry {
|
||||||
|
|
||||||
|
if len(path) > 0 && path[len(path)-1] != '/' && (mode&uint32(os.ModeDir)) != 0 {
|
||||||
|
path += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Entry{
|
||||||
|
Path: path,
|
||||||
|
Size: size,
|
||||||
|
Time: time,
|
||||||
|
Mode: mode,
|
||||||
|
|
||||||
|
UID: -1,
|
||||||
|
GID: -1,
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateEntryFromFileInfo creates an entry from a 'FileInfo' object.
// 'directory' is the parent path (expected to end with '/') prepended to the name.
func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {
	path := directory + fileInfo.Name()

	mode := fileInfo.Mode()

	// If both the directory and symlink bits are set, treat the entry as a
	// symlink only by clearing the directory bit.
	if mode&os.ModeDir != 0 && mode&os.ModeSymlink != 0 {
		mode ^= os.ModeDir
	}

	// Directory paths always carry a trailing slash.
	if path[len(path)-1] != '/' && mode&os.ModeDir != 0 {
		path += "/"
	}

	entry := &Entry{
		Path: path,
		Size: fileInfo.Size(),
		Time: fileInfo.ModTime().Unix(),
		Mode: uint32(mode),
	}

	// Fill in ownership (UID/GID); helper defined elsewhere, platform-specific.
	GetOwner(entry, &fileInfo)

	return entry
}
|
||||||
|
|
||||||
|
// CreateEntryFromJSON creates an entry from a json description.
|
||||||
|
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
|
||||||
|
|
||||||
|
var object map[string]interface{}
|
||||||
|
|
||||||
|
err = json.Unmarshal(description, &object)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
var value interface{}
|
||||||
|
var ok bool
|
||||||
|
|
||||||
|
if value, ok = object["name"]; ok {
|
||||||
|
pathInBase64, ok := value.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("Name is not a string for a file in the snapshot")
|
||||||
|
}
|
||||||
|
path, err := base64.StdEncoding.DecodeString(pathInBase64)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Invalid name '%s' in the snapshot", pathInBase64)
|
||||||
|
}
|
||||||
|
entry.Path = string(path)
|
||||||
|
} else if value, ok = object["path"]; !ok {
|
||||||
|
return fmt.Errorf("Path is not specified for a file in the snapshot")
|
||||||
|
} else if entry.Path, ok = value.(string); !ok {
|
||||||
|
return fmt.Errorf("Path is not a string for a file in the snapshot")
|
||||||
|
}
|
||||||
|
|
||||||
|
if value, ok = object["size"]; !ok {
|
||||||
|
return fmt.Errorf("Size is not specified for file '%s' in the snapshot", entry.Path)
|
||||||
|
} else if _, ok = value.(float64); !ok {
|
||||||
|
return fmt.Errorf("Size is not a valid integer for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
entry.Size = int64(value.(float64))
|
||||||
|
|
||||||
|
if value, ok = object["time"]; !ok {
|
||||||
|
return fmt.Errorf("Time is not specified for file '%s' in the snapshot", entry.Path)
|
||||||
|
} else if _, ok = value.(float64); !ok {
|
||||||
|
return fmt.Errorf("Time is not a valid integer for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
entry.Time = int64(value.(float64))
|
||||||
|
|
||||||
|
if value, ok = object["mode"]; !ok {
|
||||||
|
return fmt.Errorf("float64 is not specified for file '%s' in the snapshot", entry.Path)
|
||||||
|
} else if _, ok = value.(float64); !ok {
|
||||||
|
return fmt.Errorf("Mode is not a valid integer for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
entry.Mode = uint32(value.(float64))
|
||||||
|
|
||||||
|
if value, ok = object["hash"]; !ok {
|
||||||
|
return fmt.Errorf("Hash is not specified for file '%s' in the snapshot", entry.Path)
|
||||||
|
} else if entry.Hash, ok = value.(string); !ok {
|
||||||
|
return fmt.Errorf("Hash is not a string for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
if value, ok = object["link"]; ok {
|
||||||
|
var link string
|
||||||
|
if link, ok = value.(string); !ok {
|
||||||
|
return fmt.Errorf("Symlink is not a valid string for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
entry.Link = link
|
||||||
|
}
|
||||||
|
|
||||||
|
entry.UID = -1
|
||||||
|
if value, ok = object["uid"]; ok {
|
||||||
|
if _, ok = value.(float64); ok {
|
||||||
|
entry.UID = int(value.(float64))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
entry.GID = -1
|
||||||
|
if value, ok = object["gid"]; ok {
|
||||||
|
if _, ok = value.(float64); ok {
|
||||||
|
entry.GID = int(value.(float64))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if value, ok = object["attributes"]; ok {
|
||||||
|
if attributes, ok := value.(map[string]interface{}); !ok {
|
||||||
|
return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
|
||||||
|
} else {
|
||||||
|
entry.Attributes = make(map[string][]byte)
|
||||||
|
for name, object := range attributes {
|
||||||
|
if object == nil {
|
||||||
|
entry.Attributes[name] = []byte("")
|
||||||
|
} else if attributeInBase64, ok := object.(string); !ok {
|
||||||
|
return fmt.Errorf("Attribute '%s' is invalid for file '%s' in the snapshot", name, entry.Path)
|
||||||
|
} else if attribute, err := base64.StdEncoding.DecodeString(attributeInBase64); err != nil {
|
||||||
|
return fmt.Errorf("Failed to decode attribute '%s' for file '%s' in the snapshot: %v",
|
||||||
|
name, entry.Path, err)
|
||||||
|
} else {
|
||||||
|
entry.Attributes[name] = attribute
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.IsFile() && entry.Size > 0 {
|
||||||
|
if value, ok = object["content"]; !ok {
|
||||||
|
return fmt.Errorf("Content is not specified for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
if content, ok := value.(string); !ok {
|
||||||
|
return fmt.Errorf("Content is invalid for file '%s' in the snapshot", entry.Path)
|
||||||
|
} else {
|
||||||
|
|
||||||
|
matched := contentRegex.FindStringSubmatch(content)
|
||||||
|
if matched == nil {
|
||||||
|
return fmt.Errorf("Content is specified in a wrong format for file '%s' in the snapshot", entry.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
entry.StartChunk, _ = strconv.Atoi(matched[1])
|
||||||
|
entry.StartOffset, _ = strconv.Atoi(matched[2])
|
||||||
|
entry.EndChunk, _ = strconv.Atoi(matched[3])
|
||||||
|
entry.EndOffset, _ = strconv.Atoi(matched[4])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) convertToObject(encodeName bool) map[string]interface{} {
|
||||||
|
|
||||||
|
object := make(map[string]interface{})
|
||||||
|
|
||||||
|
if encodeName {
|
||||||
|
object["name"] = base64.StdEncoding.EncodeToString([]byte(entry.Path))
|
||||||
|
} else {
|
||||||
|
object["path"] = entry.Path
|
||||||
|
}
|
||||||
|
object["size"] = entry.Size
|
||||||
|
object["time"] = entry.Time
|
||||||
|
object["mode"] = entry.Mode
|
||||||
|
object["hash"] = entry.Hash
|
||||||
|
|
||||||
|
if entry.IsLink() {
|
||||||
|
object["link"] = entry.Link
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.IsFile() && entry.Size > 0 {
|
||||||
|
object["content"] = fmt.Sprintf("%d:%d:%d:%d",
|
||||||
|
entry.StartChunk, entry.StartOffset, entry.EndChunk, entry.EndOffset)
|
||||||
|
}
|
||||||
|
|
||||||
|
if entry.UID != -1 && entry.GID != -1 {
|
||||||
|
object["uid"] = entry.UID
|
||||||
|
object["gid"] = entry.GID
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(entry.Attributes) > 0 {
|
||||||
|
object["attributes"] = entry.Attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
return object
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalJSON returns the json description of an entry.
|
||||||
|
func (entry *Entry) MarshalJSON() ([]byte, error) {
|
||||||
|
|
||||||
|
object := entry.convertToObject(true)
|
||||||
|
description, err := json.Marshal(object)
|
||||||
|
return description, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsFile reports whether the entry is a regular file (no type bits set).
func (entry *Entry) IsFile() bool {
	return entry.Mode&uint32(os.ModeType) == 0
}

// IsDir reports whether the entry is a directory.
func (entry *Entry) IsDir() bool {
	return entry.Mode&uint32(os.ModeDir) != 0
}

// IsLink reports whether the entry is a symbolic link.
func (entry *Entry) IsLink() bool {
	return entry.Mode&uint32(os.ModeSymlink) != 0
}

// GetPermissions returns the permission bits of the entry's mode, including
// the setuid/setgid/sticky bits (see fileModeMask).
func (entry *Entry) GetPermissions() os.FileMode {
	return os.FileMode(entry.Mode) & fileModeMask
}
|
||||||
|
|
||||||
|
func (entry *Entry) IsSameAs(other *Entry) bool {
|
||||||
|
return entry.Size == other.Size && entry.Time <= other.Time+1 && entry.Time >= other.Time-1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) IsSameAsFileInfo(other os.FileInfo) bool {
|
||||||
|
time := other.ModTime().Unix()
|
||||||
|
return entry.Size == other.Size() && entry.Time <= time+1 && entry.Time >= time-1
|
||||||
|
}
|
||||||
|
|
||||||
|
// String formats the entry for display: the size right-aligned in
// 'maxSizeDigits' columns, the modification time, the hash (right-aligned in
// 64 columns), and the path.
func (entry *Entry) String(maxSizeDigits int) string {
	modifiedTime := time.Unix(entry.Time, 0).Format("2006-01-02 15:04:05")
	return fmt.Sprintf("%*d %s %64s %s", maxSizeDigits, entry.Size, modifiedTime, entry.Hash, entry.Path)
}
|
||||||
|
|
||||||
|
// RestoreMetadata applies the entry's metadata — ownership (when 'setOwner' is
// true), permissions, modification time, and extended attributes — to the file
// at 'fullPath'.  'fileInfo' may be nil, in which case the file is Lstat'ed
// here.  Returns false (after logging an error) if any step fails.
func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setOwner bool) bool {

	if fileInfo == nil {
		stat, err := os.Lstat(fullPath)
		fileInfo = &stat
		if err != nil {
			LOG_ERROR("RESTORE_STAT", "Failed to retrieve the file info: %v", err)
			return false
		}
	}

	// Note that chown can remove setuid/setgid bits so should be called before chmod
	if setOwner {
		if !SetOwner(fullPath, entry, fileInfo) {
			return false
		}
	}

	// Only set the permission if the file is not a symlink
	if !entry.IsLink() && (*fileInfo).Mode()&fileModeMask != entry.GetPermissions() {
		err := os.Chmod(fullPath, entry.GetPermissions())
		if err != nil {
			LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
			return false
		}
	}

	// Only set the time if the file is not a symlink
	if !entry.IsLink() && (*fileInfo).ModTime().Unix() != entry.Time {
		modifiedTime := time.Unix(entry.Time, 0)
		err := os.Chtimes(fullPath, modifiedTime, modifiedTime)
		if err != nil {
			LOG_ERROR("RESTORE_CHTIME", "Failed to set the modification time: %v", err)
			return false
		}
	}

	// Extended attributes are restored by a platform-specific helper.
	if len(entry.Attributes) > 0 {
		entry.SetAttributesToFile(fullPath)
	}

	return true
}
|
||||||
|
|
||||||
|
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
// Files are always arranged before subdirectories under the same parent directory.
// The comparison is byte-wise on the paths, with path components that continue
// into subdirectories (i.e. contain a later '/') ranked after plain files.
func (left *Entry) Compare(right *Entry) int {

	path1 := left.Path
	path2 := right.Path

	// Advance p to the first byte position where the two paths differ.
	p := 0
	for ; p < len(path1) && p < len(path2); p++ {
		if path1[p] != path2[p] {
			break
		}
	}

	// c1, c2 is the first byte that differs (zero if that path has ended)
	var c1, c2 byte
	if p < len(path1) {
		c1 = path1[p]
	}
	if p < len(path2) {
		c2 = path2[p]
	}

	// c3, c4 indicates how the current component ends
	// c3 == '/': the current component is a directory
	// c3 != '/': the current component is the last one
	c3 := c1
	for i := p; c3 != '/' && i < len(path1); i++ {
		c3 = path1[i]
	}

	c4 := c2
	for i := p; c4 != '/' && i < len(path2); i++ {
		c4 = path2[i]
	}

	if c3 == '/' {
		if c4 == '/' {
			// We are comparing two directory components
			if c1 == '/' {
				// left is shorter
				// Note that c2 maybe smaller than c1 but c1 is '/' which is counted
				// as 0
				return -1
			} else if c2 == '/' {
				// right is shorter
				return 1
			} else {
				return int(c1) - int(c2)
			}
		} else {
			// left's component is a directory, right's is a file: files first
			return 1
		}
	} else {
		// We're at the last component of left and left is a file
		if c4 == '/' {
			// the current component of right is a directory
			return -1
		} else {
			return int(c1) - int(c2)
		}
	}
}
|
||||||
|
|
||||||
|
// ByName is used to sort entries by their names, delegating to Entry.Compare
// so that files sort before subdirectories under the same parent directory.
type ByName []*Entry

func (entries ByName) Len() int      { return len(entries) }
func (entries ByName) Swap(i, j int) { entries[i], entries[j] = entries[j], entries[i] }
func (entries ByName) Less(i, j int) bool {
	return entries[i].Compare(entries[j]) < 0
}
|
||||||
|
|
||||||
|
// ByChunk is used to sort entries by their starting chunks (and starting
// offsets if the starting chunks are the same).
type ByChunk []*Entry

func (entries ByChunk) Len() int      { return len(entries) }
func (entries ByChunk) Swap(i, j int) { entries[i], entries[j] = entries[j], entries[i] }
func (entries ByChunk) Less(i, j int) bool {
	return entries[i].StartChunk < entries[j].StartChunk ||
		(entries[i].StartChunk == entries[j].StartChunk && entries[i].StartOffset < entries[j].StartOffset)
}
|
||||||
|
|
||||||
|
// This is used to sort FileInfo objects.
|
||||||
|
type FileInfoCompare []os.FileInfo
|
||||||
|
|
||||||
|
func (files FileInfoCompare) Len() int { return len(files) }
|
||||||
|
func (files FileInfoCompare) Swap(i, j int) { files[i], files[j] = files[j], files[i] }
|
||||||
|
func (files FileInfoCompare) Less(i, j int) bool {
|
||||||
|
|
||||||
|
left := files[i]
|
||||||
|
right := files[j]
|
||||||
|
|
||||||
|
if left.IsDir() && left.Mode()&os.ModeSymlink == 0 {
|
||||||
|
if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
|
||||||
|
return left.Name() < right.Name()
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
|
||||||
|
return true
|
||||||
|
} else {
|
||||||
|
return left.Name() < right.Name()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
//
// Subdirectories found are returned in 'directoryList' (in reverse sorted
// order — see the reversal loop at the end); non-directories are appended to
// the caller-supplied 'fileList'. Files that could not be processed are
// reported by path in 'skippedFiles' rather than failing the whole listing.
// If 'nobackupFile' names an existing file in this directory, the entire
// directory is skipped.
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, nobackupFile string, discardAttributes bool, excludeByAttribute bool) (directoryList []*Entry,
	skippedFiles []string, err error) {

	LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)

	fullPath := joinPath(top, path)

	files := make([]os.FileInfo, 0, 1024)

	files, err = ioutil.ReadDir(fullPath)
	if err != nil {
		return directoryList, nil, err
	}

	// This binary search works because ioutil.ReadDir returns files sorted by Name() by default
	if nobackupFile != "" {
		ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0 })
		if ii < len(files) && files[ii].Name() == nobackupFile {
			LOG_DEBUG("LIST_NOBACKUP", "%s is excluded due to nobackup file", path)
			return directoryList, skippedFiles, nil
		}
	}

	// Ensure the directory prefix used for entry paths ends with '/'.
	normalizedPath := path
	if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath)-1] != '/' {
		normalizedPath += "/"
	}

	// Ditto for the repository root, used below to detect symlink targets
	// that point outside the repository.
	normalizedTop := top
	if normalizedTop != "" && normalizedTop[len(normalizedTop)-1] != '/' {
		normalizedTop += "/"
	}

	// Re-sort so files come before real directories (see FileInfoCompare).
	sort.Sort(FileInfoCompare(files))

	entries := make([]*Entry, 0, 4)

	for _, f := range files {
		// Never back up duplicacy's own metadata directory.
		if f.Name() == DUPLICACY_DIRECTORY {
			continue
		}
		entry := CreateEntryFromFileInfo(f, normalizedPath)
		if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
			continue
		}
		if entry.IsLink() {
			isRegular := false
			isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
			if err != nil {
				LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
				skippedFiles = append(skippedFiles, entry.Path)
				continue
			}

			if isRegular {
				// Readlink reported the target is a regular file; drop the
				// symlink bit so the entry is treated as a plain file.
				entry.Mode ^= uint32(os.ModeSymlink)
			} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
				// A first-level symlink pointing outside the repository is
				// followed: back up the target instead of the link itself.
				stat, err := os.Stat(joinPath(top, entry.Path))
				if err != nil {
					LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
					skippedFiles = append(skippedFiles, entry.Path)
					continue
				}

				newEntry := CreateEntryFromFileInfo(stat, "")
				if runtime.GOOS == "windows" {
					// On Windows, stat.Name() is the last component of the target, so we need to construct the correct
					// path from f.Name(); note that a "/" is append assuming a symbolic link is always a directory
					newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
				}
				if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
					continue
				}
				entry = newEntry
			}
		}

		if !discardAttributes {
			entry.ReadAttributes(top)
		}

		if excludeByAttribute && excludedByAttribute(entry.Attributes) {
			LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute", entry.Path)
			continue
		}

		// Pipes, sockets and devices cannot be backed up meaningfully.
		if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
			LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
			skippedFiles = append(skippedFiles, entry.Path)
			continue
		}

		entries = append(entries, entry)
	}

	// For top level directory we need to sort again because symlinks may have been changed
	if path == "" {
		sort.Sort(ByName(entries))
	}

	for _, entry := range entries {
		if entry.IsDir() {
			directoryList = append(directoryList, entry)
		} else {
			*fileList = append(*fileList, entry)
		}
	}

	// Reverse directoryList; callers pop from the end, so this yields
	// directories in ascending order during traversal.
	for i, j := 0, len(directoryList)-1; i < j; i, j = i+1, j-1 {
		directoryList[i], directoryList[j] = directoryList[j], directoryList[i]
	}

	return directoryList, skippedFiles, nil
}
|
||||||
|
|
||||||
|
// Diff returns the number of bytes of this entry that are modified relative
// to another file described by 'otherHashes'/'otherLengths'. The two chunk
// sequences are walked in lockstep by byte offset; chunks that line up with
// identical hashes and lengths contribute nothing to the result.
func (entry *Entry) Diff(chunkHashes []string, chunkLengths []int,
	otherHashes []string, otherLengths []int) (modifiedLength int64) {

	// offset1/offset2 track how many bytes have been consumed from this
	// entry and from the other file, respectively.
	var offset1, offset2 int64
	i1 := entry.StartChunk
	i2 := 0
	for i1 <= entry.EndChunk && i2 < len(otherHashes) {

		// [start, end) is the portion of chunk i1 that belongs to this entry;
		// only the first and last chunks may be partially owned.
		start := 0
		if i1 == entry.StartChunk {
			start = entry.StartOffset
		}
		end := chunkLengths[i1]
		if i1 == entry.EndChunk {
			end = entry.EndOffset
		}

		if offset1 < offset2 {
			// This entry lags behind; its current chunk has no aligned
			// counterpart, so count it as modified and advance.
			modifiedLength += int64(end - start)
			offset1 += int64(end - start)
			i1++
		} else if offset1 > offset2 {
			// The other file lags behind; skip its chunk.
			offset2 += int64(otherLengths[i2])
			i2++
		} else {
			// Offsets are aligned: compare the chunks directly.
			if chunkHashes[i1] == otherHashes[i2] && end-start == otherLengths[i2] {
			} else {
				modifiedLength += int64(chunkLengths[i1])
			}
			offset1 += int64(end - start)
			offset2 += int64(otherLengths[i2])
			i1++
			i2++
		}
	}

	return modifiedLength
}
|
||||||
329
src/duplicacy_entry_test.go
Normal file
329
src/duplicacy_entry_test.go
Normal file
@@ -0,0 +1,329 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/gilbertchen/xattr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestEntrySort verifies that Entry.Compare orders paths exactly as listed in
// DATA: files before subdirectory contents, with '/' treated specially so a
// directory sorts together with its children.
func TestEntrySort(t *testing.T) {

	// DATA is listed in the exact order Compare is expected to produce;
	// names ending in '/' are created as directory entries. The \xBB/\xFF
	// names exercise non-UTF8 byte comparison.
	DATA := [...]string{
		"ab",
		"ab-",
		"ab0",
		"ab1",
		"\xBB\xDDfile",
		"\xFF\xDDfile",
		"ab/",
		"ab/c",
		"ab+/c-",
		"ab+/c0",
		"ab+/c/",
		"ab+/c/d",
		"ab+/c+/",
		"ab+/c+/d",
		"ab+/c0/",
		"ab+/c0/d",
		"ab-/",
		"ab-/c",
		"ab0/",
		"ab1/",
		"ab1/c",
		"ab1/\xBB\xDDfile",
		"ab1/\xFF\xDDfile",
	}

	var entry1, entry2 *Entry

	// Compare every ordered pair; the sign of Compare must match the sign
	// of the index difference.
	for i, p1 := range DATA {
		if p1[len(p1)-1] == '/' {
			entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
		} else {
			entry1 = CreateEntry(p1, 0, 0, 0700)
		}
		for j, p2 := range DATA {

			if p2[len(p2)-1] == '/' {
				entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
			} else {
				entry2 = CreateEntry(p2, 0, 0, 0700)
			}

			compared := entry1.Compare(entry2)

			// Normalize to -1/0/1 for comparison with the expected sign.
			if compared < 0 {
				compared = -1
			} else if compared > 0 {
				compared = 1
			}

			var expected int
			if i < j {
				expected = -1
			} else if i > j {
				expected = 1
			} else {
				expected = 0
			}

			if compared != expected {
				t.Errorf("%s vs %s: %d, expected: %d", p1, p2, compared, expected)
			}

		}
	}
}
|
||||||
|
|
||||||
|
// TestEntryList creates the DATA tree on disk, then verifies that a full
// ListEntries traversal yields the entries in exactly the DATA order, and
// that sorting a shuffled copy with ByName restores that order.
func TestEntryList(t *testing.T) {

	testDir := filepath.Join(os.TempDir(), "duplicacy_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	// DATA is in the expected traversal order; names ending in '/' are
	// directories.
	DATA := [...]string{
		"ab",
		"ab-",
		"ab0",
		"ab1",
		"ab+/",
		"ab+/c",
		"ab+/c+",
		"ab+/c1",
		"ab+/c-/",
		"ab+/c-/d",
		"ab+/c0/",
		"ab+/c0/d",
		"ab2/",
		"ab2/c",
		"ab3/",
		"ab3/c",
	}

	var entry1, entry2 *Entry

	// First sanity-check Entry.Compare against the DATA ordering (same
	// scheme as TestEntrySort).
	for i, p1 := range DATA {
		if p1[len(p1)-1] == '/' {
			entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
		} else {
			entry1 = CreateEntry(p1, 0, 0, 0700)
		}
		for j, p2 := range DATA {

			if p2[len(p2)-1] == '/' {
				entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
			} else {
				entry2 = CreateEntry(p2, 0, 0, 0700)
			}

			compared := entry1.Compare(entry2)

			if compared < 0 {
				compared = -1
			} else if compared > 0 {
				compared = 1
			}

			var expected int
			if i < j {
				expected = -1
			} else if i > j {
				expected = 1
			} else {
				expected = 0
			}

			if compared != expected {
				t.Errorf("%s vs %s: %d, expected: %d", p1, p2, compared, expected)
			}

		}
	}

	// Materialize the DATA tree on disk.
	for _, file := range DATA {

		fullPath := filepath.Join(testDir, file)
		if file[len(file)-1] == '/' {
			err := os.Mkdir(fullPath, 0700)
			if err != nil {
				t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
			}
			continue
		}

		err := ioutil.WriteFile(fullPath, []byte(file), 0700)
		if err != nil {
			t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
		}
	}

	// Walk the tree with ListEntries, popping pending directories from the
	// end of the stack as a real backup traversal does.
	directories := make([]*Entry, 0, 4)
	directories = append(directories, CreateEntry("", 0, 0, 0))

	entries := make([]*Entry, 0, 4)

	for len(directories) > 0 {
		directory := directories[len(directories)-1]
		directories = directories[:len(directories)-1]
		entries = append(entries, directory)
		subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false, false)
		if err != nil {
			t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
		}
		directories = append(directories, subdirectories...)
	}

	// Drop the artificial root entry.
	entries = entries[1:]

	for _, entry := range entries {
		t.Logf("entry: %s", entry.Path)
	}

	if len(entries) != len(DATA) {
		t.Errorf("Got %d entries instead of %d", len(entries), len(DATA))
		return
	}

	for i := 0; i < len(entries); i++ {
		if entries[i].Path != DATA[i] {
			t.Errorf("entry: %s, expected: %s", entries[i].Path, DATA[i])
		}
	}

	// Shuffle and re-sort with ByName; the DATA order must come back.
	t.Logf("shuffling %d entries", len(entries))
	for i := range entries {
		j := rand.Intn(i + 1)
		entries[i], entries[j] = entries[j], entries[i]
	}

	sort.Sort(ByName(entries))

	for i := 0; i < len(entries); i++ {
		if entries[i].Path != DATA[i] {
			t.Errorf("entry: %s, expected: %s", entries[i].Path, DATA[i])
		}
	}

	// Keep the tree around for inspection on failure.
	if !t.Failed() {
		os.RemoveAll(testDir)
	}

}
|
||||||
|
|
||||||
|
// TestEntryExcludeByAttribute tests the excludeByAttribute parameter to the ListEntries function
|
||||||
|
func TestEntryExcludeByAttribute(t *testing.T) {
|
||||||
|
|
||||||
|
if !(runtime.GOOS == "darwin" || runtime.GOOS == "linux") {
|
||||||
|
t.Skip("skipping test not darwin or linux")
|
||||||
|
}
|
||||||
|
|
||||||
|
testDir := filepath.Join(os.TempDir(), "duplicacy_test")
|
||||||
|
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
os.MkdirAll(testDir, 0700)
|
||||||
|
|
||||||
|
// Files or folders named with "exclude" below will have the exclusion attribute set on them
|
||||||
|
// When ListEntries is called with excludeByAttribute true, they should be excluded.
|
||||||
|
DATA := [...]string{
|
||||||
|
"excludefile",
|
||||||
|
"includefile",
|
||||||
|
"excludedir/",
|
||||||
|
"excludedir/file",
|
||||||
|
"includedir/",
|
||||||
|
"includedir/includefile",
|
||||||
|
"includedir/excludefile",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range DATA {
|
||||||
|
fullPath := filepath.Join(testDir, file)
|
||||||
|
if file[len(file)-1] == '/' {
|
||||||
|
err := os.Mkdir(fullPath, 0700)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err := ioutil.WriteFile(fullPath, []byte(file), 0700)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range DATA {
|
||||||
|
fullPath := filepath.Join(testDir, file)
|
||||||
|
if strings.Contains(file, "exclude") {
|
||||||
|
xattr.Setxattr(fullPath, "com.apple.metadata:com_apple_backup_excludeItem", []byte("com.apple.backupd"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, excludeByAttribute := range [2]bool{true, false} {
|
||||||
|
t.Logf("testing excludeByAttribute: %t", excludeByAttribute)
|
||||||
|
directories := make([]*Entry, 0, 4)
|
||||||
|
directories = append(directories, CreateEntry("", 0, 0, 0))
|
||||||
|
|
||||||
|
entries := make([]*Entry, 0, 4)
|
||||||
|
|
||||||
|
for len(directories) > 0 {
|
||||||
|
directory := directories[len(directories)-1]
|
||||||
|
directories = directories[:len(directories)-1]
|
||||||
|
entries = append(entries, directory)
|
||||||
|
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false, excludeByAttribute)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
|
||||||
|
}
|
||||||
|
directories = append(directories, subdirectories...)
|
||||||
|
}
|
||||||
|
|
||||||
|
entries = entries[1:]
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
t.Logf("entry: %s", entry.Path)
|
||||||
|
}
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for _, file := range DATA {
|
||||||
|
entryFound := false
|
||||||
|
var entry *Entry
|
||||||
|
for _, entry = range entries {
|
||||||
|
if entry.Path == file {
|
||||||
|
entryFound = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if excludeByAttribute && strings.Contains(file, "exclude") {
|
||||||
|
if entryFound {
|
||||||
|
t.Errorf("file: %s, expected to be excluded but wasn't. attributes: %v", file, entry.Attributes)
|
||||||
|
i++
|
||||||
|
} else {
|
||||||
|
t.Logf("file: %s, excluded", file)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if entryFound {
|
||||||
|
t.Logf("file: %s, included. attributes: %v", file, entry.Attributes)
|
||||||
|
i++
|
||||||
|
} else {
|
||||||
|
t.Errorf("file: %s, expected to be included but wasn't", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if !t.Failed() {
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
618
src/duplicacy_filefabricstorage.go
Normal file
618
src/duplicacy_filefabricstorage.go
Normal file
@@ -0,0 +1,618 @@
|
|||||||
|
// Copyright (c) Storage Made Easy. All rights reserved.
|
||||||
|
//
|
||||||
|
// This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
|
||||||
|
// Duplicacy and its derivative works.
|
||||||
|
//
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
"sync"
|
||||||
|
"bytes"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"net/url"
|
||||||
|
"net/http"
|
||||||
|
"math/rand"
|
||||||
|
"io/ioutil"
|
||||||
|
"encoding/xml"
|
||||||
|
"path/filepath"
|
||||||
|
"mime/multipart"
|
||||||
|
)
|
||||||
|
|
||||||
|
// The XML element representing a file returned by the File Fabric server
type FileFabricFile struct {
	XMLName xml.Name
	ID      string `xml:"fi_id"`   // server-assigned id of the file
	Path    string `xml:"path"`    // path of the file on the server
	Size    int64  `xml:"fi_size"` // size in bytes
	Type    int    `xml:"fi_type"` // 0 for regular files, 1 for directories (see ListFiles/getFileInfo)
}
|
||||||
|
|
||||||
|
// The XML element representing a file list returned by the server
type FileFabricFileList struct {
	XMLName xml.Name         `xml:"files"`
	Files   []FileFabricFile `xml:",any"` // every child element is decoded as a file entry
}
|
||||||
|
|
||||||
|
// FileFabricStorage is a Duplicacy storage backend that talks to a
// Storage Made Easy File Fabric server over its XML HTTP API.
type FileFabricStorage struct {
	StorageBase

	endpoint     string // the server
	authToken    string // the authentication token
	accessToken  string // the access token (as returned by getTokenByAuthToken)
	storageDir   string // the path of the storage directory
	storageDirID string // the id of 'storageDir'

	client             *http.Client      // the default http client
	threads            int               // number of threads
	maxRetries         int               // maximum number of tries
	directoryCache     map[string]string // stores ids for directories known to this backend
	directoryCacheLock sync.Mutex        // lock for accessing directoryCache

	isAuthorized bool
	testMode     bool // when true, listing uses a tiny page size to exercise pagination
}
|
||||||
|
|
||||||
|
// Sentinel errors recognized by the backend's control flow
// (e.g. checkFileFabricResponse and createParentDirectory).
var (
	errFileFabricAuthorizationFailure = errors.New("Authentication failure")
	errFileFabricDirectoryExists      = errors.New("Directory exists")
)
|
||||||
|
|
||||||
|
// The general server response
type FileFabricResponse struct {
	Status  string `xml:"status"`        // "ok" on success
	Message string `xml:"statusmessage"` // "Success" on success, otherwise an error description
}
|
||||||
|
|
||||||
|
// Check the server response and return an error representing the error message it contains
|
||||||
|
func checkFileFabricResponse(response FileFabricResponse, actionFormat string, actionArguments ...interface{}) error {
|
||||||
|
|
||||||
|
action := fmt.Sprintf(actionFormat, actionArguments...)
|
||||||
|
if response.Status == "ok" && response.Message == "Success" {
|
||||||
|
return nil
|
||||||
|
} else if response.Status == "error_data" {
|
||||||
|
if response.Message == "Folder with same name already exists." {
|
||||||
|
return errFileFabricDirectoryExists
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf("Failed to %s (status: %s, message: %s)", action, response.Status, response.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a File Fabric storage backend
//
// 'endpoint' is the server host, 'token' the authentication token used to
// obtain an access token, and 'storageDir' the directory on the server under
// which all backup data is kept. An error is returned if the access token
// cannot be obtained or 'storageDir' does not exist as a directory.
func CreateFileFabricStorage(endpoint string, token string, storageDir string, threads int) (storage *FileFabricStorage, err error) {

	// Normalize the storage directory to end with '/'.
	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &FileFabricStorage{

		endpoint:       endpoint,
		authToken:      token,
		client:         http.DefaultClient,
		threads:        threads,
		directoryCache: make(map[string]string),
		maxRetries:     12,
	}

	// Exchange the auth token for an access token before any other call.
	err = storage.getAccessToken()
	if err != nil {
		return nil, err
	}

	// Validate that the storage directory exists and is a directory.
	storageDirID, isDir, _, err := storage.getFileInfo(0, storageDir)
	if err != nil {
		return nil, err
	}
	if storageDirID == "" {
		return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
	}
	if !isDir {
		return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
	}
	storage.storageDir = storageDir
	storage.storageDirID = storageDirID

	// Best-effort creation of the standard layout; errors are ignored since
	// the directories may already exist.
	for _, dir := range []string{"snapshots", "chunks"} {
		storage.CreateDirectory(0, dir)
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
|
||||||
|
|
||||||
|
// Retrieve the access token using an auth token
//
// On success the token is stored in storage.accessToken, which getAPIURL
// then embeds in every subsequent API URL.
func (storage *FileFabricStorage) getAccessToken() (error) {

	formData := url.Values { "authtoken": {storage.authToken},}
	readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
	if err != nil {
		return err
	}

	// Deferred in this order so the body is drained before it is closed
	// (defers run last-in-first-out), letting the connection be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		Token string `xml:"token"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
	if err != nil {
		return err
	}

	storage.accessToken = output.Token
	return nil
}
|
||||||
|
|
||||||
|
// Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff
|
||||||
|
func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, messageArguments ...interface{}) bool {
|
||||||
|
message := fmt.Sprintf(messageFormat, messageArguments...)
|
||||||
|
|
||||||
|
if retry >= storage.maxRetries {
|
||||||
|
LOG_WARN("FILEFABRIC_REQUEST", "%s", message)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
backoff := 1 << uint(retry)
|
||||||
|
if backoff > 60 {
|
||||||
|
backoff = 60
|
||||||
|
}
|
||||||
|
delay := rand.Intn(backoff*500) + backoff*500
|
||||||
|
LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay) / 1000.0)
|
||||||
|
time.Sleep(time.Duration(delay) * time.Millisecond)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Send a request to the server
|
||||||
|
func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) ( io.ReadCloser, http.Header, int64, error) {
|
||||||
|
|
||||||
|
var response *http.Response
|
||||||
|
|
||||||
|
for retries := 0; ; retries++ {
|
||||||
|
var inputReader io.Reader
|
||||||
|
|
||||||
|
switch input.(type) {
|
||||||
|
case url.Values:
|
||||||
|
values := input.(url.Values)
|
||||||
|
inputReader = strings.NewReader(values.Encode())
|
||||||
|
if requestHeaders == nil {
|
||||||
|
requestHeaders = make(map[string]string)
|
||||||
|
}
|
||||||
|
requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
|
||||||
|
case *RateLimitedReader:
|
||||||
|
rateLimitedReader := input.(*RateLimitedReader)
|
||||||
|
rateLimitedReader.Reset()
|
||||||
|
inputReader = rateLimitedReader
|
||||||
|
default:
|
||||||
|
LOG_FATAL("FILEFABRIC_REQUEST", "Input type is not supported")
|
||||||
|
return nil, nil, 0, fmt.Errorf("Input type is not supported")
|
||||||
|
}
|
||||||
|
|
||||||
|
request, err := http.NewRequest(method, requestURL, inputReader)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if requestHeaders != nil {
|
||||||
|
for key, value := range requestHeaders {
|
||||||
|
request.Header.Set(key, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := input.(*RateLimitedReader); ok {
|
||||||
|
request.ContentLength = input.(*RateLimitedReader).Length()
|
||||||
|
}
|
||||||
|
|
||||||
|
response, err = storage.client.Do(request)
|
||||||
|
if err != nil {
|
||||||
|
if !storage.shouldRetry(retries, "[%d] %s %s returned an error: %v", threadIndex, method, requestURL, err) {
|
||||||
|
return nil, nil, 0, err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode < 300 {
|
||||||
|
return response.Body, response.Header, response.ContentLength, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
defer response.Body.Close()
|
||||||
|
defer io.Copy(ioutil.Discard, response.Body)
|
||||||
|
|
||||||
|
var output struct {
|
||||||
|
Status string `xml:"status"`
|
||||||
|
Message string `xml:"statusmessage"`
|
||||||
|
}
|
||||||
|
|
||||||
|
err = xml.NewDecoder(response.Body).Decode(&output)
|
||||||
|
if err != nil {
|
||||||
|
if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) {
|
||||||
|
return nil, nil, 0, err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !storage.shouldRetry(retries, "[%d] %s %s returned status: %s, message: %s", threadIndex, method, requestURL, output.Status, output.Message) {
|
||||||
|
return nil, nil, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *FileFabricStorage) getAPIURL(function string) string {
|
||||||
|
if storage.accessToken == "" {
|
||||||
|
return "https://" + storage.endpoint + "/api/*/" + function + "/"
|
||||||
|
} else {
|
||||||
|
return "https://" + storage.endpoint + "/api/" + storage.accessToken + "/" + function + "/"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir'. A subdirectories returned must have a trailing '/', with
|
||||||
|
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
|
||||||
|
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recusively or non-recusively.
|
||||||
|
func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
if dir != "" && dir[len(dir)-1] != '/' {
|
||||||
|
dir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
dirID, _, _, err := storage.getFileInfo(threadIndex, dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if dirID == "" {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
lastID := ""
|
||||||
|
|
||||||
|
for {
|
||||||
|
formData := url.Values { "marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid" : {dirID}}
|
||||||
|
if dir == "snapshots/" {
|
||||||
|
formData["includefolders"] = []string{"y"}
|
||||||
|
}
|
||||||
|
if storage.testMode {
|
||||||
|
formData["limit"] = []string{"5"}
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getListOfFiles"), nil, formData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
defer io.Copy(ioutil.Discard, readCloser)
|
||||||
|
|
||||||
|
var output struct {
|
||||||
|
FileFabricResponse
|
||||||
|
FileList FileFabricFileList `xml:"files"`
|
||||||
|
Truncated int `xml:"truncated"`
|
||||||
|
}
|
||||||
|
|
||||||
|
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = checkFileFabricResponse(output.FileFabricResponse, "list the storage directory '%s'", dir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if dir == "snapshots/" {
|
||||||
|
for _, file := range output.FileList.Files {
|
||||||
|
if file.Type == 1 {
|
||||||
|
files = append(files, file.Path + "/")
|
||||||
|
}
|
||||||
|
lastID = file.ID
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
for _, file := range output.FileList.Files {
|
||||||
|
if file.Type == 0 {
|
||||||
|
files = append(files, file.Path)
|
||||||
|
sizes = append(sizes, file.Size)
|
||||||
|
}
|
||||||
|
lastID = file.ID
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if output.Truncated != 1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getFileInfo returns the information about the file or directory at 'filePath'.
//
// It returns an empty fileID when the path does not exist. Directory ids are
// additionally remembered in directoryCache (keyed without the trailing '/')
// so createParentDirectory can avoid repeated lookups.
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) {

	formData := url.Values { "path" : {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData)
	if err != nil {
		return "", false, 0, err
	}

	// Deferred in this order so the body is drained before it is closed
	// (defers run last-in-first-out), letting the connection be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File FileFabricFile `xml:"file"`
		Exists string `xml:"exists"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", false, 0, err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "get the info on '%s'", filePath)
	if err != nil {
		return "", false, 0, err
	}

	if output.Exists != "y" {
		// Path not found; an empty id signals non-existence to callers.
		return "", false, 0, nil
	} else {
		if output.File.Type == 1 {
			// Cache the directory id under the path stripped of trailing '/'.
			for filePath != "" && filePath[len(filePath)-1] == '/' {
				filePath = filePath[:len(filePath)-1]
			}

			storage.directoryCacheLock.Lock()
			storage.directoryCache[filePath] = output.File.ID
			storage.directoryCacheLock.Unlock()
		}
		return output.File.ID, output.File.Type == 1, output.File.Size, nil
	}
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'. This is a function required by the Storage interface.
|
||||||
|
func (storage *FileFabricStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
|
fileID := ""
|
||||||
|
fileID, isDir, size, err = storage.getFileInfo(threadIndex, filePath)
|
||||||
|
return fileID != "", isDir, size, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
//
// Deleting a non-existent path is a no-op. Lookup errors are intentionally
// ignored (best effort); only the delete call itself is error-checked.
func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) (err error) {

	fileID, _, _, _ := storage.getFileInfo(threadIndex, filePath)
	if fileID == "" {
		return nil
	}

	formData := url.Values { "fi_id" : {fileID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData)
	if err != nil {
		return err
	}

	// Deferred in this order so the body is drained before it is closed
	// (defers run last-in-first-out), letting the connection be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "delete file '%s'", filePath)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
//
// Renaming a non-existent source is a no-op. Only the base name of 'to' is
// sent to the server (doRenameFile renames in place), so this assumes 'from'
// and 'to' share the same parent directory — TODO confirm against callers.
func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	fileID, _, _, _ := storage.getFileInfo(threadIndex, from)
	if fileID == "" {
		return nil
	}

	formData := url.Values { "fi_id" : {fileID}, "fi_name": {filepath.Base(to)},}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData)
	if err != nil {
		return err
	}

	// Deferred in this order so the body is drained before it is closed
	// (defers run last-in-first-out), letting the connection be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "rename file '%s' to '%s'", from, to)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// createParentDirectory creates the parent directory if it doesn't exist in the cache.
// It returns the file ID of the parent directory of 'dir' (the storage root ID when
// 'dir' has no parent component), creating the parent on the storage if needed.
func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir string) (parentID string, err error) {

	found := strings.LastIndex(dir, "/")
	if found == -1 {
		// No '/' in 'dir': its parent is the storage root.
		return storage.storageDirID, nil
	}
	parent := dir[:found]

	// Fast path: the parent's ID may already be cached.
	storage.directoryCacheLock.Lock()
	parentID = storage.directoryCache[parent]
	storage.directoryCacheLock.Unlock()

	if parentID != "" {
		return parentID, nil
	}

	// Not cached: create the parent (createDirectory recursively ensures its
	// own parents exist via this function).
	parentID, err = storage.createDirectory(threadIndex, parent)
	if err != nil {
		if err == errFileFabricDirectoryExists {
			// The directory already exists (possibly created by another
			// thread); look up its ID instead.
			var isDir bool
			parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent)
			if err != nil {
				return "", err
			}
			if isDir == false {
				return "", fmt.Errorf("'%s' in the storage is a file", parent)
			}
			// Remember the ID so later lookups take the fast path.
			storage.directoryCacheLock.Lock()
			storage.directoryCache[parent] = parentID
			storage.directoryCacheLock.Unlock()
			return parentID, nil
		} else {
			return "", err
		}
	}
	return parentID, nil
}
|
||||||
|
|
||||||
|
// createDirectory creates a new directory named 'dir' on the storage (creating
// any missing parents first) and returns its file ID. The new ID is cached in
// storage.directoryCache for subsequent lookups.
func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) (dirID string, err error) {
	// Normalize: strip any trailing slashes.
	for dir != "" && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	// Ensure all ancestors exist and obtain the immediate parent's ID.
	parentID, err := storage.createParentDirectory(threadIndex, dir)
	if err != nil {
		return "", err
	}

	formData := url.Values { "fi_name": {filepath.Base(dir)}, "fi_pid" : {parentID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData)
	if err != nil {
		return "", err
	}

	// Drain and close the body so the HTTP connection can be reused.
	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	// The response embeds the standard status fields plus the created folder.
	var output struct {
		FileFabricResponse
		File FileFabricFile `xml:"file"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "create directory '%s'", dir)
	if err != nil {
		return "", err
	}

	// Cache the new directory's ID.
	storage.directoryCacheLock.Lock()
	storage.directoryCache[dir] = output.File.ID
	storage.directoryCacheLock.Unlock()

	return output.File.ID, nil
}
|
||||||
|
|
||||||
|
func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
_, err = storage.createDirectory(threadIndex, dir)
|
||||||
|
if err == errFileFabricDirectoryExists {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
formData := url.Values { "fi_id" : {storage.storageDir + filePath}}
|
||||||
|
|
||||||
|
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
defer io.Copy(ioutil.Discard, readCloser)
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
|
parentID, err := storage.createParentDirectory(threadIndex, filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
fileName := filepath.Base(filePath)
|
||||||
|
requestBody := &bytes.Buffer{}
|
||||||
|
writer := multipart.NewWriter(requestBody)
|
||||||
|
part, _ := writer.CreateFormFile("file_1", fileName)
|
||||||
|
part.Write(content)
|
||||||
|
|
||||||
|
writer.WriteField("file_name1", fileName)
|
||||||
|
writer.WriteField("fi_pid", parentID)
|
||||||
|
writer.WriteField("fi_structtype", "g")
|
||||||
|
writer.Close()
|
||||||
|
|
||||||
|
headers := make(map[string]string)
|
||||||
|
headers["Content-Type"] = writer.FormDataContentType()
|
||||||
|
|
||||||
|
rateLimitedReader := CreateRateLimitedReader(requestBody.Bytes(), storage.UploadRateLimit/storage.threads)
|
||||||
|
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader)
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
defer io.Copy(ioutil.Discard, readCloser)
|
||||||
|
|
||||||
|
var output FileFabricResponse
|
||||||
|
|
||||||
|
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = checkFileFabricResponse(output, "upload file '%s'", filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCacheNeeded returns true if a local snapshot cache is needed for the storage
// to avoid downloading/uploading chunks too often when managing snapshots.
// Always true for this storage.
func (storage *FileFabricStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// IsMoveFileImplemented returns true if the 'MoveFile' method is implemented.
// Always true for this storage.
func (storage *FileFabricStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// IsStrongConsistent returns true if the storage can guarantee strong consistency.
// Always false for this storage.
func (storage *FileFabricStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
|
// IsFastListing returns true if the storage supports fast listing of file names.
// Always false for this storage.
func (storage *FileFabricStorage) IsFastListing() bool { return false }
|
||||||
|
|
||||||
|
// EnableTestMode enables the test mode by setting the testMode flag.
func (storage *FileFabricStorage) EnableTestMode() { storage.testMode = true }
|
||||||
70
src/duplicacy_filereader.go
Normal file
70
src/duplicacy_filereader.go
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileReader wraps a number of files and turns them into a series of readers.
type FileReader struct {
	top   string   // root directory that entry paths are joined against
	files []*Entry // the entries to iterate over, in order

	CurrentFile  *os.File // the currently opened file; nil once all files are exhausted
	CurrentIndex int      // index into 'files' of the current entry
	CurrentEntry *Entry   // the entry being read; its Size is zeroed if the file can't be opened

	SkippedFiles []string // paths of entries that failed to open and were skipped
}
|
||||||
|
|
||||||
|
// CreateFileReader creates a file reader.
|
||||||
|
func CreateFileReader(top string, files []*Entry) *FileReader {
|
||||||
|
|
||||||
|
reader := &FileReader{
|
||||||
|
top: top,
|
||||||
|
files: files,
|
||||||
|
CurrentIndex: -1,
|
||||||
|
}
|
||||||
|
|
||||||
|
reader.NextFile()
|
||||||
|
|
||||||
|
return reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// NextFile switches to the next file in the file reader.
|
||||||
|
func (reader *FileReader) NextFile() bool {
|
||||||
|
|
||||||
|
if reader.CurrentFile != nil {
|
||||||
|
reader.CurrentFile.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
reader.CurrentIndex++
|
||||||
|
for reader.CurrentIndex < len(reader.files) {
|
||||||
|
|
||||||
|
reader.CurrentEntry = reader.files[reader.CurrentIndex]
|
||||||
|
if !reader.CurrentEntry.IsFile() || reader.CurrentEntry.Size == 0 {
|
||||||
|
reader.CurrentIndex++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var err error
|
||||||
|
|
||||||
|
fullPath := joinPath(reader.top, reader.CurrentEntry.Path)
|
||||||
|
reader.CurrentFile, err = os.OpenFile(fullPath, os.O_RDONLY, 0)
|
||||||
|
if err != nil {
|
||||||
|
LOG_WARN("OPEN_FAILURE", "Failed to open file for reading: %v", err)
|
||||||
|
reader.CurrentEntry.Size = 0
|
||||||
|
reader.SkippedFiles = append(reader.SkippedFiles, reader.CurrentEntry.Path)
|
||||||
|
reader.CurrentIndex++
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
reader.CurrentFile = nil
|
||||||
|
return false
|
||||||
|
}
|
||||||
236
src/duplicacy_filestorage.go
Normal file
236
src/duplicacy_filestorage.go
Normal file
@@ -0,0 +1,236 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// FileStorage is a local on-disk file storage implementing the Storage interface.
type FileStorage struct {
	StorageBase

	isCacheNeeded   bool   // Network storages require caching
	storageDir      string // root directory of the storage (no trailing slash)
	numberOfThreads int    // used to divide the global rate limits per thread
}
|
||||||
|
|
||||||
|
// CreateFileStorage creates a file storage.
|
||||||
|
func CreateFileStorage(storageDir string, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {
|
||||||
|
|
||||||
|
var stat os.FileInfo
|
||||||
|
|
||||||
|
stat, err = os.Stat(storageDir)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
err = os.MkdirAll(storageDir, 0744)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if !stat.IsDir() {
|
||||||
|
return nil, fmt.Errorf("The storage path %s is a file", storageDir)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for storageDir[len(storageDir)-1] == '/' {
|
||||||
|
storageDir = storageDir[:len(storageDir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &FileStorage{
|
||||||
|
storageDir: storageDir,
|
||||||
|
isCacheNeeded: isCacheNeeded,
|
||||||
|
numberOfThreads: threads,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Random number fo generating the temporary chunk file suffix.
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively).
|
||||||
|
func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
|
||||||
|
fullPath := path.Join(storage.storageDir, dir)
|
||||||
|
|
||||||
|
list, err := ioutil.ReadDir(fullPath)
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return nil, nil, nil
|
||||||
|
}
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, f := range list {
|
||||||
|
name := f.Name()
|
||||||
|
if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
|
||||||
|
name += "/"
|
||||||
|
}
|
||||||
|
files = append(files, name)
|
||||||
|
sizes = append(sizes, f.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *FileStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
err = os.Remove(path.Join(storage.storageDir, filePath))
|
||||||
|
if err == nil || os.IsNotExist(err) {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *FileStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
return os.Rename(path.Join(storage.storageDir, from), path.Join(storage.storageDir, to))
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *FileStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
err = os.Mkdir(path.Join(storage.storageDir, dir), 0744)
|
||||||
|
if err != nil && os.IsExist(err) {
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *FileStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
stat, err := os.Stat(path.Join(storage.storageDir, filePath))
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, stat.IsDir(), stat.Size(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *FileStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
|
||||||
|
file, err := os.Open(path.Join(storage.storageDir, filePath))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'
//
// The content is first written to a randomly-suffixed temporary file which is
// then renamed into place, so a concurrent reader never observes a partial
// file.
func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	fullPath := path.Join(storage.storageDir, filePath)

	// Only nested paths (e.g. 'chunks/ab/cdef') need their parent directory
	// checked/created; top-level files go directly under the storage root.
	if len(strings.Split(filePath, "/")) > 2 {
		dir := path.Dir(fullPath)
		// Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
		// really a need to follow the link if filePath is a link.
		stat, err := os.Lstat(dir)
		if err != nil {
			if !os.IsNotExist(err) {
				return err
			}
			err = os.MkdirAll(dir, 0744)
			if err != nil {
				return err
			}
		} else {
			if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 {
				return fmt.Errorf("The path %s is not a directory or symlink", dir)
			}
		}
	}

	// Generate a random 8-letter suffix for the temporary file so concurrent
	// uploads of the same path do not collide.
	letters := "abcdefghijklmnopqrstuvwxyz"
	suffix := make([]byte, 8)
	for i := range suffix {
		suffix[i] = letters[rand.Intn(len(letters))]
	}

	temporaryFile := fullPath + "." + string(suffix) + ".tmp"

	file, err := os.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		return err
	}

	// Write the content through a rate-limited reader.
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
	_, err = io.Copy(file, reader)
	if err != nil {
		file.Close()
		return err
	}

	// Flush to disk; tolerate filesystems that don't support fsync
	// (they report ENOTSUP on the sync operation).
	if err = file.Sync(); err != nil {
		pathErr, ok := err.(*os.PathError)
		isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
		if !isNotSupported {
			_ = file.Close()
			return err
		}
	}

	err = file.Close()
	if err != nil {
		return err
	}

	// Atomically move the temporary file into place.
	err = os.Rename(temporaryFile, fullPath)
	if err != nil {

		// If the destination already exists (e.g. another thread uploaded the
		// same chunk first), discard our temporary copy and report success.
		if _, e := os.Stat(fullPath); e == nil {
			os.Remove(temporaryFile)
			return nil
		} else {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
// IsCacheNeeded returns true if a local snapshot cache is needed for the storage
// to avoid downloading/uploading chunks too often when managing snapshots.
// For file storages this is configured at construction time.
func (storage *FileStorage) IsCacheNeeded() bool { return storage.isCacheNeeded }
|
||||||
|
|
||||||
|
// IsMoveFileImplemented returns true if the 'MoveFile' method is implemented.
// Always true for this storage.
func (storage *FileStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// IsStrongConsistent returns true if the storage can guarantee strong consistency.
// Always true for this storage.
func (storage *FileStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
|
// IsFastListing returns true if the storage supports fast listing of file names.
// Always false for this storage.
func (storage *FileStorage) IsFastListing() bool { return false }
|
||||||
|
|
||||||
|
// EnableTestMode enables the test mode; a no-op for this storage.
func (storage *FileStorage) EnableTestMode() {}
|
||||||
808
src/duplicacy_gcdstorage.go
Normal file
808
src/duplicacy_gcdstorage.go
Normal file
@@ -0,0 +1,808 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
"golang.org/x/oauth2/google"
|
||||||
|
"google.golang.org/api/drive/v3"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
"google.golang.org/api/option"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
	GCDFileMimeType      = "application/octet-stream"           // MIME type assigned to uploaded files
	GCDDirectoryMimeType = "application/vnd.google-apps.folder" // MIME type Google Drive uses for folders
	GCDUserDrive         = "root"                               // drive ID alias referring to the user's own drive
)
|
||||||
|
|
||||||
|
// GCDStorage is a Storage implementation backed by Google Drive.
type GCDStorage struct {
	StorageBase

	service     *drive.Service    // the Drive API client
	idCache     map[string]string // only directories are saved in this cache
	idCacheLock sync.Mutex        // guards idCache
	backoffs    []int             // desired backoff time in seconds for each thread
	attempts    []int             // number of failed attempts since last success for each thread
	driveID     string            // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive

	createDirectoryLock sync.Mutex // serializes directory creation (see getIDFromPath)
	isConnected         bool       // set after initialization succeeds; permits retrying 401 errors
	numberOfThreads     int        // used to divide the global rate limits per thread
	TestMode            bool       // when set, listings use a small page size
}
|
||||||
|
|
||||||
|
// GCDConfig holds the OAuth2 client credentials and token loaded from the token file.
type GCDConfig struct {
	ClientID     string          `json:"client_id"`
	ClientSecret string          `json:"client_secret"`
	Endpoint     oauth2.Endpoint `json:"end_point"`
	Token        oauth2.Token    `json:"token"`
}
|
||||||
|
|
||||||
|
// shouldRetry classifies 'err' and decides whether the caller should retry the
// failed API request. It returns (false, nil) on success, (true, nil) after
// sleeping for a randomized exponential backoff, and (false, err) for a
// permanent error or when the retry budget is exhausted. Per-thread backoff
// and attempt counters are kept in storage.backoffs/storage.attempts.
func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error) {

	const MAX_ATTEMPTS = 15

	// Cap the backoff at 64 seconds, or the thread count if larger.
	maximumBackoff := 64
	if maximumBackoff < storage.numberOfThreads {
		maximumBackoff = storage.numberOfThreads
	}
	retry := false
	message := ""
	if err == nil {
		// Success: reset this thread's backoff state.
		storage.backoffs[threadIndex] = 1
		storage.attempts[threadIndex] = 0
		return false, nil
	} else if e, ok := err.(*googleapi.Error); ok {
		if 500 <= e.Code && e.Code < 600 {
			// Retry for 5xx response codes.
			message = fmt.Sprintf("HTTP status code %d", e.Code)
			retry = true
		} else if e.Code == 429 {
			// Too many requests
			message = "HTTP status code 429"
			retry = true
		} else if e.Code == 403 {
			// User Rate Limit Exceeded
			message = e.Message
			retry = true
		} else if e.Code == 408 {
			// Request timeout
			message = e.Message
			retry = true
		} else if e.Code == 400 && strings.Contains(e.Message, "failedPrecondition") {
			// Daily quota exceeded
			message = e.Message
			retry = true
		} else if e.Code == 401 {
			// Only retry on authorization error when storage has been connected before
			if storage.isConnected {
				message = "Authorization Error"
				retry = true
			}
		}
	} else if e, ok := err.(*url.Error); ok {
		message = e.Error()
		retry = true
	} else if err == io.ErrUnexpectedEOF {
		// Retry on unexpected EOFs and temporary network errors.
		message = "Unexpected EOF"
		retry = true
	} else if err, ok := err.(net.Error); ok {
		message = "Temporary network error"
		retry = err.Temporary()
	}

	if !retry {
		return false, err
	}

	// Give up after MAX_ATTEMPTS consecutive failures, resetting the counters
	// for the next operation.
	if storage.attempts[threadIndex] >= MAX_ATTEMPTS {
		LOG_INFO("GCD_RETRY", "[%d] Maximum number of retries reached (backoff: %d, attempts: %d)",
			threadIndex, storage.backoffs[threadIndex], storage.attempts[threadIndex])
		storage.backoffs[threadIndex] = 1
		storage.attempts[threadIndex] = 0
		return false, err
	}

	// Exponentially grow the backoff, clamped to maximumBackoff.
	if storage.backoffs[threadIndex] < maximumBackoff {
		storage.backoffs[threadIndex] *= 2
	}
	if storage.backoffs[threadIndex] > maximumBackoff {
		storage.backoffs[threadIndex] = maximumBackoff
	}
	storage.attempts[threadIndex] += 1
	// Randomize the delay in [0, 2*backoff) to spread out concurrent retries.
	delay := float64(storage.backoffs[threadIndex]) * rand.Float64() * 2
	LOG_DEBUG("GCD_RETRY", "[%d] %s; retrying after %.2f seconds (backoff: %d, attempts: %d)",
		threadIndex, message, delay, storage.backoffs[threadIndex], storage.attempts[threadIndex])
	time.Sleep(time.Duration(delay * float64(time.Second)))

	return true, nil
}
|
||||||
|
|
||||||
|
// convertFilePath converts the path for a fossil in the form of 'chunks/id.fsl' to 'fossils/id'. This is because
|
||||||
|
// GCD doesn't support file renaming. Instead, it only allows one file to be moved from one directory to another.
|
||||||
|
// By adding a layer of path conversion we're pretending that we can rename between 'chunks/id' and 'chunks/id.fsl'
|
||||||
|
func (storage *GCDStorage) convertFilePath(filePath string) string {
|
||||||
|
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||||
|
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||||
|
}
|
||||||
|
return filePath
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *GCDStorage) getPathID(path string) string {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
pathID := storage.idCache[path]
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
return pathID
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *GCDStorage) findPathID(path string) (string, bool) {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
pathID, ok := storage.idCache[path]
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
return pathID, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *GCDStorage) savePathID(path string, pathID string) {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
storage.idCache[path] = pathID
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *GCDStorage) deletePathID(path string) {
|
||||||
|
storage.idCacheLock.Lock()
|
||||||
|
delete(storage.idCache, path)
|
||||||
|
storage.idCacheLock.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles bool, listDirectories bool) ([]*drive.File, error) {
|
||||||
|
|
||||||
|
if parentID == "" {
|
||||||
|
return nil, fmt.Errorf("No parent ID provided")
|
||||||
|
}
|
||||||
|
|
||||||
|
files := []*drive.File{}
|
||||||
|
|
||||||
|
startToken := ""
|
||||||
|
|
||||||
|
query := "'" + parentID + "' in parents and trashed = false "
|
||||||
|
if listFiles && !listDirectories {
|
||||||
|
query += "and mimeType != 'application/vnd.google-apps.folder'"
|
||||||
|
} else if !listFiles && !listDirectories {
|
||||||
|
query += "and mimeType = 'application/vnd.google-apps.folder'"
|
||||||
|
}
|
||||||
|
|
||||||
|
maxCount := int64(1000)
|
||||||
|
if storage.TestMode {
|
||||||
|
maxCount = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
var fileList *drive.FileList
|
||||||
|
var err error
|
||||||
|
|
||||||
|
for {
|
||||||
|
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
|
||||||
|
if storage.driveID != GCDUserDrive {
|
||||||
|
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
|
||||||
|
}
|
||||||
|
fileList, err = q.Do()
|
||||||
|
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||||
|
break
|
||||||
|
} else if retry {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
files = append(files, fileList.Files...)
|
||||||
|
|
||||||
|
startToken = fileList.NextPageToken
|
||||||
|
if startToken == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// listByName looks up a single entry named 'name' directly under the directory
// identified by 'parentID'. It returns the entry's ID ("" when not found),
// whether it is a directory, and its size.
func (storage *GCDStorage) listByName(threadIndex int, parentID string, name string) (string, bool, int64, error) {

	var fileList *drive.FileList
	var err error

	// Retry the request until it succeeds or fails permanently.
	for {
		query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
		q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
		if storage.driveID != GCDUserDrive {
			q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
		}
		fileList, err = q.Do()

		if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
			break
		} else if retry {
			continue
		} else {
			return "", false, 0, err
		}
	}

	if len(fileList.Files) == 0 {
		// Not found.
		return "", false, 0, nil
	}

	// NOTE(review): Drive allows multiple entries with the same name under one
	// parent; only the first match is used here.
	file := fileList.Files[0]

	return file.Id, file.MimeType == GCDDirectoryMimeType, file.Size, nil
}
|
||||||
|
|
||||||
|
// getIDFromPath returns the id of the given path. If 'createDirectories' is true, create the given path and all its
// parent directories if they don't exist. Note that if 'createDirectories' is false, it may return an empty 'fileID'
// if the file doesn't exist.
func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, createDirectories bool) (string, error) {

	// Fast path: the full path may already be cached.
	if fileID, ok := storage.findPathID(filePath); ok {
		return fileID, nil
	}

	fileID := storage.driveID

	// Start the walk from the storage root if its ID has been cached.
	if rootID, ok := storage.findPathID(""); ok {
		fileID = rootID
	}

	// Walk the path components, resolving each against its parent.
	names := strings.Split(filePath, "/")
	current := ""
	for i, name := range names {
		// Find the intermediate directory in the cache first.
		current = path.Join(current, name)
		currentID, ok := storage.findPathID(current)
		if ok {
			fileID = currentID
			continue
		}

		// Check if the directory exists.
		var err error
		var isDir bool
		fileID, isDir, _, err = storage.listByName(threadIndex, fileID, name)
		if err != nil {
			return "", err
		}
		if fileID == "" {
			if !createDirectories {
				return "", nil
			}

			// Only one thread can create the directory at a time -- GCD allows multiple directories
			// to have the same name but different ids.
			storage.createDirectoryLock.Lock()
			err = storage.CreateDirectory(threadIndex, current)
			storage.createDirectoryLock.Unlock()

			if err != nil {
				return "", fmt.Errorf("Failed to create directory '%s': %v", current, err)
			}
			// CreateDirectory caches the new ID; fetch it from the cache.
			currentID, ok = storage.findPathID(current)
			if !ok {
				return "", fmt.Errorf("Directory '%s' created by id not found", current)
			}
			fileID = currentID
			continue
		} else if isDir {
			// Cache directory IDs so later lookups skip the API call.
			storage.savePathID(current, fileID)
		}
		// A non-directory is only acceptable as the final component.
		if i != len(names)-1 && !isDir {
			return "", fmt.Errorf("Path '%s' is not a directory", current)
		}
	}
	return fileID, nil
}
|
||||||
|
|
||||||
|
// CreateGCDStorage creates a GCD storage object.
//
// 'tokenFile' may contain either a service-account key (detected by the
// "type": "service_account" field) or an OAuth2 client config plus token
// (GCDConfig). 'driveID' selects a shared drive by id or name; empty means
// the user's own drive. 'storagePath' is created if missing, along with the
// 'chunks', 'snapshots', and 'fossils' subdirectories.
func CreateGCDStorage(tokenFile string, driveID string, storagePath string, threads int) (storage *GCDStorage, err error) {

	ctx := context.Background()

	description, err := ioutil.ReadFile(tokenFile)
	if err != nil {
		return nil, err
	}

	// Peek at the JSON to determine the credential type.
	var object map[string]interface{}

	err = json.Unmarshal(description, &object)
	if err != nil {
		return nil, err
	}

	isServiceAccount := false
	if value, ok := object["type"]; ok {
		if authType, ok := value.(string); ok && authType == "service_account" {
			isServiceAccount = true
		}
	}

	var tokenSource oauth2.TokenSource

	if isServiceAccount {
		// Service account: build a JWT-based token source.
		config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
		if err != nil {
			return nil, err
		}
		tokenSource = config.TokenSource(ctx)
	} else {
		// OAuth2 client credentials plus a previously obtained token.
		gcdConfig := &GCDConfig{}
		if err := json.Unmarshal(description, gcdConfig); err != nil {
			return nil, err
		}

		config := oauth2.Config{
			ClientID:     gcdConfig.ClientID,
			ClientSecret: gcdConfig.ClientSecret,
			Endpoint:     gcdConfig.Endpoint,
		}
		tokenSource = config.TokenSource(ctx, &gcdConfig.Token)
	}

	service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
	if err != nil {
		return nil, err
	}

	if len(driveID) == 0 {
		driveID = GCDUserDrive
	} else {
		// Resolve a shared drive given by id or by display name.
		driveList, err := drive.NewTeamdrivesService(service).List().Do()
		if err != nil {
			return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
		}

		found := false
		for _, teamDrive := range driveList.TeamDrives {
			if teamDrive.Id == driveID || teamDrive.Name == driveID {
				driveID = teamDrive.Id
				found = true
				break
			}
		}

		if !found {
			return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
		}
	}

	storage = &GCDStorage{
		service:         service,
		numberOfThreads: threads,
		idCache:         make(map[string]string),
		backoffs:        make([]int, threads),
		attempts:        make([]int, threads),
		driveID:         driveID,
	}

	// Initialize the per-thread backoff state.
	for i := range storage.backoffs {
		storage.backoffs[i] = 1
		storage.attempts[i] = 0
	}

	// Resolve (and create if needed) the storage path, starting at the drive root.
	storage.savePathID("", driveID)
	storagePathID, err := storage.getIDFromPath(0, storagePath, true)
	if err != nil {
		return nil, err
	}

	// Reset the id cache and start with 'storagePathID' as the root
	storage.idCache = make(map[string]string)
	storage.idCache[""] = storagePathID

	// Ensure the standard subdirectories exist and cache their IDs.
	for _, dir := range []string{"chunks", "snapshots", "fossils"} {
		dirID, isDir, _, err := storage.listByName(0, storagePathID, dir)
		if err != nil {
			return nil, err
		}
		if dirID == "" {
			err = storage.CreateDirectory(0, dir)
			if err != nil {
				return nil, err
			}
		} else if !isDir {
			return nil, fmt.Errorf("%s/%s is not a directory", storagePath, dir)
		} else {
			storage.idCache[dir] = dirID
		}
	}

	// From here on, 401 errors are retried (see shouldRetry).
	storage.isConnected = true

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
|
func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
|
||||||
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
if dir == "snapshots" {
|
||||||
|
|
||||||
|
files, err := storage.listFiles(threadIndex, storage.getPathID(dir), false, true)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
subDirs := []string{}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
storage.savePathID("snapshots/"+file.Name, file.Id)
|
||||||
|
subDirs = append(subDirs, file.Name+"/")
|
||||||
|
}
|
||||||
|
return subDirs, nil, nil
|
||||||
|
} else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
|
||||||
|
pathID, err := storage.getIDFromPath(threadIndex, dir, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if pathID == "" {
|
||||||
|
return nil, nil, fmt.Errorf("Path '%s' does not exist", dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := storage.listFiles(threadIndex, pathID, true, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
files := []string{}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
files = append(files, entry.Name)
|
||||||
|
}
|
||||||
|
return files, nil, nil
|
||||||
|
} else {
|
||||||
|
lock := sync.Mutex {}
|
||||||
|
allFiles := []string{}
|
||||||
|
allSizes := []int64{}
|
||||||
|
|
||||||
|
errorChannel := make(chan error)
|
||||||
|
directoryChannel := make(chan string)
|
||||||
|
activeWorkers := 0
|
||||||
|
|
||||||
|
parents := []string{"chunks", "fossils"}
|
||||||
|
for len(parents) > 0 || activeWorkers > 0 {
|
||||||
|
|
||||||
|
if len(parents) > 0 && activeWorkers < storage.numberOfThreads {
|
||||||
|
parent := parents[0]
|
||||||
|
parents = parents[1:]
|
||||||
|
activeWorkers++
|
||||||
|
go func(parent string) {
|
||||||
|
pathID, ok := storage.findPathID(parent)
|
||||||
|
if !ok {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
entries, err := storage.listFiles(threadIndex, pathID, true, true)
|
||||||
|
if err != nil {
|
||||||
|
errorChannel <- err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries))
|
||||||
|
|
||||||
|
files := []string {}
|
||||||
|
sizes := []int64 {}
|
||||||
|
for _, entry := range entries {
|
||||||
|
if entry.MimeType != GCDDirectoryMimeType {
|
||||||
|
name := entry.Name
|
||||||
|
if strings.HasPrefix(parent, "fossils") {
|
||||||
|
name = parent + "/" + name + ".fsl"
|
||||||
|
name = name[len("fossils/"):]
|
||||||
|
} else {
|
||||||
|
name = parent + "/" + name
|
||||||
|
name = name[len("chunks/"):]
|
||||||
|
}
|
||||||
|
files = append(files, name)
|
||||||
|
sizes = append(sizes, entry.Size)
|
||||||
|
} else {
|
||||||
|
directoryChannel <- parent+"/"+entry.Name
|
||||||
|
storage.savePathID(parent+"/"+entry.Name, entry.Id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
lock.Lock()
|
||||||
|
allFiles = append(allFiles, files...)
|
||||||
|
allSizes = append(allSizes, sizes...)
|
||||||
|
lock.Unlock()
|
||||||
|
directoryChannel <- ""
|
||||||
|
} (parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
if activeWorkers > 0 {
|
||||||
|
select {
|
||||||
|
case err := <- errorChannel:
|
||||||
|
return nil, nil, err
|
||||||
|
case directory := <- directoryChannel:
|
||||||
|
if directory == "" {
|
||||||
|
activeWorkers--
|
||||||
|
} else {
|
||||||
|
parents = append(parents, directory)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return allFiles, allSizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
filePath = storage.convertFilePath(filePath)
|
||||||
|
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
|
||||||
|
if err != nil {
|
||||||
|
LOG_TRACE("GCD_STORAGE", "Ignored file deletion error: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
err = storage.service.Files.Delete(fileID).SupportsAllDrives(true).Fields("id").Do()
|
||||||
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
|
storage.deletePathID(filePath)
|
||||||
|
return nil
|
||||||
|
} else if retry {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
if e, ok := err.(*googleapi.Error); ok && e.Code == 404 {
|
||||||
|
LOG_TRACE("GCD_STORAGE", "File %s has disappeared before deletion", filePath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
|
from = storage.convertFilePath(from)
|
||||||
|
to = storage.convertFilePath(to)
|
||||||
|
|
||||||
|
fileID, err := storage.getIDFromPath(threadIndex, from, false)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to retrieve the id of '%s': %v", from, err)
|
||||||
|
}
|
||||||
|
if fileID == "" {
|
||||||
|
return fmt.Errorf("The file '%s' to be moved does not exist", from)
|
||||||
|
}
|
||||||
|
|
||||||
|
fromParent := path.Dir(from)
|
||||||
|
fromParentID, err := storage.getIDFromPath(threadIndex, fromParent, false)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", fromParent, err)
|
||||||
|
}
|
||||||
|
if fromParentID == "" {
|
||||||
|
return fmt.Errorf("The parent directory '%s' does not exist", fromParent)
|
||||||
|
}
|
||||||
|
|
||||||
|
toParent := path.Dir(to)
|
||||||
|
toParentID, err := storage.getIDFromPath(threadIndex, toParent, true)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Failed to retrieve the id of the parent directory '%s': %v", toParent, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
_, err = storage.service.Files.Update(fileID, nil).SupportsAllDrives(true).AddParents(toParentID).RemoveParents(fromParentID).Do()
|
||||||
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
|
break
|
||||||
|
} else if retry {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// createDirectory creates a new directory.
// 'dir' is relative to the storage root; the parent directory must already
// exist (its id must be resolvable from the cache).  Creating an existing
// directory is a no-op; an existing *file* of the same name is an error.
func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err error) {

	// Normalize away any trailing slashes.
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	exist, isDir, _, err := storage.GetFileInfo(threadIndex, dir)
	if err != nil {
		return err
	}

	if exist {
		if !isDir {
			return fmt.Errorf("%s is a file", dir)
		}
		return nil
	}

	// path.Dir returns "." for a top-level name; "" is the storage root key.
	parentDir := path.Dir(dir)
	if parentDir == "." {
		parentDir = ""
	}
	parentID := storage.getPathID(parentDir)
	if parentID == "" {
		return fmt.Errorf("Parent directory '%s' does not exist", parentDir)
	}
	name := path.Base(dir)

	var file *drive.File

	for {
		// Rebuilt each iteration because the Create call overwrites 'file'.
		file = &drive.File{
			Name:     name,
			MimeType: GCDDirectoryMimeType,
			Parents:  []string{parentID},
		}

		file, err = storage.service.Files.Create(file).SupportsAllDrives(true).Fields("id").Do()
		// Note: the inner 'err' deliberately shadows the outer one within this if.
		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
			break
		} else {

			// Check if the directory has already been created by other thread
			if _, ok := storage.findPathID(dir); ok {
				return nil
			}

			if retry {
				continue
			} else {
				return err
			}
		}
	}

	// Cache the new directory's id for subsequent lookups.
	storage.savePathID(dir, file.Id)
	return nil
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
// It first consults the path-id cache (which holds directories only), then
// falls back to listing the parent directory by name.
func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	// Normalize away any trailing slashes.
	for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
		filePath = filePath[:len(filePath)-1]
	}
	filePath = storage.convertFilePath(filePath)

	fileID, ok := storage.findPathID(filePath)
	if ok {
		// Only directories are saved in the cache so this must be a directory
		return true, true, 0, nil
	}

	// path.Dir returns "." for a top-level name; "" is the storage root key.
	dir := path.Dir(filePath)
	if dir == "." {
		dir = ""
	}
	dirID, err := storage.getIDFromPath(threadIndex, dir, false)
	if err != nil {
		return false, false, 0, err
	}
	if dirID == "" {
		// Parent doesn't exist, so neither does the file.
		return false, false, 0, nil
	}

	fileID, isDir, size, err = storage.listByName(threadIndex, dirID, path.Base(filePath))
	if fileID != "" && isDir {
		// Cache directory ids only, matching the lookup above.
		storage.savePathID(filePath, fileID)
	}
	return fileID != "", isDir, size, err

}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	// We never download the fossil so there is no need to convert the path
	fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
	if err != nil {
		return err
	}
	if fileID == "" {
		return fmt.Errorf("%s does not exist", filePath)
	}

	var response *http.Response

	for {
		// AcknowledgeAbuse(true) lets the download proceed even if GCD thinks that it contains malware.
		// TODO: Should this prompt the user or log a warning?
		req := storage.service.Files.Get(fileID).SupportsAllDrives(true)
		// 'err' here is the error from the *previous* iteration's Download call
		// (nil on the first pass): if the prior attempt failed because the file
		// is flagged as abusive, retry with the acknowledgement set.
		if e, ok := err.(*googleapi.Error); ok {
			if strings.Contains(err.Error(), "cannotDownloadAbusiveFile") || len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
				LOG_WARN("GCD_STORAGE", "%s is marked as abusive, will download anyway.", filePath)
				req = req.AcknowledgeAbuse(true)
			}
		}
		response, err = req.Download()
		if retry, retry_err := storage.shouldRetry(threadIndex, err); retry_err == nil && !retry {
			// Success: 'response' is valid only on this path.
			break
		} else if retry {
			continue
		} else {
			return retry_err
		}
	}

	defer response.Body.Close()

	// Stream the body into the chunk, sharing the download rate limit evenly
	// across threads.
	_, err = RateLimitedCopy(chunk, response.Body, storage.DownloadRateLimit/storage.numberOfThreads)
	return err
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
|
// We never upload a fossil so there is no need to convert the path
|
||||||
|
parent := path.Dir(filePath)
|
||||||
|
|
||||||
|
if parent == "." {
|
||||||
|
parent = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
parentID, err := storage.getIDFromPath(threadIndex, parent, true)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
file := &drive.File{
|
||||||
|
Name: path.Base(filePath),
|
||||||
|
MimeType: GCDFileMimeType,
|
||||||
|
Parents: []string{parentID},
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||||
|
_, err = storage.service.Files.Create(file).SupportsAllDrives(true).Media(reader).Fields("id").Do()
|
||||||
|
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
|
||||||
|
break
|
||||||
|
} else if retry {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
|
// managing snapshots.
|
||||||
|
func (storage *GCDStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// If the 'MoveFile' method is implemented.
|
||||||
|
func (storage *GCDStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// If the storage can guarantee strong consistency.
|
||||||
|
func (storage *GCDStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
|
// If the storage supports fast listing of files names.
|
||||||
|
func (storage *GCDStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
|
// Enable the test mode.
|
||||||
|
func (storage *GCDStorage) EnableTestMode() { storage.TestMode = true }
|
||||||
289
src/duplicacy_gcsstorage.go
Normal file
289
src/duplicacy_gcsstorage.go
Normal file
@@ -0,0 +1,289 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gcs "cloud.google.com/go/storage"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
"golang.org/x/oauth2/google"
|
||||||
|
"google.golang.org/api/googleapi"
|
||||||
|
"google.golang.org/api/iterator"
|
||||||
|
"google.golang.org/api/option"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GCSStorage is a storage backend built on a Google Cloud Storage bucket.
type GCSStorage struct {
	StorageBase

	bucket     *gcs.BucketHandle // handle of the bucket holding the storage
	storageDir string            // object-name prefix; "" or normalized to end with '/'

	numberOfThreads int  // number of threads sharing the up/download rate limits
	TestMode        bool // whether test mode is enabled
}
|
||||||
|
|
||||||
|
// GCSConfig is the on-disk JSON layout of a non-service-account token file,
// holding the OAuth client settings and the saved token.
type GCSConfig struct {
	ClientID     string          `json:"client_id"`
	ClientSecret string          `json:"client_secret"`
	Endpoint     oauth2.Endpoint `json:"end_point"`
	Token        oauth2.Token    `json:"token"`
}
|
||||||
|
|
||||||
|
// CreateGCSStorage creates a GCD storage object.
|
||||||
|
func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, threads int) (storage *GCSStorage, err error) {
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(tokenFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var object map[string]interface{}
|
||||||
|
|
||||||
|
err = json.Unmarshal(description, &object)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
isServiceAccount := false
|
||||||
|
if value, ok := object["type"]; ok {
|
||||||
|
if authType, ok := value.(string); ok && authType == "service_account" {
|
||||||
|
isServiceAccount = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var tokenSource oauth2.TokenSource
|
||||||
|
|
||||||
|
if isServiceAccount {
|
||||||
|
config, err := google.JWTConfigFromJSON(description, gcs.ScopeReadWrite)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
tokenSource = config.TokenSource(ctx)
|
||||||
|
} else {
|
||||||
|
gcsConfig := &GCSConfig{}
|
||||||
|
if err := json.Unmarshal(description, gcsConfig); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
config := oauth2.Config{
|
||||||
|
ClientID: gcsConfig.ClientID,
|
||||||
|
ClientSecret: gcsConfig.ClientSecret,
|
||||||
|
Endpoint: gcsConfig.Endpoint,
|
||||||
|
}
|
||||||
|
tokenSource = config.TokenSource(ctx, &gcsConfig.Token)
|
||||||
|
}
|
||||||
|
|
||||||
|
options := option.WithTokenSource(tokenSource)
|
||||||
|
client, err := gcs.NewClient(ctx, options)
|
||||||
|
|
||||||
|
bucket := client.Bucket(bucketName)
|
||||||
|
|
||||||
|
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||||
|
storageDir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &GCSStorage{
|
||||||
|
bucket: bucket,
|
||||||
|
storageDir: storageDir,
|
||||||
|
numberOfThreads: threads,
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {
|
||||||
|
|
||||||
|
retry := false
|
||||||
|
message := ""
|
||||||
|
if err == nil {
|
||||||
|
return false, nil
|
||||||
|
} else if e, ok := err.(*googleapi.Error); ok {
|
||||||
|
if 500 <= e.Code && e.Code < 600 {
|
||||||
|
// Retry for 5xx response codes.
|
||||||
|
message = fmt.Sprintf("HTTP status code %d", e.Code)
|
||||||
|
retry = true
|
||||||
|
} else if e.Code == 429 {
|
||||||
|
// Too many requests{
|
||||||
|
message = "HTTP status code 429"
|
||||||
|
retry = true
|
||||||
|
} else if e.Code == 403 {
|
||||||
|
// User Rate Limit Exceeded
|
||||||
|
message = "User Rate Limit Exceeded"
|
||||||
|
retry = true
|
||||||
|
}
|
||||||
|
} else if e, ok := err.(*url.Error); ok {
|
||||||
|
message = e.Error()
|
||||||
|
retry = true
|
||||||
|
} else if err == io.ErrUnexpectedEOF {
|
||||||
|
// Retry on unexpected EOFs and temporary network errors.
|
||||||
|
message = "Unexpected EOF"
|
||||||
|
retry = true
|
||||||
|
} else if err, ok := err.(net.Error); ok {
|
||||||
|
message = "Temporary network error"
|
||||||
|
retry = err.Temporary()
|
||||||
|
}
|
||||||
|
|
||||||
|
if !retry || *backoff >= 256 {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
|
||||||
|
delay := float32(*backoff) * rand.Float32()
|
||||||
|
LOG_INFO("GCS_RETRY", "%s; retrying after %.2f seconds", message, delay)
|
||||||
|
time.Sleep(time.Duration(float32(*backoff) * float32(time.Second)))
|
||||||
|
*backoff *= 2
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
|
func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
|
||||||
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
query := gcs.Query{
|
||||||
|
Prefix: storage.storageDir + dir + "/",
|
||||||
|
}
|
||||||
|
dirOnly := false
|
||||||
|
prefixLength := len(query.Prefix)
|
||||||
|
|
||||||
|
if dir == "snapshots" {
|
||||||
|
query.Delimiter = "/"
|
||||||
|
dirOnly = true
|
||||||
|
}
|
||||||
|
|
||||||
|
files := []string{}
|
||||||
|
sizes := []int64{}
|
||||||
|
iter := storage.bucket.Objects(context.Background(), &query)
|
||||||
|
for {
|
||||||
|
attributes, err := iter.Next()
|
||||||
|
if err == iterator.Done {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if dirOnly {
|
||||||
|
if len(attributes.Prefix) != 0 {
|
||||||
|
prefix := attributes.Prefix
|
||||||
|
files = append(files, prefix[prefixLength:])
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if len(attributes.Prefix) == 0 {
|
||||||
|
files = append(files, attributes.Name[prefixLength:])
|
||||||
|
sizes = append(sizes, attributes.Size)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *GCSStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
err = storage.bucket.Object(storage.storageDir + filePath).Delete(context.Background())
|
||||||
|
if err == gcs.ErrObjectNotExist {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *GCSStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
|
source := storage.bucket.Object(storage.storageDir + from)
|
||||||
|
destination := storage.bucket.Object(storage.storageDir + to)
|
||||||
|
|
||||||
|
_, err = destination.CopierFrom(source).Run(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage.DeleteFile(threadIndex, from)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *GCSStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *GCSStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
object := storage.bucket.Object(storage.storageDir + filePath)
|
||||||
|
|
||||||
|
attributes, err := object.Attrs(context.Background())
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if err == gcs.ErrObjectNotExist {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, false, attributes.Size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *GCSStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
readCloser, err := storage.bucket.Object(storage.storageDir + filePath).NewReader(context.Background())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer readCloser.Close()
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
|
backoff := 1
|
||||||
|
for {
|
||||||
|
writeCloser := storage.bucket.Object(storage.storageDir + filePath).NewWriter(context.Background())
|
||||||
|
defer writeCloser.Close()
|
||||||
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||||
|
_, err = io.Copy(writeCloser, reader)
|
||||||
|
|
||||||
|
if retry, e := storage.shouldRetry(&backoff, err); e == nil && !retry {
|
||||||
|
break
|
||||||
|
} else if retry {
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
|
// managing snapshots.
|
||||||
|
func (storage *GCSStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// If the 'MoveFile' method is implemented.
|
||||||
|
func (storage *GCSStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// If the storage can guarantee strong consistency.
|
||||||
|
func (storage *GCSStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
|
// If the storage supports fast listing of files names.
|
||||||
|
func (storage *GCSStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
|
// Enable the test mode.
|
||||||
|
func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }
|
||||||
473
src/duplicacy_hubicclient.go
Normal file
473
src/duplicacy_hubicclient.go
Normal file
@@ -0,0 +1,473 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
net_url "net/url"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HubicError represents a failed Hubic API request.
type HubicError struct {
	Status  int    // HTTP status code of the failed request
	Message string // short description of the failure
}

// Error implements the error interface as "<status> <message>".
func (e HubicError) Error() string {
	return fmt.Sprintf("%d %s", e.Status, e.Message)
}
|
||||||
|
|
||||||
|
// HubicRefreshTokenURL is the duplicacy-hosted endpoint used to refresh the OAuth token.
var HubicRefreshTokenURL = "https://duplicacy.com/hubic_refresh"

// HubicCredentialURL is the Hubic endpoint that exchanges the OAuth token for storage credentials.
var HubicCredentialURL = "https://api.hubic.com/1.0/account/credentials"
|
||||||
|
|
||||||
|
// HubicCredential holds the short-lived storage credential returned by the
// Hubic credentials endpoint.
type HubicCredential struct {
	Token    string    // auth token sent as the X-Auth-Token header on storage calls
	Endpoint string    // storage endpoint URL — presumably the OpenStack base URL; confirm against callers
	Expires  time.Time // expiry time of the credential
}
|
||||||
|
|
||||||
|
// HubicClient talks to the Hubic API, refreshing its OAuth token and
// storage credential as needed.
type HubicClient struct {
	HTTPClient *http.Client // shared HTTP client with dial/TLS/response timeouts

	TokenFile string        // path of the file the OAuth token is persisted to
	Token     *oauth2.Token // current OAuth token; guarded by TokenLock
	TokenLock *sync.Mutex

	Credential     HubicCredential // current storage credential; guarded by CredentialLock
	CredentialLock *sync.Mutex

	TestMode bool // whether test mode is enabled
}
|
||||||
|
|
||||||
|
func NewHubicClient(tokenFile string) (*HubicClient, error) {
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(tokenFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
token := new(oauth2.Token)
|
||||||
|
if err := json.Unmarshal(description, token); err != nil {
|
||||||
|
return nil, fmt.Errorf("%v: %s", err, description)
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &HubicClient{
|
||||||
|
HTTPClient: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 30 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
TLSHandshakeTimeout: 60 * time.Second,
|
||||||
|
ResponseHeaderTimeout: 300 * time.Second,
|
||||||
|
ExpectContinueTimeout: 10 * time.Second,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
TokenFile: tokenFile,
|
||||||
|
Token: token,
|
||||||
|
TokenLock: &sync.Mutex{},
|
||||||
|
CredentialLock: &sync.Mutex{},
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.RefreshToken(false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = client.GetCredential()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// call sends one HTTP request to the Hubic/OpenStack API with retry,
// re-authentication and backoff handling.
//
// 'input' selects the request body: []byte and *bytes.Buffer are sent as-is,
// a *RateLimitedReader is reset and streamed, an int means an empty body,
// and anything else is JSON-encoded.  On success it returns the response
// body (caller must close it), its length, and the Content-Type.  Up to 11
// attempts are made with exponential backoff.
func (client *HubicClient) call(url string, method string, input interface{}, extraHeader map[string]string) (io.ReadCloser, int64, string, error) {

	var response *http.Response

	backoff := 1
	for i := 0; i < 11; i++ {

		LOG_DEBUG("HUBIC_CALL", "%s %s", method, url)

		//fmt.Printf("%s %s\n", method, url)

		// Rebuild the body reader every attempt so retries resend from the start.
		var inputReader io.Reader

		switch input.(type) {
		default:
			jsonInput, err := json.Marshal(input)
			if err != nil {
				return nil, 0, "", err
			}
			inputReader = bytes.NewReader(jsonInput)
		case []byte:
			inputReader = bytes.NewReader(input.([]byte))
		case int:
			inputReader = bytes.NewReader([]byte(""))
		case *bytes.Buffer:
			inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
		case *RateLimitedReader:
			input.(*RateLimitedReader).Reset()
			inputReader = input.(*RateLimitedReader)
		}

		request, err := http.NewRequest(method, url, inputReader)
		if err != nil {
			return nil, 0, "", err
		}

		if reader, ok := inputReader.(*RateLimitedReader); ok {
			request.ContentLength = reader.Length()
		}

		// Pick the right auth header for the target: the credentials endpoint
		// uses the OAuth bearer token, the refresh endpoint carries its own
		// payload, and everything else uses the OpenStack X-Auth-Token.
		if url == HubicCredentialURL {
			client.TokenLock.Lock()
			request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
			client.TokenLock.Unlock()
		} else if url != HubicRefreshTokenURL {
			client.CredentialLock.Lock()
			request.Header.Set("X-Auth-Token", client.Credential.Token)
			client.CredentialLock.Unlock()
		}

		for key, value := range extraHeader {
			request.Header.Set(key, value)
		}

		response, err = client.HTTPClient.Do(request)
		if err != nil {
			// Transport-level failures are retried with jittered backoff,
			// except for the credentials endpoint which fails immediately.
			if url != HubicCredentialURL {
				retryAfter := time.Duration((0.5 + rand.Float32()) * 1000.0 * float32(backoff))
				LOG_INFO("HUBIC_CALL", "%s %s returned an error: %v; retry after %d milliseconds", method, url, err, retryAfter)
				time.Sleep(retryAfter * time.Millisecond)
				backoff *= 2
				continue
			}
			return nil, 0, "", err
		}

		contentType := ""
		if len(response.Header["Content-Type"]) > 0 {
			contentType = response.Header["Content-Type"][0]
		}

		if response.StatusCode < 400 {
			// Success: hand the open body to the caller.
			return response.Body, response.ContentLength, contentType, nil
		}

		/*buffer := bytes.NewBufferString("")
		io.Copy(buffer, response.Body)
		fmt.Printf("%s\n", buffer.String())*/

		response.Body.Close()

		if response.StatusCode == 401 {

			// 401 on the auth endpoints themselves is fatal; otherwise
			// refresh the token and credential and retry the request.
			if url == HubicRefreshTokenURL {
				return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
			}

			if url == HubicCredentialURL {
				return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when retrieving credentials"}
			}

			err = client.RefreshToken(true)
			if err != nil {
				return nil, 0, "", err
			}

			err = client.GetCredential()
			if err != nil {
				return nil, 0, "", err
			}
			continue
		} else if response.StatusCode >= 500 && response.StatusCode < 600 {
			// Server errors: retry with jittered backoff.
			retryAfter := time.Duration((0.5 + rand.Float32()) * 1000.0 * float32(backoff))
			LOG_INFO("HUBIC_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
			time.Sleep(retryAfter * time.Millisecond)
			backoff *= 2
			continue
		} else if response.StatusCode == 408 {
			// Request timeout: retry with jittered backoff.
			retryAfter := time.Duration((0.5 + rand.Float32()) * 1000.0 * float32(backoff))
			LOG_INFO("HUBIC_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
			time.Sleep(retryAfter * time.Millisecond)
			backoff *= 2
			continue
		} else {
			// Any other 4xx is not retryable.
			return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Hubic API error"}
		}
	}

	return nil, 0, "", fmt.Errorf("Maximum number of retries reached")
}
|
||||||
|
|
||||||
|
func (client *HubicClient) RefreshToken(force bool) (err error) {
|
||||||
|
client.TokenLock.Lock()
|
||||||
|
defer client.TokenLock.Unlock()
|
||||||
|
|
||||||
|
if !force && client.Token.Valid() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(HubicRefreshTokenURL, "POST", client.Token, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&client.Token); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err := json.Marshal(client.Token)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(client.TokenFile, description, 0644)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) GetCredential() (err error) {
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
defer client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(HubicCredentialURL, "GET", 0, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer := bytes.NewBufferString("")
|
||||||
|
io.Copy(buffer, readCloser)
|
||||||
|
readCloser.Close()
|
||||||
|
|
||||||
|
if err = json.NewDecoder(buffer).Decode(&client.Credential); err != nil {
|
||||||
|
return fmt.Errorf("%v (response: %s)", err, buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HubicEntry describes one object returned by a Swift container listing.
type HubicEntry struct {
	Name   string `json:"name"`         // object name (full path inside the container)
	Size   int64  `json:"bytes"`        // object size in bytes
	Type   string `json:"content_type"` // MIME type; "application/directory" marks a directory
	Subdir string `json:"subdir"`       // set instead of Name for pseudo-directories in delimited listings
}
|
||||||
|
|
||||||
|
func (client *HubicClient) ListEntries(path string) ([]HubicEntry, error) {
|
||||||
|
|
||||||
|
if len(path) > 0 && path[len(path)-1] != '/' {
|
||||||
|
path += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
count := 1000
|
||||||
|
if client.TestMode {
|
||||||
|
count = 8
|
||||||
|
}
|
||||||
|
|
||||||
|
marker := ""
|
||||||
|
|
||||||
|
var entries []HubicEntry
|
||||||
|
|
||||||
|
for {
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default"
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
url += fmt.Sprintf("?format=json&limit=%d&delimiter=%%2f", count)
|
||||||
|
if path != "" {
|
||||||
|
url += "&prefix=" + net_url.QueryEscape(path)
|
||||||
|
}
|
||||||
|
if marker != "" {
|
||||||
|
url += "&marker=" + net_url.QueryEscape(marker)
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(url, "GET", 0, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
var output []HubicEntry
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range output {
|
||||||
|
if entry.Subdir == "" {
|
||||||
|
marker = entry.Name
|
||||||
|
} else {
|
||||||
|
marker = entry.Subdir
|
||||||
|
for len(entry.Subdir) > 0 && entry.Subdir[len(entry.Subdir)-1] == '/' {
|
||||||
|
entry.Subdir = entry.Subdir[:len(entry.Subdir)-1]
|
||||||
|
}
|
||||||
|
entry.Name = entry.Subdir
|
||||||
|
entry.Type = "application/directory"
|
||||||
|
}
|
||||||
|
if path != "" && strings.HasPrefix(entry.Name, path) {
|
||||||
|
entry.Name = entry.Name[len(path):]
|
||||||
|
}
|
||||||
|
entries = append(entries, entry)
|
||||||
|
}
|
||||||
|
if len(output) < count {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) GetFileInfo(path string) (bool, bool, int64, error) {
|
||||||
|
|
||||||
|
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||||
|
path = path[:len(path)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default/" + path
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
readCloser, size, contentType, err := client.call(url, "HEAD", 0, nil)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(HubicError); ok && e.Status == 404 {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
|
||||||
|
return true, contentType == "application/directory", size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
|
||||||
|
|
||||||
|
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||||
|
path = path[:len(path)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default/" + path
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
readCloser, size, _, err := client.call(url, "GET", 0, nil)
|
||||||
|
return readCloser, size, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
|
||||||
|
|
||||||
|
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||||
|
path = path[:len(path)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default/" + path
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
header := make(map[string]string)
|
||||||
|
header["Content-Type"] = "application/octet-stream"
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), header)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) DeleteFile(path string) error {
|
||||||
|
|
||||||
|
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||||
|
path = path[:len(path)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default/" + path
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(url, "DELETE", 0, nil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) MoveFile(from string, to string) error {
|
||||||
|
|
||||||
|
for len(from) > 0 && from[len(from)-1] == '/' {
|
||||||
|
from = from[:len(from)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
for len(to) > 0 && to[len(to)-1] == '/' {
|
||||||
|
to = to[:len(to)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default/" + from
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
header := make(map[string]string)
|
||||||
|
header["Destination"] = "default/" + to
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(url, "COPY", 0, header)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
|
||||||
|
return client.DeleteFile(from)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *HubicClient) CreateDirectory(path string) error {
|
||||||
|
|
||||||
|
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||||
|
path = path[:len(path)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
client.CredentialLock.Lock()
|
||||||
|
url := client.Credential.Endpoint + "/default/" + path
|
||||||
|
client.CredentialLock.Unlock()
|
||||||
|
|
||||||
|
header := make(map[string]string)
|
||||||
|
header["Content-Type"] = "application/directory"
|
||||||
|
|
||||||
|
readCloser, _, _, err := client.call(url, "PUT", "", header)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
149
src/duplicacy_hubicclient_test.go
Normal file
149
src/duplicacy_hubicclient_test.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHubicClient(t *testing.T) {
|
||||||
|
|
||||||
|
hubicClient, err := NewHubicClient("hubic-token.json")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the Hubic client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hubicClient.TestMode = true
|
||||||
|
|
||||||
|
existingFiles, err := hubicClient.ListEntries("")
|
||||||
|
for _, file := range existingFiles {
|
||||||
|
fmt.Printf("name: %s, isDir: %t\n", file.Name, file.Type == "application/directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
testExists, _, _, err := hubicClient.GetFileInfo("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the test directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !testExists {
|
||||||
|
err = hubicClient.CreateDirectory("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the test directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
test1Exists, _, _, err := hubicClient.GetFileInfo("test/test1")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the test1 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !test1Exists {
|
||||||
|
err = hubicClient.CreateDirectory("test/test1")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the test1 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
test2Exists, _, _, err := hubicClient.GetFileInfo("test/test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the test2 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if !test2Exists {
|
||||||
|
err = hubicClient.CreateDirectory("test/test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the test2 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
numberOfFiles := 20
|
||||||
|
maxFileSize := 64 * 1024
|
||||||
|
|
||||||
|
for i := 0; i < numberOfFiles; i++ {
|
||||||
|
content := make([]byte, rand.Int()%maxFileSize+1)
|
||||||
|
_, err = crypto_rand.Read(content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error generating random content: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
hasher.Write(content)
|
||||||
|
filename := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
|
fmt.Printf("file: %s\n", filename)
|
||||||
|
|
||||||
|
err = hubicClient.UploadFile("test/test1/"+filename, content, 100)
|
||||||
|
if err != nil {
|
||||||
|
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
|
||||||
|
t.Errorf("Failed to upload the file %s: %v", filename, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := hubicClient.ListEntries("test/test1")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error list randomly generated files: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
|
||||||
|
exists, isDir, size, err := hubicClient.GetFileInfo("test/test1/" + entry.Name)
|
||||||
|
fmt.Printf("%s exists: %t, isDir: %t, size: %d, err: %v\n", "test/test1/"+entry.Name, exists, isDir, size, err)
|
||||||
|
|
||||||
|
err = hubicClient.MoveFile("test/test1/"+entry.Name, "test/test2/"+entry.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to move %s: %v", entry.Name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err = hubicClient.ListEntries("test/test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error list randomly generated files: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
readCloser, _, err := hubicClient.DownloadFile("test/test2/" + entry.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error downloading file %s: %v", entry.Name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
io.Copy(hasher, readCloser)
|
||||||
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
|
if hash != entry.Name {
|
||||||
|
t.Errorf("File %s, hash %s", entry.Name, hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
|
||||||
|
err = hubicClient.DeleteFile("test/test2/" + entry.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
198
src/duplicacy_hubicstorage.go
Normal file
198
src/duplicacy_hubicstorage.go
Normal file
@@ -0,0 +1,198 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HubicStorage is a storage backend backed by the Hubic (OVH) cloud service.
type HubicStorage struct {
	StorageBase

	client          *HubicClient // REST client used for all Hubic API calls
	storageDir      string       // root directory of the backup storage inside the container
	numberOfThreads int          // number of worker threads sharing the rate limits
}
|
||||||
|
|
||||||
|
// CreateHubicStorage creates an Hubic storage object. It verifies that
// 'storagePath' exists and is a directory, and creates the 'chunks' and
// 'snapshots' subdirectories if they are missing.
func CreateHubicStorage(tokenFile string, storagePath string, threads int) (storage *HubicStorage, err error) {

	// Normalize the path by stripping any trailing slashes.
	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
		storagePath = storagePath[:len(storagePath)-1]
	}

	client, err := NewHubicClient(tokenFile)
	if err != nil {
		return nil, err
	}

	// The storage root must already exist and be a directory.
	exists, isDir, _, err := client.GetFileInfo(storagePath)
	if err != nil {
		return nil, err
	}

	if !exists {
		return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
	}

	if !isDir {
		return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
	}

	storage = &HubicStorage{
		client:          client,
		storageDir:      storagePath,
		numberOfThreads: threads,
	}

	// Ensure the two required subdirectories exist, creating them on demand.
	for _, path := range []string{"chunks", "snapshots"} {
		dir := storagePath + "/" + path
		exists, isDir, _, err := client.GetFileInfo(dir)
		if err != nil {
			return nil, err
		}
		if !exists {
			err = client.CreateDirectory(storagePath + "/" + path)
			if err != nil {
				return nil, err
			}
		} else if !isDir {
			return nil, fmt.Errorf("%s is not a directory", dir)
		}
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
//
// Three cases are handled differently:
//   - "snapshots":        return only subdirectory names (with trailing '/'), no sizes
//   - "snapshots/<id>":   return only regular file names, no sizes
//   - anything else:      return both files and directories, with sizes
func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	if dir == "snapshots" {
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		// Only the per-snapshot-id subdirectories matter here.
		subDirs := []string{}
		for _, entry := range entries {
			if entry.Type == "application/directory" {
				subDirs = append(subDirs, entry.Name+"/")
			}
		}
		return subDirs, nil, nil
	} else if strings.HasPrefix(dir, "snapshots/") {
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		// Only regular files (individual snapshot revisions) matter here.
		files := []string{}

		for _, entry := range entries {
			if entry.Type == "application/directory" {
				continue
			}
			files = append(files, entry.Name)
		}
		return files, nil, nil
	} else {
		// Generic listing (e.g. chunks): include both kinds, with sizes
		// (0 for directories).
		files := []string{}
		sizes := []int64{}
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		for _, entry := range entries {
			if entry.Type == "application/directory" {
				files = append(files, entry.Name+"/")
				sizes = append(sizes, 0)
			} else {
				files = append(files, entry.Name)
				sizes = append(sizes, entry.Size)
			}
		}
		return files, sizes, nil
	}

}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *HubicStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
|
||||||
|
if e, ok := err.(HubicError); ok && e.Status == 404 {
|
||||||
|
LOG_DEBUG("HUBIC_DELETE", "Ignore 404 error")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *HubicStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
fromPath := storage.storageDir + "/" + from
|
||||||
|
toPath := storage.storageDir + "/" + to
|
||||||
|
|
||||||
|
return storage.client.MoveFile(fromPath, toPath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *HubicStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage.client.CreateDirectory(storage.storageDir + "/" + dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *HubicStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
|
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
|
||||||
|
filePath = filePath[:len(filePath)-1]
|
||||||
|
}
|
||||||
|
return storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
return storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsCacheNeeded reports whether a local snapshot cache is needed for the storage
// to avoid downloading/uploading chunks too often when managing snapshots.
func (storage *HubicStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// IsMoveFileImplemented reports whether the 'MoveFile' method is implemented.
func (storage *HubicStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// IsStrongConsistent reports whether the storage can guarantee strong consistency.
func (storage *HubicStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
|
// IsFastListing reports whether the storage supports fast listing of file names.
func (storage *HubicStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
|
// EnableTestMode enables the test mode on the underlying client
// (smaller listing pages so pagination is exercised).
func (storage *HubicStorage) EnableTestMode() {
	storage.client.TestMode = true
}
|
||||||
30
src/duplicacy_keyring.go
Normal file
30
src/duplicacy_keyring.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/gilbertchen/keyring"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SetKeyringFile is a no-op on non-Windows platforms, where the OS keyring
// service is used instead of a file.
func SetKeyringFile(path string) {
	// We only use keyring file on Windows
}
|
||||||
|
|
||||||
|
func keyringGet(key string) (value string) {
|
||||||
|
value, err := keyring.Get("duplicacy", key)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_GET", "Failed to get the value from the keyring: %v", err)
|
||||||
|
}
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyringSet(key string, value string) {
|
||||||
|
err := keyring.Set("duplicacy", key, value)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_GET", "Failed to store the value to the keyring: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
170
src/duplicacy_keyring_windows.go
Normal file
170
src/duplicacy_keyring_windows.go
Normal file
@@ -0,0 +1,170 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Path of the JSON file holding the DPAPI-encrypted secrets; set via SetKeyringFile.
var keyringFile string

// Lazily-loaded Win32 DPAPI entry points.
var (
	dllcrypt32  = syscall.NewLazyDLL("Crypt32.dll")
	dllkernel32 = syscall.NewLazyDLL("Kernel32.dll")

	procEncryptData = dllcrypt32.NewProc("CryptProtectData")
	procDecryptData = dllcrypt32.NewProc("CryptUnprotectData")
	procLocalFree   = dllkernel32.NewProc("LocalFree")
)

// DATA_BLOB mirrors the Win32 DATA_BLOB structure passed to the DPAPI calls.
type DATA_BLOB struct {
	cbData uint32 // number of bytes at pbData
	pbData *byte  // pointer to the data buffer
}
|
||||||
|
|
||||||
|
// SetKeyringFile sets the path of the file used to persist DPAPI-encrypted
// secrets on Windows.
func SetKeyringFile(path string) {
	keyringFile = path
}
|
||||||
|
|
||||||
|
// keyringEncrypt encrypts 'value' with the Windows DPAPI (CryptProtectData)
// and returns the encrypted bytes. 'value' must be non-empty (its first byte
// is taken by address).
func keyringEncrypt(value []byte) ([]byte, error) {

	dataIn := DATA_BLOB{
		pbData: &value[0],
		cbData: uint32(len(value)),
	}
	dataOut := DATA_BLOB{}

	// A zero return value signals failure; 'err' then carries the Windows error.
	r, _, err := procEncryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
		0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
	if r == 0 {
		return nil, err
	}

	// The output buffer is allocated by the API and must be released with LocalFree.
	address := uintptr(unsafe.Pointer(dataOut.pbData))
	defer procLocalFree.Call(address)

	// Copy the encrypted bytes out of the API-owned buffer before it is freed.
	encryptedData := make([]byte, dataOut.cbData)
	for i := 0; i < len(encryptedData); i++ {
		encryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i)))
	}
	return encryptedData, nil
}
|
||||||
|
|
||||||
|
func keyringDecrypt(value []byte) ([]byte, error) {
|
||||||
|
|
||||||
|
dataIn := DATA_BLOB{
|
||||||
|
pbData: &value[0],
|
||||||
|
cbData: uint32(len(value)),
|
||||||
|
}
|
||||||
|
dataOut := DATA_BLOB{}
|
||||||
|
|
||||||
|
r, _, err := procDecryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
|
||||||
|
0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
|
||||||
|
if r == 0 {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
address := uintptr(unsafe.Pointer(dataOut.pbData))
|
||||||
|
defer procLocalFree.Call(address)
|
||||||
|
|
||||||
|
decryptedData := make([]byte, dataOut.cbData)
|
||||||
|
for i := 0; i < len(decryptedData); i++ {
|
||||||
|
address := int(uintptr(unsafe.Pointer(dataOut.pbData)))
|
||||||
|
decryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i)))
|
||||||
|
}
|
||||||
|
return decryptedData, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyringGet(key string) (value string) {
|
||||||
|
if keyringFile == "" {
|
||||||
|
LOG_DEBUG("KEYRING_NOT_INITIALIZED", "Keyring file not set")
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(keyringFile)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_READ", "Keyring file not read: %v", err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
var keyring map[string][]byte
|
||||||
|
err = json.Unmarshal(description, &keyring)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_PARSE", "Failed to parse the keyring storage file %s: %v", keyringFile, err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedValue := keyring[key]
|
||||||
|
|
||||||
|
if len(encryptedValue) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
valueInBytes, err := keyringDecrypt(encryptedValue)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_DECRYPT", "Failed to decrypt the value: %v", err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(valueInBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
func keyringSet(key string, value string) bool {
|
||||||
|
if value == "" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if keyringFile == "" {
|
||||||
|
LOG_DEBUG("KEYRING_NOT_INITIALIZED", "Keyring file not set")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
keyring := make(map[string][]byte)
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(keyringFile)
|
||||||
|
if err == nil {
|
||||||
|
err = json.Unmarshal(description, &keyring)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_PARSE", "Failed to parse the keyring storage file %s: %v", keyringFile, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if value == "" {
|
||||||
|
keyring[key] = nil
|
||||||
|
} else {
|
||||||
|
|
||||||
|
// Check if the value to be set is the same as the existing one
|
||||||
|
existingEncryptedValue := keyring[key]
|
||||||
|
if len(existingEncryptedValue) > 0 {
|
||||||
|
existingValue, err := keyringDecrypt(existingEncryptedValue)
|
||||||
|
if err == nil && string(existingValue) == value {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
encryptedValue, err := keyringEncrypt([]byte(value))
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_ENCRYPT", "Failed to encrypt the value: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
keyring[key] = encryptedValue
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err = json.MarshalIndent(keyring, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_MARSHAL", "Failed to marshal the keyring storage: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(keyringFile, description, 0600)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("KEYRING_WRITE", "Failed to save the keyring storage to file %s: %v", keyringFile, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
238
src/duplicacy_log.go
Normal file
238
src/duplicacy_log.go
Normal file
@@ -0,0 +1,238 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"log"
|
||||||
|
"runtime/debug"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
"regexp"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Logging severity levels, ordered from least to most severe. Messages below
// the current logging level are suppressed; levels above WARN cause logf to
// panic with an Exception.
const (
	DEBUG  = -2
	TRACE  = -1
	INFO   = 0
	WARN   = 1
	ERROR  = 2
	FATAL  = 3
	ASSERT = 4
)
|
||||||
|
|
||||||
|
// LogFunction, when set, receives every log message instead of the default
// stdout/testing handling in logf.
var LogFunction func(level int, logID string, message string)

// Whether each log line is prefixed with a timestamp, level name, and log ID.
var printLogHeader = false

// EnableLogHeader turns on the timestamp/level/ID prefix for log output.
func EnableLogHeader() {
	printLogHeader = true
}
|
||||||
|
|
||||||
|
// printStackTrace is toggled by EnableStackTrace; its consumer is outside
// this file (presumably prints a stack trace on fatal errors — confirm).
var printStackTrace = false

// EnableStackTrace enables stack trace printing.
func EnableStackTrace() {
	printStackTrace = true
}
|
||||||
|
|
||||||
|
// testingT, when non-nil, makes logf report through the testing framework
// instead of printing to stdout.
var testingT *testing.T

// setTestingT installs the *testing.T used by logf during tests.
func setTestingT(t *testing.T) {
	testingT = t
}
|
||||||
|
|
||||||
|
// Contains the ids of logs that won't be displayed
|
||||||
|
var suppressedLogs map[string]bool = map[string]bool{}
|
||||||
|
|
||||||
|
func SuppressLog(id string) {
|
||||||
|
suppressedLogs[id] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
// getLevelName returns the human-readable name for a logging level, or
// "[<n>]" for an unknown numeric level.
func getLevelName(level int) string {
	switch level {
	case DEBUG:
		return "DEBUG"
	case TRACE:
		return "TRACE"
	case INFO:
		return "INFO"
	case WARN:
		return "WARN"
	case ERROR:
		return "ERROR"
	case FATAL:
		return "FATAL"
	case ASSERT:
		return "ASSERT"
	default:
		return fmt.Sprintf("[%d]", level)
	}
}
|
||||||
|
|
||||||
|
// Minimum severity that will be printed; messages below it are dropped by logf.
var loggingLevel int

// IsDebugging reports whether DEBUG-level messages are currently shown.
func IsDebugging() bool {
	return loggingLevel <= DEBUG
}
|
||||||
|
|
||||||
|
// IsTracing reports whether TRACE-level messages are currently shown.
func IsTracing() bool {
	return loggingLevel <= TRACE
}
|
||||||
|
|
||||||
|
// SetLoggingLevel sets the minimum severity that will be printed.
func SetLoggingLevel(level int) {
	loggingLevel = level
}
|
||||||
|
|
||||||
|
// LOG_DEBUG logs a formatted message at DEBUG level.
func LOG_DEBUG(logID string, format string, v ...interface{}) {
	logf(DEBUG, logID, format, v...)
}

// LOG_TRACE logs a formatted message at TRACE level.
func LOG_TRACE(logID string, format string, v ...interface{}) {
	logf(TRACE, logID, format, v...)
}

// LOG_INFO logs a formatted message at INFO level.
func LOG_INFO(logID string, format string, v ...interface{}) {
	logf(INFO, logID, format, v...)
}

// LOG_WARN logs a formatted message at WARN level.
func LOG_WARN(logID string, format string, v ...interface{}) {
	logf(WARN, logID, format, v...)
}

// LOG_ERROR logs a formatted message at ERROR level (logf panics with an
// Exception for levels above WARN).
func LOG_ERROR(logID string, format string, v ...interface{}) {
	logf(ERROR, logID, format, v...)
}

// LOG_WERROR logs at WARN level when isWarning is true, otherwise at ERROR level.
func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) {
	if isWarning {
		logf(WARN, logID, format, v...)
	} else {
		logf(ERROR, logID, format, v...)
	}
}

// LOG_FATAL logs a formatted message at FATAL level (logf panics with an
// Exception for levels above WARN).
func LOG_FATAL(logID string, format string, v ...interface{}) {
	logf(FATAL, logID, format, v...)
}

// LOG_ASSERT logs a formatted message at ASSERT level (logf panics with an
// Exception for levels above WARN).
func LOG_ASSERT(logID string, format string, v ...interface{}) {
	logf(ASSERT, logID, format, v...)
}
|
||||||
|
|
||||||
|
// Exception is the panic value raised by logf for levels above WARN.
type Exception struct {
	Level   int    // severity of the message that triggered the panic
	LogID   string // log ID of the message
	Message string // fully formatted message text
}
|
||||||
|
|
||||||
|
// Serializes writes to stdout when running outside the testing framework.
var logMutex sync.Mutex

// logf formats and dispatches one log message. The message goes to (in order
// of precedence): the installed LogFunction, the testing framework when
// testingT is set, or stdout. Levels above WARN end with a panic carrying an
// Exception, so callers can turn fatal logs into controlled exits.
func logf(level int, logID string, format string, v ...interface{}) {

	message := fmt.Sprintf(format, v...)

	// An installed LogFunction takes over entirely (including fatal levels —
	// no panic in that case).
	if LogFunction != nil {
		LogFunction(level, logID, message)
		return
	}

	now := time.Now()

	// Uncomment this line to enable unbufferred logging for tests
	// fmt.Printf("%s %s %s %s\n", now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)

	if testingT != nil {
		// Under tests: WARN and below go to t.Logf (subject to the logging
		// level); anything more severe fails the test via t.Errorf.
		if level <= WARN {
			if level >= loggingLevel {
				testingT.Logf("%s %s %s %s\n",
					now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
			}
		} else {
			testingT.Errorf("%s %s %s %s\n",
				now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
		}
	} else {
		logMutex.Lock()
		defer logMutex.Unlock()

		if level >= loggingLevel {
			// A suppressed ID returns early here, which also skips the
			// panic below (suppression only applies up to ERROR).
			if level <= ERROR && len(suppressedLogs) > 0 {
				if _, found := suppressedLogs[logID]; found {
					return
				}
			}

			if printLogHeader {
				fmt.Printf("%s %s %s %s\n",
					now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
			} else {
				fmt.Printf("%s\n", message)
			}
		}
	}

	// ERROR, FATAL, and ASSERT are turned into a panic for the caller to recover.
	if level > WARN {
		panic(Exception{
			Level:   level,
			LogID:   logID,
			Message: message,
		})
	}
}
|
||||||
|
|
||||||
|
// Set up logging for libraries that Duplicacy depends on. They can call 'log.Printf("[ID] message")'
// to produce logs in Duplicacy's format
type Logger struct {
	// formatRegex extracts the "[ID] message" shape from a log line;
	// group 1 is the ID, group 2 the message.
	formatRegex *regexp.Regexp
}
|
||||||
|
|
||||||
|
func (logger *Logger) Write(line []byte) (n int, err error) {
|
||||||
|
n = len(line)
|
||||||
|
for len(line) > 0 && line[len(line) - 1] == '\n' {
|
||||||
|
line = line[:len(line) - 1]
|
||||||
|
}
|
||||||
|
matched := logger.formatRegex.FindStringSubmatch(string(line))
|
||||||
|
if matched != nil {
|
||||||
|
LOG_INFO(matched[1], "%s", matched[2])
|
||||||
|
} else {
|
||||||
|
LOG_INFO("LOG_DEFAULT", "%s", line)
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// init routes the standard library logger into Duplicacy's logging so that
// dependencies calling log.Printf("[ID] message") produce Duplicacy-style
// logs; flags are cleared because Logger adds its own timestamp/header.
func init() {
	log.SetFlags(0)
	log.SetOutput(&Logger{ formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`) })
}
|
||||||
|
|
||||||
|
// Process exit codes used by CatchLogException.
const (
	duplicacyExitCode = 100 // an error raised through logf (Exception panic)
	otherExitCode     = 101 // any other panic value
)
|
||||||
|
|
||||||
|
// This is the function to be called before exiting when an error occurs.
// It defaults to a no-op; callers may replace it to run cleanup logic.
var RunAtError func() = func() {}
|
||||||
|
|
||||||
|
// CatchLogException is intended to be installed with 'defer' near the top of
// a goroutine.  It recovers the Exception panic raised by logf for levels
// above WARN, runs the RunAtError hook, and exits the process with a code
// that distinguishes Duplicacy errors from unexpected panics.
func CatchLogException() {
	if r := recover(); r != nil {
		switch e := r.(type) {
		case Exception:
			// The message was already printed by logf; optionally dump a
			// stack trace before exiting.
			if printStackTrace {
				debug.PrintStack()
			}
			RunAtError()
			os.Exit(duplicacyExitCode)
		default:
			// Not one of ours: print the panic value and a stack trace.
			fmt.Fprintf(os.Stderr, "%v\n", e)
			debug.PrintStack()
			RunAtError()
			os.Exit(otherExitCode)
		}
	}
}
|
||||||
499
src/duplicacy_oneclient.go
Normal file
499
src/duplicacy_oneclient.go
Normal file
@@ -0,0 +1,499 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"strconv"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
"path/filepath"
|
||||||
|
|
||||||
|
"golang.org/x/oauth2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OneDriveError carries the HTTP status code and the message of a failed
// OneDrive API call.
type OneDriveError struct {
	Status  int    // HTTP status code of the failed response
	Message string // error message reported by the server (or synthesized)
}

// Error formats the error as "<status> <message>".
func (e OneDriveError) Error() string {
	return strconv.Itoa(e.Status) + " " + e.Message
}
|
||||||
|
|
||||||
|
// OneDriveErrorResponse mirrors the JSON error envelope returned by the
// OneDrive API: {"error": {...}}.
type OneDriveErrorResponse struct {
	Error OneDriveError `json:"error"`
}
|
||||||
|
|
||||||
|
// OneDriveClient is a minimal REST client for the personal OneDrive API and
// the Microsoft Graph API (OneDrive Business).
type OneDriveClient struct {
	HTTPClient *http.Client

	TokenFile string        // path of the JSON file the OAuth2 token is persisted to
	Token     *oauth2.Token // current OAuth2 token
	TokenLock *sync.Mutex   // guards Token across goroutines

	IsConnected bool // set after the first completed HTTP round trip
	TestMode    bool // when true, use small page sizes / randomized code paths for tests

	IsBusiness      bool   // true selects the Graph (Business) endpoints
	RefreshTokenURL string // endpoint used to refresh the access token
	APIURL          string // base URL of the REST API
}
|
||||||
|
|
||||||
|
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(tokenFile)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
token := new(oauth2.Token)
|
||||||
|
if err := json.Unmarshal(description, token); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client := &OneDriveClient{
|
||||||
|
HTTPClient: http.DefaultClient,
|
||||||
|
TokenFile: tokenFile,
|
||||||
|
Token: token,
|
||||||
|
TokenLock: &sync.Mutex{},
|
||||||
|
IsBusiness: isBusiness,
|
||||||
|
}
|
||||||
|
|
||||||
|
if isBusiness {
|
||||||
|
client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
|
||||||
|
client.APIURL = "https://graph.microsoft.com/v1.0/me"
|
||||||
|
} else {
|
||||||
|
client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
|
||||||
|
client.APIURL = "https://api.onedrive.com/v1.0"
|
||||||
|
}
|
||||||
|
|
||||||
|
client.RefreshToken(false)
|
||||||
|
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// call issues one HTTP request to the OneDrive API, retrying up to 12 times
// with capped exponential backoff on network errors, 401 (after refreshing
// the token), and most status codes above 401.  'input' selects the request
// body:
//   - []byte / *bytes.Buffer: sent as raw bytes
//   - *RateLimitedReader: sent throttled, with Content-Range/Length set
//   - int: no request body
//   - anything else: JSON-encoded
// On success the caller owns (and must close) the returned ReadCloser.
func (client *OneDriveClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {

	var response *http.Response

	backoff := 1
	for i := 0; i < 12; i++ {

		LOG_DEBUG("ONEDRIVE_CALL", "%s %s", method, url)

		var inputReader io.Reader

		switch input.(type) {
		default:
			jsonInput, err := json.Marshal(input)
			if err != nil {
				return nil, 0, err
			}
			inputReader = bytes.NewReader(jsonInput)
		case []byte:
			inputReader = bytes.NewReader(input.([]byte))
		case int:
			// An int input is the convention for "no request body".
			inputReader = nil
		case *bytes.Buffer:
			inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
		case *RateLimitedReader:
			// Reset so the reader can be consumed again on a retry.
			input.(*RateLimitedReader).Reset()
			inputReader = input.(*RateLimitedReader)
		}

		request, err := http.NewRequest(method, url, inputReader)
		if err != nil {
			return nil, 0, err
		}

		if reader, ok := inputReader.(*RateLimitedReader); ok {
			request.ContentLength = reader.Length()
			request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length() - 1, reader.Length()))
		}

		// The refresh endpoint authenticates via the token in the request
		// body, not via the Authorization header.
		if url != client.RefreshTokenURL {
			client.TokenLock.Lock()
			request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
			client.TokenLock.Unlock()
		}
		if contentType != "" {
			request.Header.Set("Content-Type", contentType)
		}

		request.Header.Set("User-Agent", "ISV|Acrosync|Duplicacy/2.0")

		response, err = client.HTTPClient.Do(request)
		if err != nil {
			// Only retry transport errors once at least one request has
			// succeeded before; otherwise fail fast.
			if client.IsConnected {
				if strings.Contains(err.Error(), "TLS handshake timeout") {
					// Give a long timeout regardless of backoff when a TLS timeout happens, hoping that
					// idle connections are not to be reused on reconnect.
					retryAfter := time.Duration(rand.Float32()*60000 + 180000)
					LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
					time.Sleep(retryAfter * time.Millisecond)
				} else {
					// For all other errors just blindly retry until the maximum is reached
					retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
					LOG_INFO("ONEDRIVE_RETRY", "%v; retry after %d milliseconds", err, retryAfter)
					time.Sleep(retryAfter * time.Millisecond)
				}
				backoff *= 2
				if backoff > 256 {
					backoff = 256
				}
				continue
			}
			return nil, 0, err
		}

		client.IsConnected = true

		if response.StatusCode < 400 {
			return response.Body, response.ContentLength, nil
		}

		// NOTE(review): this defer sits inside the retry loop, so bodies of
		// retried (>=400) responses stay open until call returns.
		defer response.Body.Close()

		errorResponse := &OneDriveErrorResponse{
			Error: OneDriveError{Status: response.StatusCode},
		}

		if response.StatusCode == 401 {

			if url == client.RefreshTokenURL {
				return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
			}

			// Access token likely expired: force a refresh and retry.
			err = client.RefreshToken(true)
			if err != nil {
				return nil, 0, err
			}
			continue
		} else if response.StatusCode == 409 {
			return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
		} else if response.StatusCode > 401 && response.StatusCode != 404 {
			// Retryable server-side error; honor a Retry-After header if it
			// demands a longer wait than our backoff.
			delay := int((rand.Float32() * 0.5 + 0.5) * 1000.0 * float32(backoff))
			if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
				retryAfter, _ := strconv.Atoi(backoffList[0])
				if retryAfter * 1000 > delay {
					delay = retryAfter * 1000
				}
			}

			LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, delay)
			time.Sleep(time.Duration(delay) * time.Millisecond)
			backoff *= 2
			if backoff > 256 {
				backoff = 256
			}
			continue
		} else {
			// 400 or 404: decode the server's error envelope and return it.
			if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
				return nil, 0, OneDriveError{Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response")}
			}

			errorResponse.Error.Status = response.StatusCode
			return nil, 0, errorResponse.Error
		}
	}

	return nil, 0, fmt.Errorf("Maximum number of retries reached")
}
|
||||||
|
|
||||||
|
// RefreshToken obtains a new access token from the refresh endpoint and
// persists it back to the token file.  Unless 'force' is true, a token that
// is still valid is kept as is.
func (client *OneDriveClient) RefreshToken(force bool) (err error) {
	client.TokenLock.Lock()
	defer client.TokenLock.Unlock()

	if !force && client.Token.Valid() {
		return nil
	}

	// The current token (which includes the refresh token) is POSTed to the
	// refresh endpoint, which responds with a fresh token.
	readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
	if err != nil {
		return fmt.Errorf("failed to refresh the access token: %v", err)
	}

	defer readCloser.Close()

	if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
		return err
	}

	// Persist the refreshed token so future runs can reuse it.
	description, err := json.Marshal(client.Token)
	if err != nil {
		return err
	}

	err = ioutil.WriteFile(client.TokenFile, description, 0644)
	if err != nil {
		return err
	}

	return nil
}
|
||||||
|
|
||||||
|
// OneDriveEntry describes one drive item returned by the OneDrive API.
type OneDriveEntry struct {
	ID   string
	Name string

	// Folder is the item's "folder" facet; it is non-empty when the entry
	// is a directory (see the len(Folder) != 0 checks in callers).
	Folder map[string]interface{}

	Size int64
}
|
||||||
|
|
||||||
|
// OneDriveListEntriesOutput is one page of a children listing; NextLink is
// the URL of the following page, or empty on the last page.
type OneDriveListEntriesOutput struct {
	Entries []OneDriveEntry `json:"value"`
	NextLink string `json:"@odata.nextLink"`
}
|
||||||
|
|
||||||
|
func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error) {
|
||||||
|
|
||||||
|
entries := []OneDriveEntry{}
|
||||||
|
|
||||||
|
url := client.APIURL + "/drive/root:/" + path + ":/children"
|
||||||
|
if path == "" {
|
||||||
|
url = client.APIURL + "/drive/root/children"
|
||||||
|
}
|
||||||
|
if client.TestMode {
|
||||||
|
url += "?top=8"
|
||||||
|
} else {
|
||||||
|
url += "?top=1000"
|
||||||
|
}
|
||||||
|
url += "&select=name,size,folder"
|
||||||
|
|
||||||
|
for {
|
||||||
|
readCloser, _, err := client.call(url, "GET", 0, "")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := &OneDriveListEntriesOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
entries = append(entries, output.Entries...)
|
||||||
|
|
||||||
|
url = output.NextLink
|
||||||
|
if url == "" {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
|
||||||
|
|
||||||
|
url := client.APIURL + "/drive/root:/" + path
|
||||||
|
url += "?select=id,name,size,folder"
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "GET", 0, "")
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(OneDriveError); ok && e.Status == 404 {
|
||||||
|
return "", false, 0, nil
|
||||||
|
} else {
|
||||||
|
return "", false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
defer readCloser.Close()
|
||||||
|
|
||||||
|
output := &OneDriveEntry{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return "", false, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return output.ID, len(output.Folder) != 0, output.Size, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile returns a reader for the content of the file at 'path' along
// with the content length.  The caller must close the returned ReadCloser.
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {

	// NOTE(review): this builds "/drive/items/root:/..." while the other
	// methods use "/drive/root:/..." — presumably both address the same
	// item; confirm against the API.
	url := client.APIURL + "/drive/items/root:/" + path + ":/content"

	return client.call(url, "GET", 0, "")
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'path', throttled through
// CreateRateLimitedReader with 'rateLimit'.  Personal accounts use the
// simple single-request upload; Business accounts go through an upload
// session (in TestMode the simple path is also taken at random so both
// branches get exercised).
func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {

	// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
	// is smaller than 4MB for OneDrive Business
	if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) {
		url := client.APIURL + "/drive/root:/" + path + ":/content"

		readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
		if err != nil {
			return err
		}

		readCloser.Close()
		return nil
	}

	// For large files, create an upload session first
	uploadURL, err := client.CreateUploadSession(path)
	if err != nil {
		return err
	}

	return client.UploadFileSession(uploadURL, content, rateLimit)
}
|
||||||
|
|
||||||
|
func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string, err error) {
|
||||||
|
|
||||||
|
type CreateUploadSessionItem struct {
|
||||||
|
ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"`
|
||||||
|
Name string `json:"name"`
|
||||||
|
}
|
||||||
|
|
||||||
|
input := map[string]interface{} {
|
||||||
|
"item": CreateUploadSessionItem {
|
||||||
|
ConflictBehavior: "replace",
|
||||||
|
Name: filepath.Base(path),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
type CreateUploadSessionOutput struct {
|
||||||
|
UploadURL string `json:"uploadUrl"`
|
||||||
|
}
|
||||||
|
|
||||||
|
output := &CreateUploadSessionOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return output.UploadURL, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte, rateLimit int) (err error) {
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(uploadURL, "PUT", CreateRateLimitedReader(content, rateLimit), "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
type UploadFileSessionOutput struct {
|
||||||
|
Size int `json:"size"`
|
||||||
|
}
|
||||||
|
output := &UploadFileSessionOutput{}
|
||||||
|
|
||||||
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
|
return fmt.Errorf("Failed to complete the file upload session: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if output.Size != len(content) {
|
||||||
|
return fmt.Errorf("Uploaded %d bytes out of %d bytes", output.Size, len(content))
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *OneDriveClient) DeleteFile(path string) error {
|
||||||
|
|
||||||
|
url := client.APIURL + "/drive/root:/" + path
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "DELETE", 0, "")
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (client *OneDriveClient) MoveFile(path string, parent string) error {
|
||||||
|
|
||||||
|
url := client.APIURL + "/drive/root:/" + path
|
||||||
|
|
||||||
|
parentReference := make(map[string]string)
|
||||||
|
parentReference["path"] = "/drive/root:/" + parent
|
||||||
|
|
||||||
|
parameters := make(map[string]interface{})
|
||||||
|
parameters["parentReference"] = parentReference
|
||||||
|
|
||||||
|
readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(OneDriveError); ok && e.Status == 400 {
|
||||||
|
// The destination directory doesn't exist; trying to create it...
|
||||||
|
dir := filepath.Dir(parent)
|
||||||
|
if dir == "." {
|
||||||
|
dir = ""
|
||||||
|
}
|
||||||
|
client.CreateDirectory(dir, filepath.Base(parent))
|
||||||
|
readCloser, _, err = client.call(url, "PATCH", parameters, "application/json")
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates the directory 'name' under 'path' ("" means the
// drive root), creating missing ancestors of 'path' recursively.  A 409 from
// the server is interpreted as "already exists" and treated as success.
func (client *OneDriveClient) CreateDirectory(path string, name string) error {

	// NOTE(review): unlike ListEntries this root URL omits "/drive" before
	// "/root/children" — confirm this is intended for both API variants.
	url := client.APIURL + "/root/children"

	if path != "" {

		pathID, isDir, _, err := client.GetFileInfo(path)
		if err != nil {
			return err
		}

		if pathID == "" {
			dir := filepath.Dir(path)
			if dir != "." {
				// The parent directory doesn't exist; trying to create it...
				// (error deliberately not checked: if creation failed, the
				// POST below will fail and report it)
				client.CreateDirectory(dir, filepath.Base(path))
				isDir = true
			}
		}

		if !isDir {
			return fmt.Errorf("The path '%s' is not a directory", path)
		}

		url = client.APIURL + "/drive/root:/" + path + ":/children"
	}

	// An empty "folder" facet marks the new item as a directory.
	parameters := make(map[string]interface{})
	parameters["name"] = name
	parameters["folder"] = make(map[string]int)

	readCloser, _, err := client.call(url, "POST", parameters, "application/json")
	if err != nil {
		if e, ok := err.(OneDriveError); ok && e.Status == 409 {
			// This error usually means the directory already exists
			LOG_TRACE("ONEDRIVE_MKDIR", "The directory '%s/%s' already exists", path, name)
			return nil
		}
		return err
	}

	readCloser.Close()
	return nil
}
|
||||||
145
src/duplicacy_oneclient_test.go
Normal file
145
src/duplicacy_oneclient_test.go
Normal file
@@ -0,0 +1,145 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestOneDriveClient(t *testing.T) {
|
||||||
|
|
||||||
|
oneDriveClient, err := NewOneDriveClient("one-token.json", false)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the OneDrive client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
oneDriveClient.TestMode = true
|
||||||
|
|
||||||
|
existingFiles, err := oneDriveClient.ListEntries("")
|
||||||
|
for _, file := range existingFiles {
|
||||||
|
fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
testID, _, _, err := oneDriveClient.GetFileInfo("test")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the test directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if testID == "" {
|
||||||
|
err = oneDriveClient.CreateDirectory("", "test")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the test directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
test1ID, _, _, err := oneDriveClient.GetFileInfo("test/test1")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the test1 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if test1ID == "" {
|
||||||
|
err = oneDriveClient.CreateDirectory("test", "test1")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the test1 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
test2ID, _, _, err := oneDriveClient.GetFileInfo("test/test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the test2 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if test2ID == "" {
|
||||||
|
err = oneDriveClient.CreateDirectory("test", "test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create the test2 directory: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
numberOfFiles := 20
|
||||||
|
maxFileSize := 64 * 1024
|
||||||
|
|
||||||
|
for i := 0; i < numberOfFiles; i++ {
|
||||||
|
content := make([]byte, rand.Int()%maxFileSize+1)
|
||||||
|
_, err = crypto_rand.Read(content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error generating random content: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
hasher.Write(content)
|
||||||
|
filename := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
|
fmt.Printf("file: %s\n", filename)
|
||||||
|
|
||||||
|
err = oneDriveClient.UploadFile("test/test1/"+filename, content, 100)
|
||||||
|
if err != nil {
|
||||||
|
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
|
||||||
|
t.Errorf("Failed to upload the file %s: %v", filename, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err := oneDriveClient.ListEntries("test/test1")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error list randomly generated files: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
err = oneDriveClient.MoveFile("test/test1/"+entry.Name, "test/test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to move %s: %v", entry.Name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
entries, err = oneDriveClient.ListEntries("test/test2")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error list randomly generated files: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
readCloser, _, err := oneDriveClient.DownloadFile("test/test2/" + entry.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error downloading file %s: %v", entry.Name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
io.Copy(hasher, readCloser)
|
||||||
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
|
if hash != entry.Name {
|
||||||
|
t.Errorf("File %s, hash %s", entry.Name, hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
readCloser.Close()
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
|
||||||
|
err = oneDriveClient.DeleteFile("test/test2/" + entry.Name)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
246
src/duplicacy_onestorage.go
Normal file
246
src/duplicacy_onestorage.go
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OneDriveStorage is a Duplicacy storage backend on top of OneDriveClient.
type OneDriveStorage struct {
	StorageBase

	client         *OneDriveClient // underlying REST client
	storageDir     string          // root path of the storage inside the drive
	numberOfThread int             // rate limits are divided by this thread count
}
|
||||||
|
|
||||||
|
// CreateOneDriveStorage creates an OneDrive storage object.
// 'storagePath' must already exist and be a directory; the standard
// "chunks", "fossils" and "snapshots" subdirectories are created if missing.
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {

	// Strip trailing slashes from the storage path.
	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
		storagePath = storagePath[:len(storagePath)-1]
	}

	client, err := NewOneDriveClient(tokenFile, isBusiness)
	if err != nil {
		return nil, err
	}

	// The storage root must already exist and be a directory.
	fileID, isDir, _, err := client.GetFileInfo(storagePath)
	if err != nil {
		return nil, err
	}

	if fileID == "" {
		return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
	}

	if !isDir {
		return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
	}

	storage = &OneDriveStorage{
		client:         client,
		storageDir:     storagePath,
		numberOfThread: threads,
	}

	// Make sure the standard subdirectories exist.
	for _, path := range []string{"chunks", "fossils", "snapshots"} {
		dir := storagePath + "/" + path
		dirID, isDir, _, err := client.GetFileInfo(dir)
		if err != nil {
			return nil, err
		}
		if dirID == "" {
			err = client.CreateDirectory(storagePath, path)
			if err != nil {
				return nil, err
			}
		} else if !isDir {
			return nil, fmt.Errorf("%s is not a directory", dir)
		}
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil

}
|
||||||
|
|
||||||
|
func (storage *OneDriveStorage) convertFilePath(filePath string) string {
|
||||||
|
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||||
|
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||||
|
}
|
||||||
|
return filePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
// for "snapshots" and "benchmark" paths.  For any other 'dir' it walks the
// "chunks" and "fossils" trees recursively, reporting fossils relative to
// "chunks" with a ".fsl" suffix appended.
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {

	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	if dir == "snapshots" {
		// Only subdirectories (one per snapshot id) are relevant here.
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		subDirs := []string{}
		for _, entry := range entries {
			if len(entry.Folder) > 0 {
				subDirs = append(subDirs, entry.Name+"/")
			}
		}
		return subDirs, nil, nil
	} else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
		// Only plain files are relevant here.
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		files := []string{}

		for _, entry := range entries {
			if len(entry.Folder) == 0 {
				files = append(files, entry.Name)
			}
		}
		return files, nil, nil
	} else {
		// Chunk listing: breadth-first walk over "chunks" and "fossils";
		// subdirectories found along the way are appended to 'parents'.
		files := []string{}
		sizes := []int64{}
		parents := []string{"chunks", "fossils"}
		for i := 0; i < len(parents); i++ {
			parent := parents[i]
			entries, err := storage.client.ListEntries(storage.storageDir + "/" + parent)
			if err != nil {
				return nil, nil, err
			}

			for _, entry := range entries {
				if len(entry.Folder) == 0 {
					name := entry.Name
					// Fossils are reported relative to "chunks" with a
					// ".fsl" suffix; regular chunks relative to "chunks".
					if strings.HasPrefix(parent, "fossils") {
						name = parent + "/" + name + ".fsl"
						name = name[len("fossils/"):]
					} else {
						name = parent + "/" + name
						name = name[len("chunks/"):]
					}
					files = append(files, name)
					sizes = append(sizes, entry.Size)
				} else {
					parents = append(parents, parent+"/"+entry.Name)
				}
			}
		}
		return files, sizes, nil
	}

}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *OneDriveStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
filePath = storage.convertFilePath(filePath)
|
||||||
|
|
||||||
|
err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
|
||||||
|
if e, ok := err.(OneDriveError); ok && e.Status == 404 {
|
||||||
|
LOG_DEBUG("ONEDRIVE_DELETE", "Ignore 404 error")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *OneDriveStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
|
fromPath := storage.storageDir + "/" + storage.convertFilePath(from)
|
||||||
|
toPath := storage.storageDir + "/" + storage.convertFilePath(to)
|
||||||
|
|
||||||
|
err = storage.client.MoveFile(fromPath, path.Dir(toPath))
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(OneDriveError); ok && e.Status == 409 {
|
||||||
|
LOG_DEBUG("ONEDRIVE_MOVE", "Ignore 409 conflict error")
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *OneDriveStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
|
dir = dir[:len(dir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
parent := path.Dir(dir)
|
||||||
|
|
||||||
|
if parent == "." {
|
||||||
|
return storage.client.CreateDirectory(storage.storageDir, dir)
|
||||||
|
} else {
|
||||||
|
return storage.client.CreateDirectory(storage.storageDir+"/"+parent, path.Base(dir))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *OneDriveStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
|
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
|
||||||
|
filePath = filePath[:len(filePath)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
filePath = storage.convertFilePath(filePath)
|
||||||
|
|
||||||
|
fileID, isDir, size, err := storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||||
|
return fileID != "", isDir, size, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *OneDriveStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	// Split the overall download rate limit evenly across the worker threads.
	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThread)
	return err
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
err = storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThread)
|
||||||
|
|
||||||
|
if e, ok := err.(OneDriveError); ok && e.Status == 409 {
|
||||||
|
LOG_TRACE("ONEDRIVE_UPLOAD", "File %s already exists", filePath)
|
||||||
|
return nil
|
||||||
|
} else {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *OneDriveStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *OneDriveStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *OneDriveStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of files names.
func (storage *OneDriveStorage) IsFastListing() bool { return true }

// Enable the test mode.  The flag is forwarded to the underlying OneDrive client.
func (storage *OneDriveStorage) EnableTestMode() {
	storage.client.TestMode = true
}
|
||||||
134
src/duplicacy_preference.go
Normal file
134
src/duplicacy_preference.go
Normal file
@@ -0,0 +1,134 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Preference stores options for each storage.  Preferences are serialized as
// JSON into the 'preferences' file under the repository's preference directory.
type Preference struct {
	Name               string            `json:"name"`                 // unique name identifying this storage
	SnapshotID         string            `json:"id"`                   // snapshot ID used when backing up to this storage
	RepositoryPath     string            `json:"repository"`           // path of the repository this preference belongs to
	StorageURL         string            `json:"storage"`              // URL of the storage backend
	Encrypted          bool              `json:"encrypted"`            // whether the storage is encrypted
	BackupProhibited   bool              `json:"no_backup"`            // disallow backups to this storage
	RestoreProhibited  bool              `json:"no_restore"`           // disallow restores from this storage
	DoNotSavePassword  bool              `json:"no_save_password"`     // do not persist passwords for this storage
	NobackupFile       string            `json:"nobackup_file"`        // name of the marker file that excludes a directory
	Keys               map[string]string `json:"keys"`                 // additional per-storage key/value settings
	FiltersFile        string            `json:"filters"`              // path of the filters file
	ExcludeByAttribute bool              `json:"exclude_by_attribute"` // exclude files based on file attributes
}
|
||||||
|
|
||||||
|
// preferencePath is the directory containing the 'preferences' file; it is set
// by LoadPreferences or SetDuplicacyPreferencePath.
var preferencePath string

// Preferences holds all storage preferences loaded from the preference file.
var Preferences []Preference
|
||||||
|
|
||||||
|
func LoadPreferences(repository string) bool {
|
||||||
|
|
||||||
|
preferencePath = path.Join(repository, DUPLICACY_DIRECTORY)
|
||||||
|
|
||||||
|
stat, err := os.Stat(preferencePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", repository, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if !stat.IsDir() {
|
||||||
|
content, err := ioutil.ReadFile(preferencePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
realPreferencePath := strings.TrimSpace(string(content))
|
||||||
|
stat, err := os.Stat(realPreferencePath)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !stat.IsDir() {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "The preference path %s is not a directory", realPreferencePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
preferencePath = realPreferencePath
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
err = json.Unmarshal(description, &Preferences)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_PARSE", "Failed to parse the preference file for repository %s: %v", repository, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(Preferences) == 0 {
|
||||||
|
LOG_ERROR("PREFERENCE_NONE", "No preference found in the preference file")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, preference := range Preferences {
|
||||||
|
if strings.ToLower(preference.Name) == "ssh" {
|
||||||
|
LOG_ERROR("PREFERENCE_INVALID", "'%s' is an invalid storage name", preference.Name)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func GetDuplicacyPreferencePath() string {
|
||||||
|
if preferencePath == "" {
|
||||||
|
LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return preferencePath
|
||||||
|
}
|
||||||
|
|
||||||
|
// Normally 'preferencePath' is set in LoadPreferences; however, if LoadPreferences is not called, this function
// provides another chance to set 'preferencePath'.
func SetDuplicacyPreferencePath(p string) {
	preferencePath = p
}
|
||||||
|
|
||||||
|
func SavePreferences() bool {
|
||||||
|
description, err := json.MarshalIndent(Preferences, "", " ")
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")
|
||||||
|
|
||||||
|
err = ioutil.WriteFile(preferenceFile, description, 0600)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func FindPreference(name string) *Preference {
|
||||||
|
for i, preference := range Preferences {
|
||||||
|
if preference.Name == name || preference.StorageURL == name {
|
||||||
|
return &Preferences[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Equal reports whether 'preference' and 'other' have identical field values
// (deep comparison, including the Keys map).
func (preference *Preference) Equal(other *Preference) bool {
	return reflect.DeepEqual(preference, other)
}
|
||||||
196
src/duplicacy_s3cstorage.go
Normal file
196
src/duplicacy_s3cstorage.go
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gilbertchen/goamz/aws"
|
||||||
|
"github.com/gilbertchen/goamz/s3"
|
||||||
|
)
|
||||||
|
|
||||||
|
// S3CStorage is a storage backend for s3 compatible storages that require V2 Signing.
type S3CStorage struct {
	StorageBase

	buckets    []*s3.Bucket // one bucket handle per worker thread
	storageDir string       // prefix under which all objects are stored; ends with '/' when non-empty
}
|
||||||
|
|
||||||
|
// CreateS3CStorage creates a amazon s3 storage object.
// When 'endpoint' is empty the region is resolved from the known AWS regions
// (defaulting to us-east-1); otherwise a custom region pointing at the given
// endpoint is used.  One bucket handle is created per worker thread.
func CreateS3CStorage(regionName string, endpoint string, bucketName string, storageDir string,
	accessKey string, secretKey string, threads int) (storage *S3CStorage, err error) {

	var region aws.Region

	if endpoint == "" {
		if regionName == "" {
			regionName = "us-east-1"
		}
		region = aws.Regions[regionName]
	} else {
		region = aws.Region{Name: regionName, S3Endpoint: "https://" + endpoint}
	}

	auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}

	var buckets []*s3.Bucket
	for i := 0; i < threads; i++ {
		s3Client := s3.New(auth, region)
		// Retry failed requests aggressively: at least 8 attempts over up to
		// 5 minutes with a 1-second delay between attempts.
		s3Client.AttemptStrategy = aws.AttemptStrategy{
			Min:   8,
			Total: 300 * time.Second,
			Delay: 1000 * time.Millisecond,
		}

		bucket := s3Client.Bucket(bucketName)
		buckets = append(buckets, bucket)
	}

	// Normalize the prefix so object keys can be built by plain concatenation.
	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &S3CStorage{
		buckets:    buckets,
		storageDir: storageDir,
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
// Three cases are handled: "snapshots/" returns only the immediate
// subdirectories; "chunks/" pages through all objects returning names and
// sizes; any other directory returns a single page of object names.
func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if len(dir) > 0 && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	// Length of the full key prefix, used to strip it from returned keys.
	dirLength := len(storage.storageDir + dir)
	if dir == "snapshots/" {
		// Use '/' as a delimiter so only immediate subdirectories are returned.
		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "/", "", 100)
		if err != nil {
			return nil, nil, err
		}

		for _, subDir := range results.CommonPrefixes {
			files = append(files, subDir[dirLength:])
		}
		return files, nil, nil
	} else if dir == "chunks/" {
		// Page through all chunks using the last key of each page as a marker.
		marker := ""
		for {
			results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", marker, 1000)
			if err != nil {
				return nil, nil, err
			}

			for _, object := range results.Contents {
				files = append(files, object.Key[dirLength:])
				sizes = append(sizes, object.Size)
			}

			if !results.IsTruncated {
				break
			}

			marker = results.Contents[len(results.Contents)-1].Key
		}
		return files, sizes, nil

	} else {

		// Single page, names only; sizes are not reported for this case.
		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", "", 1000)
		if err != nil {
			return nil, nil, err
		}

		for _, object := range results.Contents {
			files = append(files, object.Key[dirLength:])
		}
		return files, nil, nil
	}
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
|
||||||
|
options := s3.CopyOptions{ContentType: "application/duplicacy"}
|
||||||
|
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir+to, s3.Private, options, storage.buckets[threadIndex].Name+"/"+storage.storageDir+from)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return storage.DeleteFile(threadIndex, from)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.  This is a no-op: S3 has no real
// directories, and prefixes come into existence when objects are written.
func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
|
response, err := storage.buckets[threadIndex].Head(storage.storageDir+filePath, nil)
|
||||||
|
if err != nil {
|
||||||
|
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.StatusCode == 403 || response.StatusCode == 404 {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return true, false, response.ContentLength, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

	readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	// Split the overall download rate limit evenly across the worker threads.
	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.buckets))
	return err

}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	options := s3.Options{}
	// Rate-limit the upload; the limit is split evenly across the worker threads.
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.buckets))
	return storage.buckets[threadIndex].PutReader(storage.storageDir+filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *S3CStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *S3CStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *S3CStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of files names.
func (storage *S3CStorage) IsFastListing() bool { return true }

// Enable the test mode.  No-op for this backend.
func (storage *S3CStorage) EnableTestMode() {}
|
||||||
256
src/duplicacy_s3storage.go
Normal file
256
src/duplicacy_s3storage.go
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
// NOTE: The code in the Wasabi storage module relies on all functions
|
||||||
|
// in this one except MoveFile(), IsMoveFileImplemented() and
|
||||||
|
// IsStrongConsistent(). Changes to the API here will need to be
|
||||||
|
// reflected there.
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/credentials"
|
||||||
|
"github.com/aws/aws-sdk-go/aws/session"
|
||||||
|
"github.com/aws/aws-sdk-go/service/s3"
|
||||||
|
)
|
||||||
|
|
||||||
|
// S3Storage is a storage backend based on the official AWS SDK (V4 signing).
type S3Storage struct {
	StorageBase

	client          *s3.S3 // shared S3 client (safe for concurrent use by the SDK)
	bucket          string // target bucket name
	storageDir      string // key prefix; ends with '/' when non-empty
	numberOfThreads int    // number of worker threads, used to split rate limits
}
|
||||||
|
|
||||||
|
// CreateS3Storage creates a amazon s3 storage object.
// When both region and endpoint are empty, the bucket's region is discovered
// via GetBucketLocation (issued against us-east-1).  'isMinioCompatible'
// enables path-style addressing; 'isSSLSupported' toggles SSL.
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
	accessKey string, secretKey string, threads int,
	isSSLSupported bool, isMinioCompatible bool) (storage *S3Storage, err error) {

	token := ""

	auth := credentials.NewStaticCredentials(accessKey, secretKey, token)

	if regionName == "" && endpoint == "" {
		// Ask AWS where the bucket actually lives.
		defaultRegionConfig := &aws.Config{
			Region:      aws.String("us-east-1"),
			Credentials: auth,
		}

		s3Client := s3.New(session.New(defaultRegionConfig))

		response, err := s3Client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucketName)})

		if err != nil {
			return nil, err
		}

		// A nil LocationConstraint means the bucket is in us-east-1.
		regionName = "us-east-1"
		if response.LocationConstraint != nil {
			regionName = *response.LocationConstraint
		}
	}

	s3Config := &aws.Config{
		Region:           aws.String(regionName),
		Credentials:      auth,
		Endpoint:         aws.String(endpoint),
		S3ForcePathStyle: aws.Bool(isMinioCompatible),
		DisableSSL:       aws.Bool(!isSSLSupported),
	}

	// Normalize the prefix so object keys can be built by plain concatenation.
	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &S3Storage{
		client:          s3.New(session.New(s3Config)),
		bucket:          bucketName,
		storageDir:      storageDir,
		numberOfThreads: threads,
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
// "snapshots/" returns only the immediate subdirectory names; any other
// directory pages through all objects returning names and sizes.
func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if len(dir) > 0 && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	if dir == "snapshots/" {
		dir = storage.storageDir + dir
		// Use '/' as a delimiter so only immediate subdirectories are returned.
		input := s3.ListObjectsInput{
			Bucket:    aws.String(storage.bucket),
			Prefix:    aws.String(dir),
			Delimiter: aws.String("/"),
			MaxKeys:   aws.Int64(1000),
		}

		output, err := storage.client.ListObjects(&input)
		if err != nil {
			return nil, nil, err
		}

		for _, subDir := range output.CommonPrefixes {
			files = append(files, (*subDir.Prefix)[len(dir):])
		}
		return files, nil, nil
	} else {
		dir = storage.storageDir + dir
		// Page through all objects, using the last key of each page as a marker.
		marker := ""
		for {
			input := s3.ListObjectsInput{
				Bucket:  aws.String(storage.bucket),
				Prefix:  aws.String(dir),
				MaxKeys: aws.Int64(1000),
				Marker:  aws.String(marker),
			}

			output, err := storage.client.ListObjects(&input)
			if err != nil {
				return nil, nil, err
			}

			for _, object := range output.Contents {
				files = append(files, (*object.Key)[len(dir):])
				sizes = append(sizes, *object.Size)
			}

			if !*output.IsTruncated {
				break
			}

			marker = *output.Contents[len(output.Contents)-1].Key
		}
		return files, sizes, nil
	}

}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
input := &s3.DeleteObjectInput{
|
||||||
|
Bucket: aws.String(storage.bucket),
|
||||||
|
Key: aws.String(storage.storageDir + filePath),
|
||||||
|
}
|
||||||
|
_, err = storage.client.DeleteObject(input)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.  S3 has no native rename, so this is a server-side
// copy followed by a delete of the source object.
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {

	input := &s3.CopyObjectInput{
		Bucket:     aws.String(storage.bucket),
		CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
		Key:        aws.String(storage.storageDir + to),
	}

	_, err = storage.client.CopyObject(input)
	if err != nil {
		return err
	}

	return storage.DeleteFile(threadIndex, from)

}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.  This is a no-op: S3 has no real
// directories, and prefixes come into existence when objects are written.
func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
// A 403 or 404 response means the object is missing (or inaccessible) and is
// reported as non-existent rather than as an error.
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	input := &s3.HeadObjectInput{
		Bucket: aws.String(storage.bucket),
		Key:    aws.String(storage.storageDir + filePath),
	}

	output, err := storage.client.HeadObject(input)
	if err != nil {
		if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
			return false, false, 0, nil
		} else {
			return false, false, 0, err
		}
	}

	if output == nil || output.ContentLength == nil {
		return false, false, 0, nil
	} else {
		return true, false, *output.ContentLength, nil
	}
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

	input := &s3.GetObjectInput{
		Bucket: aws.String(storage.bucket),
		Key:    aws.String(storage.storageDir + filePath),
	}

	output, err := storage.client.GetObject(input)
	if err != nil {
		return err
	}

	defer output.Body.Close()

	// Split the overall download rate limit evenly across the worker threads.
	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/storage.numberOfThreads)
	return err

}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
// The upload is retried up to 3 extra times, but only on the specific
// 'XAmzContentSHA256Mismatch' error; all other errors are returned as-is.
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	attempts := 0

	for {
		// The rate-limited reader must be recreated for each attempt since it is consumed.
		input := &s3.PutObjectInput{
			Bucket:      aws.String(storage.bucket),
			Key:         aws.String(storage.storageDir + filePath),
			ACL:         aws.String(s3.ObjectCannedACLPrivate),
			Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads),
			ContentType: aws.String("application/duplicacy"),
		}

		_, err = storage.client.PutObject(input)
		if err == nil || attempts >= 3 || !strings.Contains(err.Error(), "XAmzContentSHA256Mismatch") {
			return err
		}

		LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
		attempts += 1
	}
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *S3Storage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *S3Storage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *S3Storage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of files names.
func (storage *S3Storage) IsFastListing() bool { return true }

// Enable the test mode.  No-op for this backend.
func (storage *S3Storage) EnableTestMode() {}
|
||||||
362
src/duplicacy_sftpstorage.go
Normal file
362
src/duplicacy_sftpstorage.go
Normal file
@@ -0,0 +1,362 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/pkg/sftp"
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SFTPStorage is a storage backend over SSH/SFTP.  The single underlying SFTP
// client is shared by all worker threads and guarded by 'clientLock'; on
// connection errors it is transparently replaced (see retry).
type SFTPStorage struct {
	StorageBase

	client         *sftp.Client // current SFTP session; replaced on reconnect
	clientLock     sync.Mutex   // guards access to and replacement of 'client'
	minimumNesting int // The minimum level of directories to dive into before searching for the chunk file.
	storageDir     string // root directory on the server; no trailing slash
	numberOfThreads int   // number of worker threads
	numberOfTries   int   // maximum reconnect attempts in retry()
	serverAddress   string // "host:port", kept for reconnecting
	sftpConfig      *ssh.ClientConfig // SSH config, kept for reconnecting
}
|
||||||
|
|
||||||
|
func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
|
||||||
|
minimumNesting int, password string, threads int) (storage *SFTPStorage, err error) {
|
||||||
|
|
||||||
|
authMethods := []ssh.AuthMethod{ssh.Password(password)}
|
||||||
|
|
||||||
|
hostKeyCallback := func(hostname string, remote net.Addr,
|
||||||
|
key ssh.PublicKey) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return CreateSFTPStorage(false, server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateSFTPStorage(compatibilityMode bool, server string, port int, username string, storageDir string, minimumNesting int,
|
||||||
|
authMethods []ssh.AuthMethod,
|
||||||
|
hostKeyCallback func(hostname string, remote net.Addr,
|
||||||
|
key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {
|
||||||
|
|
||||||
|
sftpConfig := &ssh.ClientConfig{
|
||||||
|
User: username,
|
||||||
|
Auth: authMethods,
|
||||||
|
HostKeyCallback: hostKeyCallback,
|
||||||
|
}
|
||||||
|
|
||||||
|
if compatibilityMode {
|
||||||
|
sftpConfig.Ciphers = []string{
|
||||||
|
"aes128-ctr", "aes192-ctr", "aes256-ctr",
|
||||||
|
"aes128-gcm@openssh.com",
|
||||||
|
"chacha20-poly1305@openssh.com",
|
||||||
|
"arcfour256", "arcfour128", "arcfour",
|
||||||
|
"aes128-cbc",
|
||||||
|
"3des-cbc",
|
||||||
|
}
|
||||||
|
sftpConfig.KeyExchanges = [] string {
|
||||||
|
"curve25519-sha256@libssh.org",
|
||||||
|
"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
|
||||||
|
"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
|
||||||
|
"diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
serverAddress := fmt.Sprintf("%s:%d", server, port)
|
||||||
|
connection, err := ssh.Dial("tcp", serverAddress, sftpConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := sftp.NewClient(connection)
|
||||||
|
if err != nil {
|
||||||
|
connection.Close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for storageDir[len(storageDir)-1] == '/' {
|
||||||
|
storageDir = storageDir[:len(storageDir)-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
fileInfo, err := client.Stat(storageDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Can't access the storage path %s: %v", storageDir, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !fileInfo.IsDir() {
|
||||||
|
return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &SFTPStorage{
|
||||||
|
client: client,
|
||||||
|
storageDir: storageDir,
|
||||||
|
minimumNesting: minimumNesting,
|
||||||
|
numberOfThreads: threads,
|
||||||
|
numberOfTries: 8,
|
||||||
|
serverAddress: serverAddress,
|
||||||
|
sftpConfig: sftpConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Random number fo generating the temporary chunk file suffix.
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
|
||||||
|
runtime.SetFinalizer(storage, CloseSFTPStorage)
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CloseSFTPStorage closes the underlying SFTP client; it is registered as a
// finalizer for SFTPStorage.
// NOTE(review): this does not take clientLock before nilling 'client' — confirm
// it is never invoked concurrently with active storage operations.
func CloseSFTPStorage(storage *SFTPStorage) {
	if storage.client != nil {
		storage.client.Close()
		storage.client = nil
	}
}
|
||||||
|
|
||||||
|
// getSFTPClient returns the current SFTP client under the client lock, so that
// callers always see the most recent client installed by retry() after a
// reconnect.
func (storage *SFTPStorage) getSFTPClient() *sftp.Client {
	storage.clientLock.Lock()
	defer storage.clientLock.Unlock()
	return storage.client
}
|
||||||
|
|
||||||
|
// retry runs 'f', and on errors that look like a dropped connection (message
// containing "EOF") sleeps with exponential backoff, re-establishes the SSH
// session and SFTP client under the client lock, and runs 'f' again — up to
// 'numberOfTries' attempts.  Any other error (or exhaustion of the tries) is
// returned to the caller.
func (storage *SFTPStorage) retry(f func () error) error {
	delay := time.Second
	for i := 0;; i++ {
		err := f()
		if err != nil && strings.Contains(err.Error(), "EOF") && i < storage.numberOfTries {
			LOG_WARN("SFTP_RETRY", "Encountered an error (%v); retry after %d second(s)", err, delay/time.Second)
			time.Sleep(delay)
			delay *= 2

			// Rebuild the connection; failures here just loop around and retry.
			storage.clientLock.Lock()
			connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
			if err != nil {
				LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
				storage.clientLock.Unlock()
				continue
			}

			client, err := sftp.NewClient(connection)
			if err != nil {
				LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
				connection.Close()
				storage.clientLock.Unlock()
				continue
			}
			// Install the new client; subsequent getSFTPClient() calls pick it up.
			storage.client = client
			storage.clientLock.Unlock()
			continue
		}
		return err
	}
}
|
||||||
|
// ListFiles return the list of files and subdirectories under 'file' (non-recursively)
|
||||||
|
func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) {
|
||||||
|
|
||||||
|
var entries []os.FileInfo
|
||||||
|
err = storage.retry(func() error {
|
||||||
|
entries, err = storage.getSFTPClient().ReadDir(path.Join(storage.storageDir, dirPath))
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, entry := range entries {
|
||||||
|
name := entry.Name()
|
||||||
|
if entry.IsDir() && name[len(name)-1] != '/' {
|
||||||
|
name += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
files = append(files, name)
|
||||||
|
sizes = append(sizes, entry.Size())
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
fullPath := path.Join(storage.storageDir, filePath)
|
||||||
|
var fileInfo os.FileInfo
|
||||||
|
err = storage.retry(func() error {
|
||||||
|
fileInfo, err = storage.getSFTPClient().Stat(fullPath)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
LOG_TRACE("SFTP_STORAGE", "File %s has disappeared before deletion", filePath)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if fileInfo == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return storage.retry(func() error { return storage.getSFTPClient().Remove(path.Join(storage.storageDir, filePath)) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
toPath := path.Join(storage.storageDir, to)
|
||||||
|
var fileInfo os.FileInfo
|
||||||
|
err = storage.retry(func() error {
|
||||||
|
fileInfo, err = storage.getSFTPClient().Stat(toPath)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if fileInfo != nil {
|
||||||
|
return fmt.Errorf("The destination file %s already exists", toPath)
|
||||||
|
}
|
||||||
|
err = storage.retry(func() error { return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from),
|
||||||
|
path.Join(storage.storageDir, to)) })
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
|
||||||
|
func (storage *SFTPStorage) CreateDirectory(threadIndex int, dirPath string) (err error) {
|
||||||
|
fullPath := path.Join(storage.storageDir, dirPath)
|
||||||
|
var fileInfo os.FileInfo
|
||||||
|
err = storage.retry(func() error {
|
||||||
|
fileInfo, err = storage.getSFTPClient().Stat(fullPath)
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
if fileInfo != nil && fileInfo.IsDir() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return storage.retry(func() error { return storage.getSFTPClient().Mkdir(path.Join(storage.storageDir, dirPath)) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
var fileInfo os.FileInfo
|
||||||
|
err = storage.retry(func() error {
|
||||||
|
fileInfo, err = storage.getSFTPClient().Stat(path.Join(storage.storageDir, filePath))
|
||||||
|
return err
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if os.IsNotExist(err) {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if fileInfo == nil {
|
||||||
|
return false, false, 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, fileInfo.IsDir(), fileInfo.Size(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
return storage.retry(func() error {
|
||||||
|
file, err := storage.getSFTPClient().Open(path.Join(storage.storageDir, filePath))
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
//
// The upload is atomic from the reader's point of view: content is first
// written to a randomly named '<file>.<suffix>.tmp' sibling and then renamed
// into place.  If the rename fails but the destination already exists (e.g.
// another thread uploaded the same chunk first), the upload is considered
// successful and the temporary file is removed.
func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	fullPath := path.Join(storage.storageDir, filePath)

	dirs := strings.Split(filePath, "/")
	fullDir := path.Dir(fullPath)
	return storage.retry(func() error {

		// Create missing parent directories first.  Each prefix of the path is
		// created in turn; dirs[0:i+2] is the prefix ending at component i+1.
		if len(dirs) > 1 {
			_, err := storage.getSFTPClient().Stat(fullDir)
			if os.IsNotExist(err) {
				for i := range dirs[1 : len(dirs)-1] {
					subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
					// We don't check the error; just keep going blindly
					storage.getSFTPClient().Mkdir(subDir)
				}
			}
		}

		// Random 8-letter suffix keeps concurrent uploads of the same file from
		// colliding on the temporary name.
		letters := "abcdefghijklmnopqrstuvwxyz"
		suffix := make([]byte, 8)
		for i := range suffix {
			suffix[i] = letters[rand.Intn(len(letters))]
		}

		temporaryFile := fullPath + "." + string(suffix) + ".tmp"

		file, err := storage.getSFTPClient().OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
		if err != nil {
			return err
		}

		// Throttle the write; the global upload limit is split among threads.
		reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
		_, err = io.Copy(file, reader)
		if err != nil {
			file.Close()
			return err
		}

		// Close errors matter here: they can report a failed flush of the data.
		err = file.Close()
		if err != nil {
			return err
		}

		err = storage.getSFTPClient().Rename(temporaryFile, fullPath)
		if err != nil {
			// If the destination now exists, another uploader won the race;
			// clean up our temporary file and report success.
			if _, err = storage.getSFTPClient().Stat(fullPath); err == nil {
				storage.getSFTPClient().Remove(temporaryFile)
				return nil
			} else {
				return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
			}
		}

		return nil
	})
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SFTPStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *SFTPStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *SFTPStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
|
// If the storage supports fast listing of files names.
|
||||||
|
func (storage *SFTPStorage) IsFastListing() bool {
|
||||||
|
for _, level := range storage.readLevels {
|
||||||
|
if level > 1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enable the test mode.  This is a no-op for SFTP storage.
func (storage *SFTPStorage) EnableTestMode() {}
|
||||||
14
src/duplicacy_shadowcopy.go
Executable file
14
src/duplicacy_shadowcopy.go
Executable file
@@ -0,0 +1,14 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
// +build !windows
|
||||||
|
// +build !darwin
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
// CreateShadowCopy is the fallback for platforms without filesystem snapshot
// support (neither Windows VSS nor macOS APFS): it returns the repository
// path unchanged.
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {
	shadowTop = top
	return
}
|
||||||
|
|
||||||
|
// DeleteShadowCopy is a no-op on platforms without snapshot support.
func DeleteShadowCopy() {}
|
||||||
186
src/duplicacy_shadowcopy_darwin.go
Executable file
186
src/duplicacy_shadowcopy_darwin.go
Executable file
@@ -0,0 +1,186 @@
|
|||||||
|
//
|
||||||
|
// Shadow copy module for Mac OSX using APFS snapshot
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// This module copyright 2018 Adam Marcus (https://github.com/amarcu5)
|
||||||
|
// and may be distributed under the same terms as Duplicacy.
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"regexp"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Package-level state recording the currently mounted APFS snapshot so that
// DeleteShadowCopy can unmount and remove it later.
var snapshotPath string // mount point of the snapshot; "" when none is mounted
var snapshotDate string // tmutil date stamp identifying the snapshot
|
||||||
|
|
||||||
|
// CharsToString converts a NUL-terminated C char array (such as the
// Fstypename field of syscall.Statfs_t) to a Go string.  Bytes after the
// first NUL are ignored; if no NUL is present the whole array is used.
func CharsToString(ca []int8) string {
	// Avoid the original's shadowing of the builtin 'len'; append bytes up to
	// (but excluding) the terminating NUL.
	ba := make([]byte, 0, len(ca))
	for _, v := range ca {
		if v == 0 {
			break
		}
		ba = append(ba, byte(v))
	}
	return string(ba)
}
|
||||||
|
|
||||||
|
// GetPathDeviceId returns the ID of the device containing 'path', used to
// decide whether the repository lives on the local (root) volume.
// NOTE: syscall.Stat_t.Dev is int32 on darwin, which this file targets.
func GetPathDeviceId(path string) (deviceId int32, err error) {

	stat := syscall.Stat_t{}

	err = syscall.Stat(path, &stat)
	if err != nil {
		return 0, err
	}

	return stat.Dev, nil
}
|
||||||
|
|
||||||
|
// CommandWithTimeout executes an external command, killing it after
// 'timeoutInSeconds' seconds, and returns its standard output.
func CommandWithTimeout(timeoutInSeconds int, name string, arg ...string) (output string, err error) {

	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds)*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, name, arg...)
	out, err := cmd.Output()

	// Replace the generic "signal: killed" error with a clearer timeout message.
	if ctx.Err() == context.DeadlineExceeded {
		err = errors.New("Command '" + name + "' timed out")
	}

	// Whatever output was produced before the timeout is still returned.
	output = string(out)
	return output, err
}
|
||||||
|
|
||||||
|
// DeleteShadowCopy unmounts and removes the APFS snapshot created by
// CreateShadowCopy.  It is a no-op when no snapshot is mounted.  Each step
// (unmount, delete snapshot, remove mount dir) aborts the cleanup on failure.
func DeleteShadowCopy() {

	if snapshotPath == "" {
		return
	}

	err := exec.Command("/sbin/umount", "-f", snapshotPath).Run()
	if err != nil {
		LOG_WARN("VSS_DELETE", "Error while unmounting snapshot: %v", err)
		return
	}

	// snapshotDate identifies the snapshot created earlier via 'tmutil snapshot'.
	err = exec.Command("tmutil", "deletelocalsnapshots", snapshotDate).Run()
	if err != nil {
		LOG_WARN("VSS_DELETE", "Error while deleting local snapshot: %v", err)
		return
	}

	err = os.RemoveAll(snapshotPath)
	if err != nil {
		LOG_WARN("VSS_DELETE", "Error while deleting temporary mount directory: %v", err)
		return
	}

	LOG_INFO("VSS_DELETE", "Shadow copy unmounted and deleted at %s", snapshotPath)

	// Mark the snapshot as gone so repeated calls are no-ops.
	snapshotPath = ""
}
|
||||||
|
|
||||||
|
// CreateShadowCopy creates an APFS local snapshot with tmutil, mounts it
// read-only under a temporary directory, and returns the repository path
// re-rooted inside the mounted snapshot.  On any failure it falls back to
// returning 'top' unchanged so the backup proceeds without a shadow copy.
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {

	if !shadowCopy {
		return top
	}

	// Check repository filesystem is APFS
	stat := syscall.Statfs_t{}
	err := syscall.Statfs(top, &stat)
	if err != nil {
		LOG_ERROR("VSS_INIT", "Unable to determine filesystem of repository path")
		return top
	}
	if CharsToString(stat.Fstypename[:]) != "apfs" {
		LOG_WARN("VSS_INIT", "VSS requires APFS filesystem")
		return top
	}

	// Check path is local as tmutil snapshots will not support APFS formatted external drives
	deviceIdLocal, err := GetPathDeviceId("/")
	if err != nil {
		LOG_ERROR("VSS_INIT", "Unable to get device ID of path: /")
		return top
	}
	deviceIdRepository, err := GetPathDeviceId(top)
	if err != nil {
		LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
		return top
	}
	if deviceIdLocal != deviceIdRepository {
		LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
		return top
	}

	// Enforce a minimum timeout of 60 seconds for the external commands below.
	if timeoutInSeconds <= 60 {
		timeoutInSeconds = 60
	}

	// Create mount point (also records it in the package-level snapshotPath
	// so DeleteShadowCopy can clean up later).
	snapshotPath, err = ioutil.TempDir("/tmp/", "snp_")
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Failed to create temporary mount directory")
		return top
	}

	// Use tmutil to create snapshot
	tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
		return top
	}

	// tmutil prints e.g. "Created local snapshot with date: 2018-01-01-123456";
	// extract the date stamp that identifies the snapshot.
	snapshotDateRegex := regexp.MustCompile(`:\s+([0-9\-]+)`)
	matched := snapshotDateRegex.FindStringSubmatch(tmutilOutput)
	if matched == nil {
		LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
		return top
	}
	snapshotDate = matched[1]

	tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
		return top
	}
	// Default name used if the listing does not contain the new snapshot.
	snapshotName := "com.apple.TimeMachine." + snapshotDate

	// Prefer the exact name reported by 'tmutil listlocalsnapshots'.
	snapshotNameRegex := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
	matched = snapshotNameRegex.FindStringSubmatch(tmutilOutput)
	if len(matched) > 0 {
		snapshotName = matched[0]
	} else {
		LOG_INFO("VSS_CREATE", "Can't find the snapshot name with 'tmutil listlocalsnapshots'; fallback to %s", snapshotName)
	}

	// Mount snapshot as readonly and hide from GUI i.e. Finder
	_, err = CommandWithTimeout(timeoutInSeconds,
		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/System/Volumes/Data", snapshotPath)
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
		return top
	}

	LOG_INFO("VSS_DONE", "Shadow copy created and mounted at %s", snapshotPath)

	// The repository path inside the snapshot is the mount point plus the
	// original absolute path ('top' is assumed absolute here).
	return snapshotPath + top
}
|
||||||
522
src/duplicacy_shadowcopy_windows.go
Normal file
522
src/duplicacy_shadowcopy_windows.go
Normal file
@@ -0,0 +1,522 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
ole "github.com/gilbertchen/go-ole"
|
||||||
|
)
|
||||||
|
|
||||||
|
//507C37B4-CF5B-4e95-B0AF-14EB9767467E
// IID of the IVssAsync COM interface.
var IID_IVSS_ASYNC = &ole.GUID{0x507C37B4, 0xCF5B, 0x4e95, [8]byte{0xb0, 0xaf, 0x14, 0xeb, 0x97, 0x67, 0x46, 0x7e}}

// IVSSAsync wraps the IVssAsync COM interface used to wait for asynchronous
// VSS operations.
type IVSSAsync struct {
	ole.IUnknown
}

// IVSSAsyncVtbl mirrors the IVssAsync vtable layout; field order must match
// the COM interface exactly.
type IVSSAsyncVtbl struct {
	ole.IUnknownVtbl
	cancel      uintptr
	wait        uintptr
	queryStatus uintptr
}

// VTable reinterprets the raw COM vtable pointer as an IVSSAsyncVtbl.
func (async *IVSSAsync) VTable() *IVSSAsyncVtbl {
	return (*IVSSAsyncVtbl)(unsafe.Pointer(async.RawVTable))
}
|
||||||
|
|
||||||
|
// VSS_S_* are the HRESULT status codes returned by IVssAsync::QueryStatus.
var VSS_S_ASYNC_PENDING int32 = 0x00042309
var VSS_S_ASYNC_FINISHED int32 = 0x0004230A
var VSS_S_ASYNC_CANCELLED int32 = 0x0004230B

// Wait polls the asynchronous VSS operation until it finishes or 'seconds'
// have elapsed, returning true only when the operation completed.
func (async *IVSSAsync) Wait(seconds int) bool {

	startTime := time.Now().Unix()
	for {
		// Wait up to 1000 ms per iteration, then query the current status.
		ret, _, _ := syscall.Syscall(async.VTable().wait, 2, uintptr(unsafe.Pointer(async)), uintptr(1000), 0)
		if ret != 0 {
			LOG_WARN("IVSSASYNC_WAIT", "IVssAsync::Wait returned %d\n", ret)
		}

		var status int32
		ret, _, _ = syscall.Syscall(async.VTable().queryStatus, 3, uintptr(unsafe.Pointer(async)),
			uintptr(unsafe.Pointer(&status)), 0)
		if ret != 0 {
			LOG_WARN("IVSSASYNC_QUERY", "IVssAsync::QueryStatus returned %d\n", ret)
		}

		if status == VSS_S_ASYNC_FINISHED {
			return true
		}
		// Give up once the overall deadline has passed.
		if time.Now().Unix()-startTime > int64(seconds) {
			LOG_WARN("IVSSASYNC_TIMEOUT", "IVssAsync is pending for more than %d seconds\n", seconds)
			return false
		}
	}
}
|
||||||
|
|
||||||
|
// getIVSSAsync obtains an IVSSAsync interface pointer from a raw IUnknown via
// QueryInterface, or nil if the query fails.
func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
	r, _, _ := syscall.Syscall(
		unknown.VTable().QueryInterface,
		3,
		uintptr(unsafe.Pointer(unknown)),
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&async)))

	if r != 0 {
		LOG_WARN("IVSSASYNC_QUERY", "IVSSAsync::QueryInterface returned %d\n", r)
		return nil
	}
	return
}
|
||||||
|
|
||||||
|
//665c1d5f-c218-414d-a05d-7fef5f9d5c86
// IID of the IVssBackupComponents COM interface.
var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}

// IVSS wraps the IVssBackupComponents COM interface.
type IVSS struct {
	ole.IUnknown
}

// IVSSVtbl mirrors the IVssBackupComponents vtable; field order must match
// the COM interface declaration exactly, even for methods never called here.
type IVSSVtbl struct {
	ole.IUnknownVtbl
	getWriterComponentsCount      uintptr
	getWriterComponents           uintptr
	initializeForBackup           uintptr
	setBackupState                uintptr
	initializeForRestore          uintptr
	setRestoreState               uintptr
	gatherWriterMetadata          uintptr
	getWriterMetadataCount        uintptr
	getWriterMetadata             uintptr
	freeWriterMetadata            uintptr
	addComponent                  uintptr
	prepareForBackup              uintptr
	abortBackup                   uintptr
	gatherWriterStatus            uintptr
	getWriterStatusCount          uintptr
	freeWriterStatus              uintptr
	getWriterStatus               uintptr
	setBackupSucceeded            uintptr
	setBackupOptions              uintptr
	setSelectedForRestore         uintptr
	setRestoreOptions             uintptr
	setAdditionalRestores         uintptr
	setPreviousBackupStamp        uintptr
	saveAsXML                     uintptr
	backupComplete                uintptr
	addAlternativeLocationMapping uintptr
	addRestoreSubcomponent        uintptr
	setFileRestoreStatus          uintptr
	addNewTarget                  uintptr
	setRangesFilePath             uintptr
	preRestore                    uintptr
	postRestore                   uintptr
	setContext                    uintptr
	startSnapshotSet              uintptr
	addToSnapshotSet              uintptr
	doSnapshotSet                 uintptr
	deleteSnapshots               uintptr
	importSnapshots               uintptr
	breakSnapshotSet              uintptr
	getSnapshotProperties         uintptr
	query                         uintptr
	isVolumeSupported             uintptr
	disableWriterClasses          uintptr
	enableWriterClasses           uintptr
	disableWriterInstances        uintptr
	exposeSnapshot                uintptr
	revertToSnapshot              uintptr
	queryRevertStatus             uintptr
}

// VTable reinterprets the raw COM vtable pointer as an IVSSVtbl.
func (vss *IVSS) VTable() *IVSSVtbl {
	return (*IVSSVtbl)(unsafe.Pointer(vss.RawVTable))
}
|
||||||
|
|
||||||
|
// InitializeForBackup calls IVssBackupComponents::InitializeForBackup and
// returns its HRESULT (0 on success).
func (vss *IVSS) InitializeForBackup() int {
	ret, _, _ := syscall.Syscall(vss.VTable().initializeForBackup, 2, uintptr(unsafe.Pointer(vss)), 0, 0)
	return int(ret)
}

// GatherWriterMetadata starts collecting writer metadata.  On success it also
// returns the IVSSAsync object to wait on; on failure the async is nil.
func (vss *IVSS) GatherWriterMetadata() (int, *IVSSAsync) {
	var unknown *ole.IUnknown
	ret, _, _ := syscall.Syscall(vss.VTable().gatherWriterMetadata, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&unknown)), 0)

	if ret != 0 {
		return int(ret), nil
	} else {
		return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
	}
}

// StartSnapshotSet begins a new snapshot set, storing its GUID in *snapshotID,
// and returns the HRESULT.
func (vss *IVSS) StartSnapshotSet(snapshotID *ole.GUID) int {
	ret, _, _ := syscall.Syscall(vss.VTable().startSnapshotSet, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(snapshotID)), 0)
	return int(ret)
}
|
||||||
|
|
||||||
|
// AddToSnapshotSet adds the volume named by 'drive' to the current snapshot
// set and stores the resulting snapshot GUID in *snapshotID.
func (vss *IVSS) AddToSnapshotSet(drive string, snapshotID *ole.GUID) int {

	volumeName := syscall.StringToUTF16Ptr(drive)

	var ret uintptr
	if runtime.GOARCH == "386" {
		// On 32-bit Windows, GUID is passed by value
		// (four 32-bit words; a null GUID is passed here as four zeros).
		ret, _, _ = syscall.Syscall9(vss.VTable().addToSnapshotSet, 7,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(volumeName)),
			0, 0, 0, 0,
			uintptr(unsafe.Pointer(snapshotID)), 0, 0)
	} else {
		// On 64-bit Windows the GUID argument is a pointer (IID_NULL here).
		ret, _, _ = syscall.Syscall6(vss.VTable().addToSnapshotSet, 4,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(volumeName)),
			uintptr(unsafe.Pointer(ole.IID_NULL)),
			uintptr(unsafe.Pointer(snapshotID)), 0, 0)
	}
	return int(ret)
}

// SetBackupState declares the backup type to VSS; VSS_BT_COPY means a copy
// backup that does not affect other backup sequences.
func (vss *IVSS) SetBackupState() int {
	VSS_BT_COPY := 5
	ret, _, _ := syscall.Syscall6(vss.VTable().setBackupState, 4,
		uintptr(unsafe.Pointer(vss)),
		0, 0, uintptr(VSS_BT_COPY), 0, 0)
	return int(ret)
}
|
||||||
|
|
||||||
|
// PrepareForBackup notifies writers that a backup is about to happen.  On
// success it also returns the IVSSAsync object to wait on; nil on failure.
func (vss *IVSS) PrepareForBackup() (int, *IVSSAsync) {
	var unknown *ole.IUnknown
	ret, _, _ := syscall.Syscall(vss.VTable().prepareForBackup, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&unknown)), 0)

	if ret != 0 {
		return int(ret), nil
	} else {
		return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
	}
}

// DoSnapshotSet commits the snapshot set (the actual shadow copy creation).
// On success it also returns the IVSSAsync object to wait on; nil on failure.
func (vss *IVSS) DoSnapshotSet() (int, *IVSSAsync) {
	var unknown *ole.IUnknown
	ret, _, _ := syscall.Syscall(vss.VTable().doSnapshotSet, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&unknown)), 0)

	if ret != 0 {
		return int(ret), nil
	} else {
		return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
	}
}
|
||||||
|
|
||||||
|
// SnapshotProperties mirrors the native VSS_SNAPSHOT_PROP structure filled in
// by IVssBackupComponents::GetSnapshotProperties.  Field order and types must
// match the Windows layout; the *uint16 fields are NUL-terminated UTF-16
// strings (decode with uint16ArrayToString).
type SnapshotProperties struct {
	SnapshotID           ole.GUID
	SnapshotSetID        ole.GUID
	SnapshotsCount       uint32
	SnapshotDeviceObject *uint16
	OriginalVolumeName   *uint16
	OriginatingMachine   *uint16
	ServiceMachine       *uint16
	ExposedName          *uint16
	ExposedPath          *uint16
	ProviderId           ole.GUID
	SnapshotAttributes   uint32
	CreationTimestamp    int64
	Status               int
}
|
||||||
|
|
||||||
|
// GetSnapshotProperties fills *properties for the given snapshot.  On 32-bit
// Windows the 16-byte GUID is passed by value as four 32-bit words read out
// of its memory; on 64-bit it is passed by pointer.
func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
	var ret uintptr
	if runtime.GOARCH == "386" {
		address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
		ret, _, _ = syscall.Syscall6(vss.VTable().getSnapshotProperties, 6,
			uintptr(unsafe.Pointer(vss)),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
			uintptr(unsafe.Pointer(properties)))
	} else {
		ret, _, _ = syscall.Syscall(vss.VTable().getSnapshotProperties, 3,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(&snapshotSetID)),
			uintptr(unsafe.Pointer(properties)))
	}
	return int(ret)
}
|
||||||
|
|
||||||
|
// DeleteSnapshots deletes the given snapshot (VSS_OBJECT_SNAPSHOT, forced),
// returning the HRESULT, the number of snapshots deleted, and the GUID of the
// last deleted snapshot.  As above, the GUID is passed by value on 386 and by
// pointer on other architectures.
func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {

	VSS_OBJECT_SNAPSHOT := 3

	deleted := int32(0)

	var deletedGUID ole.GUID

	var ret uintptr
	if runtime.GOARCH == "386" {
		address := uint(uintptr(unsafe.Pointer(&snapshotID)))
		ret, _, _ = syscall.Syscall9(vss.VTable().deleteSnapshots, 9,
			uintptr(unsafe.Pointer(vss)),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
			uintptr(VSS_OBJECT_SNAPSHOT),
			uintptr(1),
			uintptr(unsafe.Pointer(&deleted)),
			uintptr(unsafe.Pointer(&deletedGUID)))
	} else {
		ret, _, _ = syscall.Syscall6(vss.VTable().deleteSnapshots, 6,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(&snapshotID)),
			uintptr(VSS_OBJECT_SNAPSHOT),
			uintptr(1),
			uintptr(unsafe.Pointer(&deleted)),
			uintptr(unsafe.Pointer(&deletedGUID)))
	}

	return int(ret), int(deleted), deletedGUID
}
|
||||||
|
|
||||||
|
func uint16ArrayToString(p *uint16) string {
|
||||||
|
if p == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
s := make([]uint16, 0)
|
||||||
|
address := uintptr(unsafe.Pointer(p))
|
||||||
|
for {
|
||||||
|
c := *(*uint16)(unsafe.Pointer(address))
|
||||||
|
if c == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
s = append(s, c)
|
||||||
|
address = uintptr(int(address) + 2)
|
||||||
|
}
|
||||||
|
|
||||||
|
return syscall.UTF16ToString(s)
|
||||||
|
}
|
||||||
|
|
||||||
|
// getIVSS obtains an IVSS (IVssBackupComponents) interface pointer from a raw
// IUnknown via QueryInterface, or nil if the query fails.
func getIVSS(unknown *ole.IUnknown, iid *ole.GUID) (ivss *IVSS) {
	r, _, _ := syscall.Syscall(
		unknown.VTable().QueryInterface,
		3,
		uintptr(unsafe.Pointer(unknown)),
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&ivss)))

	if r != 0 {
		LOG_WARN("IVSS_QUERY", "IVSS::QueryInterface returned %d\n", r)
		return nil
	}

	return ivss
}
|
||||||
|
|
||||||
|
// Package-level state shared between CreateShadowCopy and DeleteShadowCopy.
var vssBackupComponent *IVSS // backup components object; nil when no shadow copy is active
var snapshotID ole.GUID      // GUID of the snapshot created for this backup
var shadowLink string        // symlink in the preference dir pointing into the shadow copy

// DeleteShadowCopy deletes the shadow copy created for this backup, removes
// the symbolic link pointing into it, and uninitializes COM.
func DeleteShadowCopy() {
	if vssBackupComponent != nil {
		defer vssBackupComponent.Release()

		LOG_TRACE("VSS_DELETE", "Deleting the shadow copy used for this backup")
		ret, _, _ := vssBackupComponent.DeleteSnapshots(snapshotID)
		if ret != 0 {
			LOG_WARN("VSS_DELETE", "Failed to delete the shadow copy: %x\n", uint(ret))
		} else {
			LOG_INFO("VSS_DELETE", "The shadow copy has been successfully deleted")
		}
	}

	if shadowLink != "" {
		err := os.Remove(shadowLink)
		if err != nil {
			LOG_WARN("VSS_SYMLINK", "Failed to remove the symbolic link for the shadow copy: %v", err)
		}
	}

	// Balances the CoInitialize performed in CreateShadowCopy (COM init is
	// reference-counted per thread).
	ole.CoUninitialize()
}
|
||||||
|
|
||||||
|
// CreateShadowCopy creates a VSS shadow copy of the volume containing 'top',
// exposes it through a symbolic link under the preference directory, and
// returns the repository path re-rooted inside the shadow copy.  On any
// failure it returns 'top' unchanged so the backup proceeds without VSS.
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {

	if !shadowCopy {
		return top
	}

	// Enforce a minimum timeout of 60 seconds for the async VSS phases.
	if timeoutInSeconds <= 60 {
		timeoutInSeconds = 60
	}
	ole.CoInitialize(0)
	defer ole.CoUninitialize()

	// CreateVssBackupComponents is exported under a C++-mangled name that
	// differs between 64-bit and 32-bit builds of VssApi.dll.
	dllVssApi := syscall.NewLazyDLL("VssApi.dll")
	procCreateVssBackupComponents :=
		dllVssApi.NewProc("?CreateVssBackupComponents@@YAJPEAPEAVIVssBackupComponents@@@Z")
	if runtime.GOARCH == "386" {
		procCreateVssBackupComponents =
			dllVssApi.NewProc("?CreateVssBackupComponents@@YGJPAPAVIVssBackupComponents@@@Z")
	}

	// Expect a drive-letter path like "C:\..."; the shadow copy is per-volume.
	if len(top) < 3 || top[1] != ':' || (top[2] != '/' && top[2] != '\\') {
		LOG_ERROR("VSS_PATH", "Invalid repository path: %s", top)
		return top
	}
	volume := top[:1] + ":\\"

	LOG_INFO("VSS_CREATE", "Creating a shadow copy for %s", volume)

	var unknown *ole.IUnknown
	r, _, err := procCreateVssBackupComponents.Call(uintptr(unsafe.Pointer(&unknown)))

	// 0x80070005 is E_ACCESSDENIED.
	if r == 0x80070005 {
		LOG_ERROR("VSS_CREATE", "Only administrators can create shadow copies")
		return top
	}

	if r != 0 {
		LOG_ERROR("VSS_CREATE", "Failed to create the VSS backup component: %d", r)
		return top
	}

	vssBackupComponent = getIVSS(unknown, IID_IVSS)
	if vssBackupComponent == nil {
		LOG_ERROR("VSS_CREATE", "Failed to create the VSS backup component")
		return top
	}

	// The following sequence (InitializeForBackup, GatherWriterMetadata,
	// StartSnapshotSet, AddToSnapshotSet, SetBackupState, PrepareForBackup,
	// DoSnapshotSet) is the order mandated by the VSS backup API.
	ret := vssBackupComponent.InitializeForBackup()
	if ret != 0 {
		LOG_ERROR("VSS_INIT", "Shadow copy creation failed: InitializeForBackup returned %x", uint(ret))
		return top
	}

	var async *IVSSAsync
	ret, async = vssBackupComponent.GatherWriterMetadata()
	if ret != 0 {
		LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata returned %x", uint(ret))
		return top
	}

	if async == nil {
		LOG_ERROR("VSS_GATHER",
			"Shadow copy creation failed: GatherWriterMetadata failed to return a valid IVssAsync object")
		return top
	}

	if !async.Wait(timeoutInSeconds) {
		LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata didn't finish properly")
		return top
	}
	async.Release()

	var snapshotSetID ole.GUID

	ret = vssBackupComponent.StartSnapshotSet(&snapshotSetID)
	if ret != 0 {
		LOG_ERROR("VSS_START", "Shadow copy creation failed: StartSnapshotSet returned %x", uint(ret))
		return top
	}

	// The snapshot GUID is stored in the package-level snapshotID so
	// DeleteShadowCopy can delete it afterwards.
	ret = vssBackupComponent.AddToSnapshotSet(volume, &snapshotID)
	if ret != 0 {
		LOG_ERROR("VSS_ADD", "Shadow copy creation failed: AddToSnapshotSet returned %x", uint(ret))
		return top
	}

	s, _ := ole.StringFromIID(&snapshotID)
	LOG_DEBUG("VSS_ID", "Creating shadow copy %s", s)

	ret = vssBackupComponent.SetBackupState()
	if ret != 0 {
		LOG_ERROR("VSS_SET", "Shadow copy creation failed: SetBackupState returned %x", uint(ret))
		return top
	}

	ret, async = vssBackupComponent.PrepareForBackup()
	if ret != 0 {
		LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup returned %x", uint(ret))
		return top
	}
	if async == nil {
		LOG_ERROR("VSS_PREPARE",
			"Shadow copy creation failed: PrepareForBackup failed to return a valid IVssAsync object")
		return top
	}

	if !async.Wait(timeoutInSeconds) {
		LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup didn't finish properly")
		return top
	}
	async.Release()

	ret, async = vssBackupComponent.DoSnapshotSet()
	if ret != 0 {
		LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet returned %x", uint(ret))
		return top
	}
	if async == nil {
		LOG_ERROR("VSS_SNAPSHOT",
			"Shadow copy creation failed: DoSnapshotSet failed to return a valid IVssAsync object")
		return top
	}

	if !async.Wait(timeoutInSeconds) {
		LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet didn't finish properly")
		return top
	}
	async.Release()

	properties := SnapshotProperties{}

	ret = vssBackupComponent.GetSnapshotProperties(snapshotID, &properties)
	if ret != 0 {
		LOG_ERROR("VSS_PROPERTIES", "GetSnapshotProperties returned %x", ret)
		return top
	}

	SnapshotIDString, _ := ole.StringFromIID(&properties.SnapshotID)
	SnapshotSetIDString, _ := ole.StringFromIID(&properties.SnapshotSetID)

	LOG_DEBUG("VSS_PROPERTY", "SnapshotID: %s", SnapshotIDString)
	LOG_DEBUG("VSS_PROPERTY", "SnapshotSetID: %s", SnapshotSetIDString)

	LOG_DEBUG("VSS_PROPERTY", "SnapshotDeviceObject: %s", uint16ArrayToString(properties.SnapshotDeviceObject))
	LOG_DEBUG("VSS_PROPERTY", "OriginalVolumeName: %s", uint16ArrayToString(properties.OriginalVolumeName))
	LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
	// NOTE(review): the line below duplicates the OriginatingMachine log —
	// likely a copy-paste slip; it probably should log ServiceMachine or be removed.
	LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
	LOG_DEBUG("VSS_PROPERTY", "ServiceMachine: %s", uint16ArrayToString(properties.ServiceMachine))
	LOG_DEBUG("VSS_PROPERTY", "ExposedName: %s", uint16ArrayToString(properties.ExposedName))
	LOG_DEBUG("VSS_PROPERTY", "ExposedPath: %s", uint16ArrayToString(properties.ExposedPath))

	LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)

	snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)

	// The shadow device object cannot be opened by normal file APIs directly;
	// expose it via a symbolic link in the preference directory instead.
	preferencePath := GetDuplicacyPreferencePath()
	shadowLink = preferencePath + "\\shadow"
	os.Remove(shadowLink)
	err = os.Symlink(snapshotPath+"\\", shadowLink)
	if err != nil {
		LOG_ERROR("VSS_SYMLINK", "Failed to create a symbolic link to the shadow copy just created: %v", err)
		return top
	}

	// Re-root the repository path inside the shadow copy (drop "C:").
	return shadowLink + "\\" + top[2:]

}
|
||||||
491
src/duplicacy_snapshot.go
Normal file
491
src/duplicacy_snapshot.go
Normal file
@@ -0,0 +1,491 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Snapshot represents a backup of the repository.
type Snapshot struct {
	ID            string // the snapshot id; must be different for different repositories
	Revision      int    // the revision number
	Options       string // options used to create this snapshot (some not included)
	Tag           string // user-assigned tag
	StartTime     int64  // at what time the snapshot was created
	EndTime       int64  // at what time the snapshot was done
	FileSize      int64  // total file size
	NumberOfFiles int64  // number of files

	// A sequence of chunks whose aggregated content is the json representation of 'Files'.
	FileSequence []string

	// A sequence of chunks whose aggregated content is the json representation of 'ChunkHashes'.
	ChunkSequence []string

	// A sequence of chunks whose aggregated content is the json representation of 'ChunkLengths'.
	LengthSequence []string

	Files []*Entry // list of files and subdirectories

	ChunkHashes  []string // a sequence of chunks representing the file content
	ChunkLengths []int    // the length of each chunk

	Flag bool // used to mark certain snapshots for deletion or copy

	// When true, file attributes are dropped while listing to cap memory use
	// (set once the file count exceeds the attribute threshold; see
	// CreateSnapshotFromDirectory).
	discardAttributes bool
}
|
||||||
|
|
||||||
|
// CreateEmptySnapshot creates an empty snapshot.
|
||||||
|
func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
|
||||||
|
return &Snapshot{
|
||||||
|
ID: id,
|
||||||
|
Revision: 0,
|
||||||
|
StartTime: time.Now().Unix(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
//
// 'nobackupFile' and 'excludeByAttribute' are passed through to ListEntries to control
// exclusion; 'filtersFile' is the include/exclude pattern file (defaults to 'filters' under
// the preference directory when empty). Directories and files that could not be listed are
// returned in 'skippedDirectories' and 'skippedFiles'; only a failure to list the repository
// root is fatal.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string, excludeByAttribute bool) (snapshot *Snapshot, skippedDirectories []string,
	skippedFiles []string, err error) {

	snapshot = &Snapshot{
		ID:        id,
		Revision:  0, // the real revision number is assigned later
		StartTime: time.Now().Unix(),
	}

	var patterns []string

	if filtersFile == "" {
		filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
	}
	patterns = ProcessFilters(filtersFile)

	// 'directories' is used as a stack for a depth-first traversal, seeded
	// with a root entry that is removed from the result at the end.
	directories := make([]*Entry, 0, 256)
	directories = append(directories, CreateEntry("", 0, 0, 0))

	snapshot.Files = make([]*Entry, 0, 256)

	// Once more files than this have been listed, attributes are discarded
	// to limit memory use; overridable via DUPLICACY_ATTRIBUTE_THRESHOLD.
	attributeThreshold := 1024 * 1024
	if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
		attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
	}

	for len(directories) > 0 {

		// Pop the next directory and record it before listing its contents.
		directory := directories[len(directories)-1]
		directories = directories[:len(directories)-1]
		snapshot.Files = append(snapshot.Files, directory)
		subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes, excludeByAttribute)
		if err != nil {
			if directory.Path == "" {
				// Failing to list the root means nothing can be backed up.
				LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
				return nil, nil, nil, err
			}
			LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
			skippedDirectories = append(skippedDirectories, directory.Path)
			continue
		}

		directories = append(directories, subdirectories...)
		skippedFiles = append(skippedFiles, skipped...)

		// Crossing the threshold drops attributes on everything already
		// listed and on all entries listed from now on.
		if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
			LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
			snapshot.discardAttributes = true
			for _, file := range snapshot.Files {
				file.Attributes = nil
			}
		}
	}

	// Remove the root entry
	snapshot.Files = snapshot.Files[1:]

	return snapshot, skippedDirectories, skippedFiles, nil
}
|
||||||
|
|
||||||
|
func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
|
||||||
|
for _, pattern := range patterns {
|
||||||
|
if pattern == new_pattern {
|
||||||
|
LOG_INFO("SNAPSHOT_FILTER", "Ignoring duplicate pattern: %s ...", new_pattern)
|
||||||
|
return patterns
|
||||||
|
}
|
||||||
|
}
|
||||||
|
new_patterns = append(patterns, new_pattern)
|
||||||
|
return new_patterns
|
||||||
|
}
|
||||||
|
// ProcessFilters loads include/exclude patterns from 'filtersFile' (following
// any @-includes recursively) and logs a summary; when tracing is enabled,
// each individual pattern is also logged.
func ProcessFilters(filtersFile string) (patterns []string) {
	patterns = ProcessFilterFile(filtersFile, make([]string, 0))

	LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))

	LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))

	if IsTracing() {
		for _, pattern := range patterns {
			LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
		}

	}

	return patterns
}
|
||||||
|
|
||||||
|
func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []string) {
|
||||||
|
for _, file := range includedFiles {
|
||||||
|
if file == patternFile {
|
||||||
|
// cycle in include mechanism discovered.
|
||||||
|
LOG_ERROR("SNAPSHOT_FILTER", "The filter file %s has already been included", patternFile)
|
||||||
|
return patterns
|
||||||
|
}
|
||||||
|
}
|
||||||
|
includedFiles = append(includedFiles, patternFile)
|
||||||
|
LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile)
|
||||||
|
patternFileContent, err := ioutil.ReadFile(patternFile)
|
||||||
|
if err == nil {
|
||||||
|
patternFileLines := strings.Split(string(patternFileContent), "\n")
|
||||||
|
patterns = ProcessFilterLines(patternFileLines, includedFiles)
|
||||||
|
}
|
||||||
|
return patterns
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProcessFilterLines parses filter-file lines into include/exclude patterns.
// Blank lines and '#' comment lines are skipped; a line beginning with '@'
// includes another filter file (relative paths are resolved against the
// including file, or the working directory for the top-level call); patterns
// without an explicit prefix default to an include ('+').
func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patterns []string) {
	for _, pattern := range patternFileLines {
		pattern = strings.TrimSpace(pattern)
		if len(pattern) == 0 {
			continue
		}

		// '@file' pulls in the patterns from another filter file.
		if strings.HasPrefix(pattern, "@") {
			patternIncludeFile := strings.TrimSpace(pattern[1:])
			if patternIncludeFile == "" {
				continue
			}
			if ! filepath.IsAbs(patternIncludeFile) {
				// Resolve a relative include against the file doing the
				// including, or the working directory at the top level.
				basePath := ""
				if len(includedFiles) == 0 {
					basePath, _ = os.Getwd()
				} else {
					basePath = filepath.Dir(includedFiles[len(includedFiles)-1])
				}
				patternIncludeFile = joinPath(basePath, patternIncludeFile)
			}
			for _, pattern := range ProcessFilterFile(patternIncludeFile, includedFiles) {
				patterns = AppendPattern(patterns, pattern)
			}
			continue
		}

		// '#' starts a comment line.
		if pattern[0] == '#' {
			continue
		}

		// No explicit '+'/'-'/regex prefix: treat as an include.
		if IsUnspecifiedFilter(pattern) {
			pattern = "+" + pattern
		}

		if IsEmptyFilter(pattern) {
			continue
		}

		// Regex filters ('i:'/'e:') must contain a valid regular expression.
		if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
			valid, err := IsValidRegex(pattern[2:])
			if !valid || err != nil {
				LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
			}
		}

		patterns = AppendPattern(patterns, pattern)
	}

	return patterns
}
|
||||||
|
|
||||||
|
// IncompleteSnapshot is the serialized form used to save/load incomplete
// snapshots (backups interrupted before completion) under the preference
// directory.
type IncompleteSnapshot struct {
	Files        []*Entry // entries processed so far
	ChunkHashes  []string // hex-encoded chunk hashes (binary in Snapshot.ChunkHashes)
	ChunkLengths []int    // lengths of the corresponding chunks
}
|
||||||
|
|
||||||
|
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
|
||||||
|
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
|
||||||
|
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
|
||||||
|
description, err := ioutil.ReadFile(snapshotFile)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var incompleteSnapshot IncompleteSnapshot
|
||||||
|
|
||||||
|
err = json.Unmarshal(description, &incompleteSnapshot)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var chunkHashes []string
|
||||||
|
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
|
||||||
|
hash, err := hex.DecodeString(chunkHash)
|
||||||
|
if err != nil {
|
||||||
|
LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
chunkHashes = append(chunkHashes, string(hash))
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot = &Snapshot{
|
||||||
|
Files: incompleteSnapshot.Files,
|
||||||
|
ChunkHashes: chunkHashes,
|
||||||
|
ChunkLengths: incompleteSnapshot.ChunkLengths,
|
||||||
|
}
|
||||||
|
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
|
||||||
|
return snapshot
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference
// directory so an interrupted backup can be resumed later.
func SaveIncompleteSnapshot(snapshot *Snapshot) {
	var files []*Entry
	for _, file := range snapshot.Files {
		// All unprocessed files will have a size of -1; stop at the first
		// one, keeping only the fully processed prefix of the entry list.
		if file.Size >= 0 {
			// Attributes are not needed for resuming; drop them to shrink the file.
			file.Attributes = nil
			files = append(files, file)
		} else {
			break
		}
	}
	// Store chunk hashes as hex so the binary hashes survive json encoding.
	var chunkHashes []string
	for _, chunkHash := range snapshot.ChunkHashes {
		chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
	}

	incompleteSnapshot := IncompleteSnapshot{
		Files:        files,
		ChunkHashes:  chunkHashes,
		ChunkLengths: snapshot.ChunkLengths,
	}

	description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
	if err != nil {
		LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
		return
	}

	snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
	err = ioutil.WriteFile(snapshotFile, description, 0644)
	if err != nil {
		LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
		return
	}

	LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}
|
||||||
|
|
||||||
|
func RemoveIncompleteSnapshot() {
|
||||||
|
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
|
||||||
|
if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
|
||||||
|
err = os.Remove(snapshotFile)
|
||||||
|
if err != nil {
|
||||||
|
LOG_INFO("INCOMPLETE_SAVE", "Failed to remove ncomplete snapshot: %v", err)
|
||||||
|
} else {
|
||||||
|
LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateSnapshotFromDescription creates a snapshot from the json description.
// 'id', 'revision', 'start_time', 'end_time', and the three chunk sequences
// ('files', 'chunks', 'lengths') are required; 'tag', 'options', 'file_size',
// and 'number_of_files' are optional. Sequence entries are hex strings and
// are decoded to binary before being stored.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {

	var root map[string]interface{}

	err = json.Unmarshal(description, &root)
	if err != nil {
		return nil, err
	}

	snapshot = &Snapshot{}

	if value, ok := root["id"]; !ok {
		return nil, fmt.Errorf("No id is specified in the snapshot")
	} else if snapshot.ID, ok = value.(string); !ok {
		return nil, fmt.Errorf("Invalid id is specified in the snapshot")
	}

	// json numbers decode as float64; convert to int after the type check.
	if value, ok := root["revision"]; !ok {
		return nil, fmt.Errorf("No revision is specified in the snapshot")
	} else if _, ok = value.(float64); !ok {
		return nil, fmt.Errorf("Invalid revision is specified in the snapshot")
	} else {
		snapshot.Revision = int(value.(float64))
	}

	// 'tag' is optional; only its type is validated when present.
	if value, ok := root["tag"]; !ok {
	} else if snapshot.Tag, ok = value.(string); !ok {
		return nil, fmt.Errorf("Invalid tag is specified in the snapshot")
	}

	// 'options' is optional; only its type is validated when present.
	if value, ok := root["options"]; !ok {
	} else if snapshot.Options, ok = value.(string); !ok {
		return nil, fmt.Errorf("Invalid options is specified in the snapshot")
	}

	if value, ok := root["start_time"]; !ok {
		return nil, fmt.Errorf("No creation time is specified in the snapshot")
	} else if _, ok = value.(float64); !ok {
		return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
	} else {
		snapshot.StartTime = int64(value.(float64))
	}

	// NOTE(review): these errors say "creation time" although the field is
	// 'end_time' (completion time) -- apparently copied from the block above.
	if value, ok := root["end_time"]; !ok {
		return nil, fmt.Errorf("No creation time is specified in the snapshot")
	} else if _, ok = value.(float64); !ok {
		return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
	} else {
		snapshot.EndTime = int64(value.(float64))
	}

	// Statistics are optional and silently skipped if malformed.
	if value, ok := root["file_size"]; ok {
		if _, ok = value.(float64); ok {
			snapshot.FileSize = int64(value.(float64))
		}
	}

	if value, ok := root["number_of_files"]; ok {
		if _, ok = value.(float64); ok {
			snapshot.NumberOfFiles = int64(value.(float64))
		}
	}

	// Each sequence is an array of hex-encoded chunk hashes; decode to
	// binary and store via SetSequence.
	for _, sequenceType := range []string{"files", "chunks", "lengths"} {
		if value, ok := root[sequenceType]; !ok {
			return nil, fmt.Errorf("No %s are specified in the snapshot", sequenceType)
		} else if _, ok = value.([]interface{}); !ok {
			return nil, fmt.Errorf("Invalid %s are specified in the snapshot", sequenceType)
		} else {
			array := value.([]interface{})
			sequence := make([]string, len(array))
			for i := 0; i < len(array); i++ {
				if hashInHex, ok := array[i].(string); !ok {
					return nil, fmt.Errorf("Invalid file sequence is specified in the snapshot")
				} else if hash, err := hex.DecodeString(hashInHex); err != nil {
					return nil, fmt.Errorf("Hash %s is not a valid hex string in the snapshot", hashInHex)
				} else {
					sequence[i] = string(hash)
				}
			}

			snapshot.SetSequence(sequenceType, sequence)
		}
	}

	return snapshot, nil
}
|
||||||
|
|
||||||
|
// LoadChunks construct 'ChunkHashes' from the json description.
|
||||||
|
func (snapshot *Snapshot) LoadChunks(description []byte) (err error) {
|
||||||
|
|
||||||
|
var root []interface{}
|
||||||
|
err = json.Unmarshal(description, &root)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshot.ChunkHashes = make([]string, len(root))
|
||||||
|
|
||||||
|
for i, object := range root {
|
||||||
|
if hashInHex, ok := object.(string); !ok {
|
||||||
|
return fmt.Errorf("Invalid chunk hash is specified in the snapshot")
|
||||||
|
} else if hash, err := hex.DecodeString(hashInHex); err != nil {
|
||||||
|
return fmt.Errorf("The chunk hash %s is not a valid hex string", hashInHex)
|
||||||
|
} else {
|
||||||
|
snapshot.ChunkHashes[i] = string(hash)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ClearChunks removes loaded chunks from memory so they can be garbage
// collected.
func (snapshot *Snapshot) ClearChunks() {
	snapshot.ChunkHashes = nil
}
|
||||||
|
|
||||||
|
// LoadLengths constructs 'ChunkLengths' from the json description, which is
// expected to be a json array of integers.
func (snapshot *Snapshot) LoadLengths(description []byte) (err error) {
	return json.Unmarshal(description, &snapshot.ChunkLengths)
}
|
||||||
|
|
||||||
|
// MarshalJSON creates a json representation of the snapshot.
|
||||||
|
func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
|
||||||
|
|
||||||
|
object := make(map[string]interface{})
|
||||||
|
|
||||||
|
object["id"] = snapshot.ID
|
||||||
|
object["revision"] = snapshot.Revision
|
||||||
|
object["options"] = snapshot.Options
|
||||||
|
object["tag"] = snapshot.Tag
|
||||||
|
object["start_time"] = snapshot.StartTime
|
||||||
|
object["end_time"] = snapshot.EndTime
|
||||||
|
|
||||||
|
if snapshot.FileSize != 0 && snapshot.NumberOfFiles != 0 {
|
||||||
|
object["file_size"] = snapshot.FileSize
|
||||||
|
object["number_of_files"] = snapshot.NumberOfFiles
|
||||||
|
}
|
||||||
|
object["files"] = encodeSequence(snapshot.FileSequence)
|
||||||
|
object["chunks"] = encodeSequence(snapshot.ChunkSequence)
|
||||||
|
object["lengths"] = encodeSequence(snapshot.LengthSequence)
|
||||||
|
|
||||||
|
return json.Marshal(object)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalSequence creates a json represetion for the specified chunk sequence.
|
||||||
|
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([]byte, error) {
|
||||||
|
|
||||||
|
if sequenceType == "files" {
|
||||||
|
return json.Marshal(snapshot.Files)
|
||||||
|
} else if sequenceType == "chunks" {
|
||||||
|
return json.Marshal(encodeSequence(snapshot.ChunkHashes))
|
||||||
|
} else {
|
||||||
|
return json.Marshal(snapshot.ChunkLengths)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSequence assign a chunk sequence to the specified field.
|
||||||
|
func (snapshot *Snapshot) SetSequence(sequenceType string, sequence []string) {
|
||||||
|
if sequenceType == "files" {
|
||||||
|
snapshot.FileSequence = sequence
|
||||||
|
} else if sequenceType == "chunks" {
|
||||||
|
snapshot.ChunkSequence = sequence
|
||||||
|
} else {
|
||||||
|
snapshot.LengthSequence = sequence
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// encodeSequence turns a sequence of binary hashes into a sequence of hex
// hashes, preserving order.
func encodeSequence(sequence []string) []string {
	encoded := make([]string, len(sequence))
	for i := range sequence {
		encoded[i] = hex.EncodeToString([]byte(sequence[i]))
	}
	return encoded
}
|
||||||
2655
src/duplicacy_snapshotmanager.go
Normal file
2655
src/duplicacy_snapshotmanager.go
Normal file
File diff suppressed because it is too large
Load Diff
687
src/duplicacy_snapshotmanager_test.go
Normal file
687
src/duplicacy_snapshotmanager_test.go
Normal file
@@ -0,0 +1,687 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
|
||||||
|
return &Snapshot{
|
||||||
|
ID: snapshotID,
|
||||||
|
Revision: revision,
|
||||||
|
EndTime: endTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestIsDeletable exercises FossilCollection.IsDeletable across several
// scenarios: the collection becomes deletable once every repository known to
// it has a snapshot newer than the collection's end time; a repository whose
// only snapshots are much older appears to be treated as inactive and ignored
// (scenario 4 -- confirm against IsDeletable's implementation).
func TestIsDeletable(t *testing.T) {

	//SetLoggingLevel(DEBUG)

	now := time.Now().Unix()
	day := int64(3600 * 24)

	// Two hosts, each with a revision older and a revision newer than the
	// collection's end time (now - 1 day - 1 hour).
	allSnapshots := make(map[string][]*Snapshot)
	allSnapshots["host1"] = append([]*Snapshot{}, createDummySnapshot("host1", 1, now-2*day))
	allSnapshots["host2"] = append([]*Snapshot{}, createDummySnapshot("host2", 1, now-2*day))
	allSnapshots["host1"] = append(allSnapshots["host1"], createDummySnapshot("host1", 2, now-1*day))
	allSnapshots["host2"] = append(allSnapshots["host2"], createDummySnapshot("host2", 2, now-1*day))

	collection := &FossilCollection{
		EndTime:       now - day - 3600,
		LastRevisions: make(map[string]int),
	}

	collection.LastRevisions["host1"] = 1
	collection.LastRevisions["host2"] = 1

	isDeletable, newSnapshots := collection.IsDeletable(true, nil, allSnapshots)
	if !isDeletable || len(newSnapshots) != 2 {
		t.Errorf("Scenario 1: should be deletable, 2 new snapshots")
	}

	// host3 has no snapshot newer than the collection: not deletable yet.
	collection.LastRevisions["host3"] = 1
	allSnapshots["host3"] = append([]*Snapshot{}, createDummySnapshot("host3", 1, now-2*day))

	isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
	if isDeletable {
		t.Errorf("Scenario 2: should not be deletable")
	}

	// A newer host3 snapshot makes the collection deletable again.
	allSnapshots["host3"] = append(allSnapshots["host3"], createDummySnapshot("host3", 2, now-day))
	isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
	if !isDeletable || len(newSnapshots) != 3 {
		t.Errorf("Scenario 3: should be deletable, 3 new snapshots")
	}

	// host4's only snapshot is 8 days old, yet the collection stays
	// deletable -- old inactive hosts do not block deletion.
	collection.LastRevisions["host4"] = 1
	allSnapshots["host4"] = append([]*Snapshot{}, createDummySnapshot("host4", 1, now-8*day))

	isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
	if !isDeletable || len(newSnapshots) != 3 {
		t.Errorf("Scenario 4: should be deletable, 3 new snapshots")
	}

	// Two repositories on the same host: both must produce a new snapshot.
	collection.LastRevisions["repository1@host5"] = 1
	allSnapshots["repository1@host5"] = append([]*Snapshot{}, createDummySnapshot("repository1@host5", 1, now-3*day))

	collection.LastRevisions["repository2@host5"] = 1
	allSnapshots["repository2@host5"] = append([]*Snapshot{}, createDummySnapshot("repository2@host5", 1, now-2*day))

	isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
	if isDeletable {
		t.Errorf("Scenario 5: should not be deletable")
	}

	allSnapshots["repository1@host5"] = append(allSnapshots["repository1@host5"], createDummySnapshot("repository1@host5", 2, now-day))
	isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
	if !isDeletable || len(newSnapshots) != 4 {
		t.Errorf("Scenario 6: should be deletable, 4 new snapshots")
	}
}
|
||||||
|
|
||||||
|
// createTestSnapshotManager builds a SnapshotManager over a fresh file
// storage rooted at 'testDir' (which is wiped and recreated), wires up a
// local snapshot cache, and points the preference path inside the test
// directory.
func createTestSnapshotManager(testDir string) *SnapshotManager {

	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	storage, _ := CreateFileStorage(testDir, false, 1)
	storage.CreateDirectory(0, "chunks")
	storage.CreateDirectory(0, "snapshots")
	config := CreateConfig()
	snapshotManager := CreateSnapshotManager(config, storage)

	// The cache mirrors the storage layout under testDir/cache.
	cacheDir := path.Join(testDir, "cache")
	snapshotCache, _ := CreateFileStorage(cacheDir, false, 1)
	snapshotCache.CreateDirectory(0, "chunks")
	snapshotCache.CreateDirectory(0, "snapshots")

	snapshotManager.snapshotCache = snapshotCache

	SetDuplicacyPreferencePath(testDir + "/.duplicacy")

	return snapshotManager
}
|
||||||
|
|
||||||
|
// uploadTestChunk uploads 'content' as a single chunk through a one-shot
// chunk uploader and returns the chunk's hash.
func uploadTestChunk(manager *SnapshotManager, content []byte) string {

	completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
		LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize)
	}

	chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, testThreads, nil)
	chunkUploader.completionFunc = completionFunc
	chunkUploader.Start()

	chunk := CreateChunk(manager.config, true)
	chunk.Reset(true)
	chunk.Write(content)
	chunkUploader.StartChunk(chunk, 0)
	// Stop the uploader before reading the hash.
	chunkUploader.Stop()

	return chunk.GetHash()
}
|
||||||
|
|
||||||
|
func uploadRandomChunk(manager *SnapshotManager, chunkSize int) string {
|
||||||
|
content := make([]byte, chunkSize)
|
||||||
|
_, err := rand.Read(content)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("UPLOAD_RANDOM", "Error generating random content: %v", err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
return uploadTestChunk(manager, content)
|
||||||
|
}
|
||||||
|
|
||||||
|
func uploadRandomChunks(manager *SnapshotManager, chunkSize int, numberOfChunks int) []string {
|
||||||
|
chunkList := make([]string, 0)
|
||||||
|
for i := 0; i < numberOfChunks; i++ {
|
||||||
|
chunkHash := uploadRandomChunk(manager, chunkSize)
|
||||||
|
chunkList = append(chunkList, chunkHash)
|
||||||
|
}
|
||||||
|
return chunkList
|
||||||
|
}
|
||||||
|
|
||||||
|
// createTestSnapshot uploads a snapshot file referencing 'chunkHashes' so
// tests can exercise listing and pruning without running a real backup.
func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string, tag string) {

	snapshot := &Snapshot{
		ID:          snapshotID,
		Revision:    revision,
		StartTime:   startTime,
		EndTime:     endTime,
		ChunkHashes: chunkHashes,
		Tag:         tag,
	}

	// The chunk sequence is the json array of hex-encoded hashes, itself
	// uploaded to the storage as a chunk.
	var chunkHashesInHex []string
	for _, chunkHash := range chunkHashes {
		chunkHashesInHex = append(chunkHashesInHex, hex.EncodeToString([]byte(chunkHash)))
	}

	sequence, _ := json.Marshal(chunkHashesInHex)
	snapshot.ChunkSequence = []string{uploadTestChunk(manager, sequence)}

	// Upload the snapshot description at snapshots/<id>/<revision>.
	description, _ := snapshot.MarshalJSON()
	path := fmt.Sprintf("snapshots/%s/%d", snapshotID, snapshot.Revision)
	manager.storage.CreateDirectory(0, "snapshots/"+snapshotID)
	manager.UploadFile(path, path, description)
}
|
||||||
|
|
||||||
|
// checkTestSnapshots downloads every snapshot in the storage, counts them,
// and counts the chunks not referenced by any snapshot; it reports test
// errors if either count differs from the expected value.
func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {

	var snapshotIDs []string
	var err error

	// Map every chunk file in storage to whether some snapshot references it.
	chunks := make(map[string]bool)
	files, _ := manager.ListAllFiles(manager.storage, "chunks/")
	for _, file := range files {
		if file[len(file)-1] == '/' {
			continue
		}
		// Chunk files may be nested; the id is the path with separators removed.
		chunk := strings.Replace(file, "/", "", -1)
		chunks[chunk] = false
	}

	snapshotIDs, err = manager.ListSnapshotIDs()
	if err != nil {
		LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
		return
	}

	numberOfSnapshots := 0

	for _, snapshotID := range snapshotIDs {

		revisions, err := manager.ListSnapshotRevisions(snapshotID)
		if err != nil {
			LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
			return
		}

		for _, revision := range revisions {
			snapshot := manager.DownloadSnapshot(snapshotID, revision)
			numberOfSnapshots++

			// Mark every chunk this snapshot references.
			for _, chunk := range manager.GetSnapshotChunks(snapshot, false) {
				chunks[chunk] = true
			}
		}
	}

	// Chunks never marked as referenced count as fossils.
	numberOfFossils := 0
	for chunk, referenced := range chunks {
		if !referenced {
			LOG_INFO("UNREFERENCED_CHUNK", "Unreferenced chunk %s", chunk)
			numberOfFossils++
		}
	}

	if numberOfSnapshots != expectedSnapshots {
		LOG_ERROR("SNAPSHOT_COUNT", "Expecting %d snapshots, got %d instead", expectedSnapshots, numberOfSnapshots)
	}

	if numberOfFossils != expectedFossils {
		LOG_ERROR("FOSSIL_COUNT", "Expecting %d unreferenced chunks, got %d instead", expectedFossils, numberOfFossils)
	}
}
|
||||||
|
|
||||||
|
// TestPruneSingleRepository exercises PruneSnapshots on a storage holding a
// single repository: exclusive deletion removes unreferenced chunks right
// away, while non-exclusive deletion leaves them as fossils that a later
// prune collects once a newer snapshot exists.
func TestPruneSingleRepository(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 2 snapshots")
	// Revisions 1 and 2 reference chunks 1-2; chunks 3-4 start unreferenced.
	createTestSnapshot(snapshotManager, "repository1", 1, now-4*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "repository1", 2, now-4*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Creating 2 snapshots")
	createTestSnapshot(snapshotManager, "repository1", 3, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	createTestSnapshot(snapshotManager, "repository1", 4, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4}, "tag")
	checkTestSnapshots(snapshotManager, 4, 0)

	t.Logf("Removing snapshot repository1 revisions 1 and 2 with --exclusive")
	// Exclusive mode deletes unreferenced chunks immediately.
	snapshotManager.PruneSnapshots("repository1", "repository1", []int{1, 2}, []string{}, []string{}, false, true, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 0)

	t.Logf("Removing snapshot repository1 revision 3 without --exclusive")
	// Non-exclusive mode leaves chunks behind as fossils.
	snapshotManager.PruneSnapshots("repository1", "repository1", []int{3}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 1, 2)

	t.Logf("Creating 1 snapshot")
	chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
	createTestSnapshot(snapshotManager, "repository1", 5, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Prune without removing any snapshots -- fossils will be deleted")
	snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 0)
}
|
||||||
|
|
||||||
|
// TestPruneSingleHost exercises the two-step fossil deletion when all snapshot
// IDs live on the same host: pruned chunks first become fossils, and the
// fossils are only permanently deleted after every snapshot ID on the host has
// created a newer snapshot (so no concurrent backup can still reference them).
func TestPruneSingleHost(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 3 snapshots")
	// Two snapshot IDs on the same host; consecutive snapshots share one chunk.
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4}, "tag")
	// checkTestSnapshots(manager, snapshotCount, fossilCount)
	checkTestSnapshots(snapshotManager, 3, 0)

	t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
	// Non-exclusive prune: chunks unique to revision 1 are renamed to fossils.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
	// vm2@host1 has not produced a newer snapshot yet, so fossils must survive.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Creating 1 snapshot")
	chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
	// A new snapshot for vm2@host1 makes the fossil collection eligible.
	createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
	checkTestSnapshots(snapshotManager, 3, 2)

	t.Logf("Prune without removing any snapshots -- fossils will be deleted")
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 0)

}
|
||||||
|
|
||||||
|
// TestPruneMultipleHost verifies that fossils are not deleted until every
// snapshot ID on *every* host has produced a newer snapshot: with IDs spread
// across host1 and host2, a new snapshot from only one host is not enough.
func TestPruneMultipleHost(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 3 snapshot")
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	// Second snapshot ID lives on a different host.
	createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4}, "tag")
	checkTestSnapshots(snapshotManager, 3, 0)

	t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
	// Neither host has a snapshot newer than the fossil collection yet.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Creating 1 snapshot")
	chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
	createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
	checkTestSnapshots(snapshotManager, 3, 2)

	t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
	// Only host2 has advanced; host1 has not, so fossils still survive.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 2)

	t.Logf("Creating 1 snapshot")
	chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
	createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6}, "tag")
	checkTestSnapshots(snapshotManager, 4, 2)

	t.Logf("Prune without removing any snapshots -- fossils will be deleted")
	// Both hosts have now advanced past the fossil collection.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 4, 0)

}
|
||||||
|
|
||||||
|
// TestPruneAndResurrect verifies that a fossil referenced again by a snapshot
// created after the fossil collection is turned back into a regular chunk
// (resurrected) instead of being deleted.
func TestPruneAndResurrect(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 2 snapshots")
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	checkTestSnapshots(snapshotManager, 2, 0)

	t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
	// chunkHash1 (unique to revision 1) becomes a fossil.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 1, 2)

	t.Logf("Creating 1 snapshot")
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
	// Revision 4 references chunkHash1 again, which is currently a fossil.
	createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1}, "tag")
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 0)

}
|
||||||
|
|
||||||
|
// TestPruneWithInactiveHost verifies that a host whose latest snapshot is old
// (here 7 days) does not block fossil deletion forever: once the active host
// produces a new snapshot, the fossils become deletable despite the inactive
// host never advancing.
func TestPruneWithInactiveHost(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 3 snapshot")
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	// Host2 is inactive
	createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4}, "tag")
	checkTestSnapshots(snapshotManager, 3, 0)

	t.Logf("Removing snapshot vm1@host1 revision 1")
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
	// The active host has not produced a post-collection snapshot yet.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	t.Logf("Creating 1 snapshot")
	chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
	createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
	checkTestSnapshots(snapshotManager, 3, 2)

	t.Logf("Prune without removing any snapshots -- fossils will be deleted")
	// The inactive host2 is ignored, so host1's new snapshot is sufficient.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 0)

}
|
||||||
|
|
||||||
|
// TestPruneWithRetentionPolicy exercises the '-keep' style retention rules
// ("interval:age" strings): snapshots older than 'age' days are thinned so
// that at most one snapshot per 'interval' days is kept (0 means keep none).
func TestPruneWithRetentionPolicy(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	var chunkHashes []string
	for i := 0; i < 30; i++ {
		chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
	}

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 30 snapshots")
	// One snapshot per day, 30 days back to 1 day back.
	for i := 0; i < 30; i++ {
		createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, "tag")
	}

	checkTestSnapshots(snapshotManager, 30, 0)

	t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
	// "0:20": delete everything older than 20 days.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 19, 0)

	t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
	// Applying the same policy again is a no-op.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 19, 0)

	t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
	// Keep one per 3 days past 14 days, one per 2 days past 7 days.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 12, 0)

}
|
||||||
|
|
||||||
|
// TestPruneWithRetentionPolicyAndTag verifies that a retention policy combined
// with a tag filter only removes snapshots carrying that tag; snapshots with
// other tags are untouched even if they fall inside the policy window.
func TestPruneWithRetentionPolicyAndTag(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	var chunkHashes []string
	for i := 0; i < 30; i++ {
		chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
	}

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 30 snapshots")
	for i := 0; i < 30; i++ {
		// Every third snapshot is tagged "manual"; the rest are "auto".
		tag := "auto"
		if i%3 == 0 {
			tag = "manual"
		}
		createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, tag)
	}

	checkTestSnapshots(snapshotManager, 30, 0)

	t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive and --tag manual")
	// "0:7" on "manual" snapshots only: deletes manual snapshots older than 7 days.
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{"manual"}, []string{"0:7"}, false, true, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 22, 0)

}
|
||||||
|
|
||||||
|
// Test that an unreferenced fossil shouldn't be removed as it may be the result of another prune job in-progress.
|
||||||
|
// Test that an unreferenced fossil shouldn't be removed as it may be the result of another prune job in-progress.
func TestPruneWithFossils(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
	// Create an unreferenced fossil
	snapshotManager.storage.UploadFile(0, "chunks/113b6a2350dcfd836829c47304dd330fa6b58b93dd7ac696c6b7b913e6868662.fsl", []byte("this is a test fossil"))

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 2 snapshots")
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	// 2 snapshots plus the hand-planted fossil.
	checkTestSnapshots(snapshotManager, 2, 1)

	t.Logf("Prune without removing any snapshots but with --exhaustive")
	// The unreferenced fossil shouldn't be removed
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, true, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 1)

	t.Logf("Prune without removing any snapshots but with --exclusive")
	// Now the unreferenced fossil should be removed
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, true, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 0)

}
|
||||||
|
|
||||||
|
// TestPruneMultipleThread runs the same prune workflows as the single-threaded
// tests but with 4 deletion threads over a larger chunk population (256 chunks
// per snapshot), to catch races or miscounts in the multi-threaded path.
func TestPruneMultipleThread(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	numberOfChunks := 256
	numberOfThreads := 4

	chunkList1 := uploadRandomChunks(snapshotManager, chunkSize, numberOfChunks)
	chunkList2 := uploadRandomChunks(snapshotManager, chunkSize, numberOfChunks)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 2 snapshots")
	createTestSnapshot(snapshotManager, "repository1", 1, now-4*day-3600, now-3*day-60, chunkList1, "tag")
	createTestSnapshot(snapshotManager, "repository1", 2, now-3*day-3600, now-2*day-60, chunkList2, "tag")
	checkTestSnapshots(snapshotManager, 2, 0)

	t.Logf("Removing snapshot revisions 1 with --exclusive")
	// Exclusive prune deletes chunks directly (no fossil step).
	snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false, numberOfThreads)
	checkTestSnapshots(snapshotManager, 1, 0)

	t.Logf("Creating 1 more snapshot")
	chunkList3 := uploadRandomChunks(snapshotManager, chunkSize, numberOfChunks)
	createTestSnapshot(snapshotManager, "repository1", 3, now-2*day-3600, now-1*day-60, chunkList3, "tag")

	t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
	snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false, numberOfThreads)

	t.Logf("Prune without removing any snapshots but with --exclusive")
	// Exclusive pass cleans up the fossils left by the previous prune.
	snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, true, []string{}, false, false, false, numberOfThreads)
	checkTestSnapshots(snapshotManager, 1, 0)

}
|
||||||
|
|
||||||
|
// A snapshot not seen by a fossil collection should always be consider a new snapshot in the fossil deletion step
|
||||||
|
// A snapshot not seen by a fossil collection should always be consider a new snapshot in the fossil deletion step
func TestPruneNewSnapshots(t *testing.T) {

	setTestingT(t)

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 3 snapshots")
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	createTestSnapshot(snapshotManager, "vm2@host1", 1, now-2*day-3600, now-2*day-60, []string{chunkHash3, chunkHash4}, "tag")
	checkTestSnapshots(snapshotManager, 3, 0)

	t.Logf("Prune snapshot 1")
	// chunkHash1 should be marked as fossil
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
	// Create another snapshot of vm1 that brings back chunkHash1
	createTestSnapshot(snapshotManager, "vm1@host1", 3, now-0*day-3600, now-0*day-60, []string{chunkHash1, chunkHash3}, "tag")
	// Create another snapshot of vm2 so the fossil collection will be processed by next prune
	createTestSnapshot(snapshotManager, "vm2@host1", 2, now+3600, now+3600*2, []string{chunkHash4, chunkHash5}, "tag")

	// Now chunkHash1 will be resurrected
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 4, 0)
	// Verify that the surviving revisions are fully intact (all chunks present).
	snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1, false)

}
|
||||||
|
|
||||||
|
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
|
||||||
|
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
func TestPruneGhostSnapshots(t *testing.T) {

	setTestingT(t)

	EnableStackTrace()

	testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

	snapshotManager := createTestSnapshotManager(testDir)

	chunkSize := 1024
	chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
	chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)

	now := time.Now().Unix()
	day := int64(24 * 3600)
	t.Logf("Creating 2 snapshots")
	createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
	createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
	checkTestSnapshots(snapshotManager, 2, 0)

	// Save the snapshot file for revision 1 so it can be restored later.
	snapshot1, err := ioutil.ReadFile(path.Join(testDir, "snapshots", "vm1@host1", "1"))
	if err != nil {
		t.Errorf("Failed to read snapshot file: %v", err)
	}

	t.Logf("Prune snapshot 1")
	// chunkHash1 should be marked as fossil
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 1, 2)

	// Recover the snapshot file for revision 1; this is to simulate a scenario where prune may encounter a network error after
	// leaving the fossil collection but before deleting any snapshots.
	err = ioutil.WriteFile(path.Join(testDir, "snapshots", "vm1@host1", "1"), snapshot1, 0644)
	if err != nil {
		t.Errorf("Failed to write snapshot file: %v", err)
	}

	// Create another snapshot of vm1 so the fossil collection becomes eligible for processing.
	chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
	createTestSnapshot(snapshotManager, "vm1@host1", 3, now-day-3600, now-day-60, []string{chunkHash3, chunkHash4}, "tag")

	// Run the prune again but the fossil collection should be ignored, since revision 1 still exists
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 2)
	snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1, false)

	// Prune snapshot 1 again
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 2, 2)

	// Create another snapshot
	chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
	// NOTE(review): chunkHash5 appears twice in this chunk list -- possibly
	// chunkHash4 was intended (other tests pair consecutive hashes); confirm.
	createTestSnapshot(snapshotManager, "vm1@host1", 4, now+3600, now+3600*2, []string{chunkHash5, chunkHash5}, "tag")
	checkTestSnapshots(snapshotManager, 3, 2)

	// Run the prune again and this time the fossil collection will be processed and the fossils removed
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 0)
	snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1, false)

}
|
||||||
722
src/duplicacy_storage.go
Normal file
722
src/duplicacy_storage.go
Normal file
@@ -0,0 +1,722 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"regexp"
|
||||||
|
"runtime"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/crypto/ssh"
|
||||||
|
"golang.org/x/crypto/ssh/agent"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Storage is the abstraction implemented by every storage backend.
type Storage interface {
	// ListFiles return the list of files and subdirectories under 'dir'. Subdirectories returned must have a trailing '/', with
	// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
	// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recursively or non-recursively.
	ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error)

	// DeleteFile deletes the file or directory at 'filePath'.
	DeleteFile(threadIndex int, filePath string) (err error)

	// MoveFile renames the file.
	MoveFile(threadIndex int, from string, to string) (err error)

	// CreateDirectory creates a new directory.
	CreateDirectory(threadIndex int, dir string) (err error)

	// GetFileInfo returns the information about the file or directory at 'filePath'.
	GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error)

	// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
	// the suffix '.fsl'.
	FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error)

	// DownloadFile reads the file at 'filePath' into the chunk.
	DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error)

	// UploadFile writes 'content' to the file at 'filePath'.
	UploadFile(threadIndex int, filePath string, content []byte) (err error)

	// SetNestingLevels sets up the chunk nesting structure.
	SetNestingLevels(config *Config)

	// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
	// managing snapshots.
	IsCacheNeeded() bool

	// If the 'MoveFile' method is implemented.
	IsMoveFileImplemented() bool

	// If the storage can guarantee strong consistency.
	IsStrongConsistent() bool

	// If the storage supports fast listing of file names.
	IsFastListing() bool

	// Enable the test mode.
	EnableTestMode()

	// Set the maximum transfer speeds.
	SetRateLimits(downloadRateLimit int, uploadRateLimit int)
}
|
||||||
|
|
||||||
|
// StorageBase is the base struct from which all storages are derived from
|
||||||
|
// StorageBase is the base struct from which all storages are derived from
type StorageBase struct {
	DownloadRateLimit int // Maximum download rate (bytes/seconds)
	UploadRateLimit   int // Maximum upload rate (bytes/seconds)

	DerivedStorage Storage // Used as the pointer to the derived storage class

	readLevels []int // At which nesting level to find the chunk with the given id
	writeLevel int   // Store the uploaded chunk to this level
}
|
||||||
|
|
||||||
|
// SetRateLimits sets the maximum download and upload rates
|
||||||
|
// SetRateLimits sets the maximum download and upload rates
// (in bytes/second, per the field documentation on StorageBase).
func (storage *StorageBase) SetRateLimits(downloadRateLimit int, uploadRateLimit int) {
	storage.DownloadRateLimit = downloadRateLimit
	storage.UploadRateLimit = uploadRateLimit
}
|
||||||
|
|
||||||
|
// SetDefaultNestingLevels sets the default read and write levels. This is usually called by
|
||||||
|
// derived storages to set the levels with old values so that storages initialized by earlier versions
|
||||||
|
// will continue to work.
|
||||||
|
// SetDefaultNestingLevels sets the default read and write levels. This is usually called by
// derived storages to set the levels with old values so that storages initialized by earlier versions
// will continue to work.
func (storage *StorageBase) SetDefaultNestingLevels(readLevels []int, writeLevel int) {
	storage.readLevels = readLevels
	storage.writeLevel = writeLevel
}
|
||||||
|
|
||||||
|
// SetNestingLevels sets the new read and write levels (normally both at 1) if the 'config' file has
|
||||||
|
// the 'fixed-nesting' key, or if a file named 'nesting' exists on the storage.
|
||||||
|
func (storage *StorageBase) SetNestingLevels(config *Config) {
|
||||||
|
|
||||||
|
// 'FixedNesting' is true only for the 'config' file with the new format (2.0.10+)
|
||||||
|
if config.FixedNesting {
|
||||||
|
|
||||||
|
storage.readLevels = nil
|
||||||
|
|
||||||
|
// Check if the 'nesting' file exist
|
||||||
|
exist, _, _, err := storage.DerivedStorage.GetFileInfo(0, "nesting")
|
||||||
|
if err == nil && exist {
|
||||||
|
nestingFile := CreateChunk(CreateConfig(), true)
|
||||||
|
if storage.DerivedStorage.DownloadFile(0, "nesting", nestingFile) == nil {
|
||||||
|
var nesting struct {
|
||||||
|
ReadLevels []int `json:"read-levels"`
|
||||||
|
WriteLevel int `json:"write-level"`
|
||||||
|
}
|
||||||
|
if json.Unmarshal(nestingFile.GetBytes(), &nesting) == nil {
|
||||||
|
storage.readLevels = nesting.ReadLevels
|
||||||
|
storage.writeLevel = nesting.WriteLevel
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(storage.readLevels) == 0 {
|
||||||
|
storage.readLevels = []int{1}
|
||||||
|
storage.writeLevel = 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_DEBUG("STORAGE_NESTING", "Chunk read levels: %v, write level: %d", storage.readLevels, storage.writeLevel)
|
||||||
|
for _, level := range storage.readLevels {
|
||||||
|
if storage.writeLevel == level {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LOG_ERROR("STORAGE_NESTING", "The write level %d isn't in the read levels %v", storage.readLevels, storage.writeLevel)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindChunk finds the chunk with the specified id at the levels one by one as specified by 'readLevels'.
|
||||||
|
func (storage *StorageBase) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||||
|
chunkPaths := make([]string, 0)
|
||||||
|
for _, level := range storage.readLevels {
|
||||||
|
chunkPath := "chunks/"
|
||||||
|
for i := 0; i < level; i++ {
|
||||||
|
chunkPath += chunkID[2*i:2*i+2] + "/"
|
||||||
|
}
|
||||||
|
chunkPath += chunkID[2*level:]
|
||||||
|
if isFossil {
|
||||||
|
chunkPath += ".fsl"
|
||||||
|
}
|
||||||
|
exist, _, size, err = storage.DerivedStorage.GetFileInfo(threadIndex, chunkPath)
|
||||||
|
if err == nil && exist {
|
||||||
|
return chunkPath, exist, size, err
|
||||||
|
}
|
||||||
|
chunkPaths = append(chunkPaths, chunkPath)
|
||||||
|
}
|
||||||
|
for i, level := range storage.readLevels {
|
||||||
|
if storage.writeLevel == level {
|
||||||
|
return chunkPaths[i], false, 0, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return "", false, 0, fmt.Errorf("Invalid chunk nesting setup")
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
|
||||||
|
|
||||||
|
if preferencePath == "" {
|
||||||
|
return fmt.Errorf("Can't verify SSH host since the preference path is not set")
|
||||||
|
}
|
||||||
|
hostFile := path.Join(preferencePath, "known_hosts")
|
||||||
|
file, err := os.OpenFile(hostFile, os.O_RDWR|os.O_CREATE, 0600)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
defer file.Close()
|
||||||
|
content, err := ioutil.ReadAll(file)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
lineRegex := regexp.MustCompile(`^([^\s]+)\s+(.+)`)
|
||||||
|
|
||||||
|
keyString := string(ssh.MarshalAuthorizedKey(key))
|
||||||
|
keyString = strings.Replace(keyString, "\n", "", -1)
|
||||||
|
remoteAddress := remote.String()
|
||||||
|
if strings.HasSuffix(remoteAddress, ":22") {
|
||||||
|
remoteAddress = remoteAddress[:len(remoteAddress)-len(":22")]
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, line := range strings.Split(string(content), "\n") {
|
||||||
|
matched := lineRegex.FindStringSubmatch(line)
|
||||||
|
if matched == nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if matched[1] == remote.String() {
|
||||||
|
if keyString != matched[2] {
|
||||||
|
LOG_WARN("HOSTKEY_OLD", "The existing key for '%s' is %s (file %s, line %d)",
|
||||||
|
remote.String(), matched[2], hostFile, i)
|
||||||
|
LOG_WARN("HOSTKEY_NEW", "The new key is '%s'", keyString)
|
||||||
|
return fmt.Errorf("The host key for '%s' has changed", remote.String())
|
||||||
|
} else {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
file.Write([]byte(remote.String() + " " + keyString + "\n"))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateStorage creates a storage object based on the provided storage URL.
// The URL scheme selects the backend (plain path, flat://, samba://, sftp://,
// s3://, b2://, azure://, gcd://, webdav://, ...).  Credentials are obtained
// via GetPassword (prompting or keyring) and saved back after a successful
// connection.  On any failure an error is logged and nil is returned.
func CreateStorage(preference Preference, resetPassword bool, threads int) (storage Storage) {

	storageURL := preference.StorageURL

	isFileStorage := false
	isCacheNeeded := false

	// Absolute POSIX paths, Windows drive paths (C:\...), and UNC shares are
	// all local file storages; UNC shares additionally need a chunk cache.
	if strings.HasPrefix(storageURL, "/") {
		isFileStorage = true
	} else if runtime.GOOS == "windows" {
		if len(storageURL) >= 3 && storageURL[1] == ':' && (storageURL[2] == '/' || storageURL[2] == '\\') {
			volume := strings.ToLower(storageURL[:1])
			if volume[0] >= 'a' && volume[0] <= 'z' {
				isFileStorage = true
			}
		}

		if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
			isFileStorage = true
			isCacheNeeded = true
		}
	}

	if isFileStorage {
		fileStorage, err := CreateFileStorage(storageURL, isCacheNeeded, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
			return nil
		}
		return fileStorage
	}

	// flat:// is a file storage without a local cache.
	if strings.HasPrefix(storageURL, "flat://") {
		fileStorage, err := CreateFileStorage(storageURL[7:], false, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
			return nil
		}
		return fileStorage
	}

	// samba:// is a file storage backed by a network share; use a cache.
	if strings.HasPrefix(storageURL, "samba://") {
		fileStorage, err := CreateFileStorage(storageURL[8:], true, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
			return nil
		}
		return fileStorage
	}

	// scheme://user@host/path; when present, matched[2] keeps its trailing '@'.
	urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.]+@)?([^/]+)(/(.+))?`)

	matched := urlRegex.FindStringSubmatch(storageURL)

	if matched == nil {
		LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
		return nil
	} else if matched[1] == "sftp" || matched[1] == "sftpc" {
		server := matched[3]
		username := matched[2]
		storageDir := matched[5]
		port := 22 // default SSH port unless the server part carries one

		if strings.Contains(server, ":") {
			index := strings.Index(server, ":")
			port, _ = strconv.Atoi(server[index+1:])
			server = server[:index]
		}

		if storageDir == "" {
			LOG_ERROR("STORAGE_CREATE", "The SFTP storage directory can't be empty")
			return nil
		}

		if username != "" {
			username = username[:len(username)-1] // drop the trailing '@'
		}

		// If ssh_key_file is set, skip password-based login
		keyFile := GetPasswordFromPreference(preference, "ssh_key_file")
		passphrase := ""

		// 'password' is captured by the callbacks below so that whichever
		// method succeeds leaves the credential available for saving.
		password := ""
		passwordCallback := func() (string, error) {
			LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
			password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
			return password, nil
		}

		keyboardInteractive := func(user, instruction string, questions []string, echos []bool) (answers []string,
			err error) {
			if len(questions) == 1 {
				LOG_DEBUG("SSH_INTERACTIVE", "Attempting keyboard interactive login")
				password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
				answers = []string{password}
				return answers, nil
			} else {
				return nil, nil
			}
		}

		publicKeysCallback := func() ([]ssh.Signer, error) {
			LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")

			signers := []ssh.Signer{}

			// Try any keys offered by a running ssh-agent first.
			agentSock := os.Getenv("SSH_AUTH_SOCK")
			if agentSock != "" {
				connection, err := net.Dial("unix", agentSock)
				// TODO: looks like we need to close the connection
				if err == nil {
					LOG_DEBUG("SSH_AGENT", "Attempting public key authentication via agent")
					sshAgent := agent.NewClient(connection)
					signers, err = sshAgent.Signers()
					if err != nil {
						LOG_DEBUG("SSH_AGENT", "Can't log in using public key authentication via agent: %v", err)
					} else if len(signers) == 0 {
						LOG_DEBUG("SSH_AGENT", "SSH agent doesn't return any signer")
					}
				}
			}

			keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
				true, resetPassword)

			var keySigner ssh.Signer
			var err error

			if keyFile == "" {
				LOG_INFO("SSH_PUBLICKEY", "No private key file is provided")
			} else {
				var content []byte
				content, err = ioutil.ReadFile(keyFile)
				if err != nil {
					LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
				} else {
					keySigner, err = ssh.ParsePrivateKey(content)
					if err != nil {
						// An encrypted key surfaces as PassphraseMissingError;
						// prompt for the passphrase and retry.
						if _, ok := err.(*ssh.PassphraseMissingError); ok {
							LOG_TRACE("SSH_PUBLICKEY", "The private key file is encrypted")
							passphrase = GetPassword(preference, "ssh_passphrase", "Enter the passphrase to decrypt the private key file:", false, resetPassword)
							if len(passphrase) == 0 {
								LOG_INFO("SSH_PUBLICKEY", "No passphrase to descrypt the private key file %s", keyFile)
							} else {
								keySigner, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
								if err != nil {
									LOG_INFO("SSH_PUBLICKEY", "Failed to parse the encrypted private key file %s: %v", keyFile, err)
								}
							}
						} else {
							LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
						}
					}

					// If an OpenSSH certificate sits next to the key file
					// (<key>-cert.pub), wrap the key signer in a cert signer.
					if keySigner != nil {
						certFile := keyFile + "-cert.pub"
						if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() {
							LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile)
							var content []byte
							content, err = ioutil.ReadFile(certFile)
							if err != nil {
								LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err)
							} else {
								pubKey, _, _, _, err := ssh.ParseAuthorizedKey(content)
								if err != nil {
									LOG_INFO("SSH_CERTIFICATE", "Failed parse ssh certificate file %s: %v", certFile, err)
								} else {
									certSigner, err := ssh.NewCertSigner(pubKey.(*ssh.Certificate), keySigner)
									if err != nil {
										LOG_INFO("SSH_CERTIFICATE", "Failed to create certificate signer: %v", err)
									} else {
										keySigner = certSigner
									}
								}
							}
						}
					}
				}
			}

			if keySigner != nil {
				signers = append(signers, keySigner)
			}

			if len(signers) > 0 {
				return signers, nil
			} else {
				return nil, err
			}

		}

		// Order the auth methods so the more likely one is tried first:
		// key file first when one is configured, password first otherwise.
		authMethods := []ssh.AuthMethod{}
		passwordAuthMethods := []ssh.AuthMethod{
			ssh.PasswordCallback(passwordCallback),
			ssh.KeyboardInteractive(keyboardInteractive),
		}
		keyFileAuthMethods := []ssh.AuthMethod{
			ssh.PublicKeysCallback(publicKeysCallback),
		}
		if keyFile != "" {
			authMethods = append(keyFileAuthMethods, passwordAuthMethods...)
		} else {
			authMethods = append(passwordAuthMethods, keyFileAuthMethods...)
		}

		// In background mode no interactive prompting is possible, so only
		// offer methods whose credentials are already in the keyring (or an
		// agent is available).
		if RunInBackground {

			passwordKey := "ssh_password"
			keyFileKey := "ssh_key_file"
			if preference.Name != "default" {
				passwordKey = preference.Name + "_" + passwordKey
				keyFileKey = preference.Name + "_" + keyFileKey
			}

			authMethods = []ssh.AuthMethod{}
			if keyringGet(passwordKey) != "" {
				authMethods = append(authMethods, ssh.PasswordCallback(passwordCallback))
				authMethods = append(authMethods, ssh.KeyboardInteractive(keyboardInteractive))
			}
			if keyringGet(keyFileKey) != "" || os.Getenv("SSH_AUTH_SOCK") != "" {
				authMethods = append(authMethods, ssh.PublicKeysCallback(publicKeysCallback))
			}
		}

		hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
			return checkHostKey(hostname, remote, key)
		}

		sftpStorage, err := CreateSFTPStorage(matched[1] == "sftpc", server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
			return nil
		}

		// Persist whichever credentials were actually used to connect.
		if keyFile != "" {
			SavePassword(preference, "ssh_key_file", keyFile)
			if passphrase != "" {
				SavePassword(preference, "ssh_passphrase", passphrase)
			}
		} else if password != "" {
			SavePassword(preference, "ssh_password", password)
		}
		return sftpStorage
	} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {

		// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)

		// s3://region@endpoint/bucket/dir
		region := matched[2]
		endpoint := matched[3]
		bucket := matched[5]

		if region != "" {
			region = region[:len(region)-1] // drop the trailing '@'
		}

		// An endpoint of "amazon"/"amazon.com" means the default AWS endpoint.
		if strings.EqualFold(endpoint, "amazon") || strings.EqualFold(endpoint, "amazon.com") {
			endpoint = ""
		}

		// Split "bucket/dir" into the bucket name and the directory within it.
		storageDir := ""
		if strings.Contains(bucket, "/") {
			firstSlash := strings.Index(bucket, "/")
			storageDir = bucket[firstSlash+1:]
			bucket = bucket[:firstSlash]
		}

		accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
		secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)

		var err error

		if matched[1] == "s3c" {
			storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
			if err != nil {
				LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
				return nil
			}
		} else {
			isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
			isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
			storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
			if err != nil {
				LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
				return nil
			}
		}
		SavePassword(preference, "s3_id", accessKey)
		SavePassword(preference, "s3_secret", secretKey)

		return storage

	} else if matched[1] == "wasabi" {

		// wasabi://region@endpoint/bucket/dir
		region := matched[2]
		endpoint := matched[3]
		bucket := matched[5]

		if region != "" {
			region = region[:len(region)-1] // drop the trailing '@'
		}

		key := GetPassword(preference, "wasabi_key",
			"Enter Wasabi key:", true, resetPassword)
		secret := GetPassword(preference, "wasabi_secret",
			"Enter Wasabi secret:", true, resetPassword)

		storageDir := ""
		if strings.Contains(bucket, "/") {
			firstSlash := strings.Index(bucket, "/")
			storageDir = bucket[firstSlash+1:]
			bucket = bucket[:firstSlash]
		}

		storage, err := CreateWasabiStorage(region, endpoint,
			bucket, storageDir, key, secret, threads)

		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Wasabi storage at %s: %v", storageURL, err)
			return nil
		}

		SavePassword(preference, "wasabi_key", key)
		SavePassword(preference, "wasabi_secret", secret)

		return storage

	} else if matched[1] == "dropbox" {
		storageDir := matched[3] + matched[5]
		token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
		dropboxStorage, err := CreateDropboxStorage(token, storageDir, 1, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
			return nil
		}
		SavePassword(preference, "dropbox_token", token)
		return dropboxStorage
	} else if matched[1] == "b2" {
		bucket := matched[3]
		storageDir := matched[5]

		accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
		applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)

		b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "b2_id", accountID)
		SavePassword(preference, "b2_key", applicationKey)
		return b2Storage
	} else if matched[1] == "b2-custom" {
		// b2-custom embeds a custom download host, so re-parse the URL with a
		// dedicated regex: b2-custom://downloadhost/bucket/dir
		b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
		matched := b2customUrlRegex.FindStringSubmatch(storageURL)
		downloadURL := "https://" + matched[1]
		bucket := matched[2]
		storageDir := matched[4]

		accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
		applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)

		b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "b2_id", accountID)
		SavePassword(preference, "b2_key", applicationKey)
		return b2Storage
	} else if matched[1] == "azure" {
		account := matched[3]
		container := matched[5]

		if container == "" {
			LOG_ERROR("STORAGE_CREATE", "The container name for the Azure storage can't be empty")
			return nil
		}

		prompt := fmt.Sprintf("Enter the Access Key for the Azure storage account %s:", account)
		accessKey := GetPassword(preference, "azure_key", prompt, true, resetPassword)

		azureStorage, err := CreateAzureStorage(account, accessKey, container, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Azure storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "azure_key", accessKey)
		return azureStorage
	} else if matched[1] == "acd" {
		storagePath := matched[3] + matched[4]
		prompt := fmt.Sprintf("Enter the path of the Amazon Cloud Drive token file (downloadable from https://duplicacy.com/acd_start):")
		tokenFile := GetPassword(preference, "acd_token", prompt, true, resetPassword)
		acdStorage, err := CreateACDStorage(tokenFile, storagePath, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Amazon Cloud Drive storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "acd_token", tokenFile)
		return acdStorage
	} else if matched[1] == "gcs" {
		bucket := matched[3]
		storageDir := matched[5]
		prompt := fmt.Sprintf("Enter the path of the Google Cloud Storage token file (downloadable from https://duplicacy.com/gcs_start) or the service account credential file:")
		tokenFile := GetPassword(preference, "gcs_token", prompt, true, resetPassword)
		gcsStorage, err := CreateGCSStorage(tokenFile, bucket, storageDir, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Cloud Storage backend at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "gcs_token", tokenFile)
		return gcsStorage
	} else if matched[1] == "gcd" {
		// Handle writing directly to the root of the drive
		// For gcd://driveid@/, driveid@ is match[3] not match[2]
		if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
			matched[2], matched[3] = matched[3], matched[2]
		}
		driveID := matched[2]
		if driveID != "" {
			driveID = driveID[:len(driveID)-1] // drop the trailing '@'
		}
		storagePath := matched[3] + matched[4]
		prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
		tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
		gcdStorage, err := CreateGCDStorage(tokenFile, driveID, storagePath, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "gcd_token", tokenFile)
		return gcdStorage
	} else if matched[1] == "one" || matched[1] == "odb" {
		// 'one' is personal OneDrive; 'odb' is OneDrive for Business.
		storagePath := matched[3] + matched[4]
		prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
		tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
		oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, matched[1] + "_token", tokenFile)
		return oneDriveStorage
	} else if matched[1] == "hubic" {
		storagePath := matched[3] + matched[4]
		prompt := fmt.Sprintf("Enter the path of the Hubic token file (downloadable from https://duplicacy.com/hubic_start):")
		tokenFile := GetPassword(preference, "hubic_token", prompt, true, resetPassword)
		hubicStorage, err := CreateHubicStorage(tokenFile, storagePath, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the Hubic storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "hubic_token", tokenFile)
		return hubicStorage
	} else if matched[1] == "swift" {
		// The entire remainder after "swift://" is the Swift storage URL.
		prompt := fmt.Sprintf("Enter the OpenStack Swift key:")
		key := GetPassword(preference, "swift_key", prompt, true, resetPassword)
		swiftStorage, err := CreateSwiftStorage(storageURL[8:], key, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the OpenStack Swift storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "swift_key", key)
		return swiftStorage
	} else if matched[1] == "webdav" || matched[1] == "webdav-http" {
		server := matched[3]
		username := matched[2]
		if username == "" {
			LOG_ERROR("STORAGE_CREATE", "No username is provided to access the WebDAV storage")
			return nil
		}
		username = username[:len(username)-1] // drop the trailing '@'
		storageDir := matched[5]
		port := 0 // 0 lets the WebDAV backend pick the scheme's default port
		useHTTP := matched[1] == "webdav-http"

		if strings.Contains(server, ":") {
			index := strings.Index(server, ":")
			port, _ = strconv.Atoi(server[index+1:])
			server = server[:index]
		}

		prompt := fmt.Sprintf("Enter the WebDAV password:")
		password := GetPassword(preference, "webdav_password", prompt, true, resetPassword)
		webDAVStorage, err := CreateWebDAVStorage(server, port, username, password, storageDir, useHTTP, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the WebDAV storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "webdav_password", password)
		return webDAVStorage
	} else if matched[1] == "fabric" {
		endpoint := matched[3]
		storageDir := matched[5]
		prompt := fmt.Sprintf("Enter the token for accessing the Storage Made Easy File Fabric storage:")
		token := GetPassword(preference, "fabric_token", prompt, true, resetPassword)
		smeStorage, err := CreateFileFabricStorage(endpoint, token, storageDir, threads)
		if err != nil {
			LOG_ERROR("STORAGE_CREATE", "Failed to load the File Fabric storage at %s: %v", storageURL, err)
			return nil
		}
		SavePassword(preference, "fabric_token", token)
		return smeStorage
	} else {
		LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
		return nil
	}

}
|
||||||
624
src/duplicacy_storage_test.go
Normal file
624
src/duplicacy_storage_test.go
Normal file
@@ -0,0 +1,624 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"runtime/debug"
|
||||||
|
"strconv"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
"math/rand"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Command-line options shared by all storage tests; registered in init below
// so they can be passed to 'go test' (e.g. -storage sftp -threads 4).
var testStorageName string   // which backend from test_storage.conf to exercise ("" or "file" = local file storage)
var testRateLimit int        // maximum transfer speed in kbytes/sec (0 = unlimited)
var testQuickMode bool       // skip the slower portions of the tests
var testThreads int          // number of parallel downloading/uploading threads
var testFixedChunkSize bool  // use fixed-size instead of variable-size chunks
var testRSAEncryption bool   // enable RSA encryption during the tests
var testErasureCoding bool   // enable erasure coding during the tests
|
||||||
|
|
||||||
|
// init registers the storage-test flags and parses the command line so that
// every test in this package sees the same configuration.
func init() {
	flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
	flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec")
	flag.BoolVar(&testQuickMode, "quick", false, "quick test")
	flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
	flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
	flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
	flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding")
	flag.Parse()
}
|
||||||
|
|
||||||
|
func loadStorage(localStoragePath string, threads int) (Storage, error) {
|
||||||
|
|
||||||
|
if testStorageName == "" || testStorageName == "file" {
|
||||||
|
storage, err := CreateFileStorage(localStoragePath, false, threads)
|
||||||
|
if storage != nil {
|
||||||
|
// Use a read level of at least 2 because this will catch more errors than a read level of 1.
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
}
|
||||||
|
return storage, err
|
||||||
|
}
|
||||||
|
|
||||||
|
description, err := ioutil.ReadFile("test_storage.conf")
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
configs := make(map[string]map[string]string)
|
||||||
|
|
||||||
|
err = json.Unmarshal(description, &configs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
config, found := configs[testStorageName]
|
||||||
|
if !found {
|
||||||
|
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
|
||||||
|
}
|
||||||
|
|
||||||
|
if testStorageName == "flat" {
|
||||||
|
storage, err := CreateFileStorage(localStoragePath, false, threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "samba" {
|
||||||
|
storage, err := CreateFileStorage(localStoragePath, true, threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "sftp" {
|
||||||
|
port, _ := strconv.Atoi(config["port"])
|
||||||
|
storage, err := CreateSFTPStorageWithPassword(config["server"], port, config["username"], config["directory"], 2, config["password"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "s3" {
|
||||||
|
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "wasabi" {
|
||||||
|
storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "s3c" {
|
||||||
|
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "digitalocean" {
|
||||||
|
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "minio" {
|
||||||
|
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, false, true)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "minios" {
|
||||||
|
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, true)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "dropbox" {
|
||||||
|
storage, err := CreateDropboxStorage(config["token"], config["directory"], 1, threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "b2" {
|
||||||
|
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "gcs-s3" {
|
||||||
|
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "gcs" {
|
||||||
|
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "gcs-sa" {
|
||||||
|
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "azure" {
|
||||||
|
storage, err := CreateAzureStorage(config["account"], config["key"], config["container"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "acd" {
|
||||||
|
storage, err := CreateACDStorage(config["token_file"], config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "gcd" {
|
||||||
|
storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "gcd-shared" {
|
||||||
|
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "one" {
|
||||||
|
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "odb" {
|
||||||
|
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "one" {
|
||||||
|
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "hubic" {
|
||||||
|
storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "memset" {
|
||||||
|
storage, err := CreateSwiftStorage(config["storage_url"], config["key"], threads)
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
} else if testStorageName == "pcloud" || testStorageName == "box" {
|
||||||
|
storage, err := CreateWebDAVStorage(config["host"], 0, config["username"], config["password"], config["storage_path"], false, threads)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
|
||||||
|
return storage, err
|
||||||
|
}
|
||||||
|
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
|
||||||
|
}
|
||||||
|
|
||||||
|
func cleanStorage(storage Storage) {
|
||||||
|
|
||||||
|
directories := make([]string, 0, 1024)
|
||||||
|
snapshots := make([]string, 0, 1024)
|
||||||
|
|
||||||
|
directories = append(directories, "snapshots/")
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_LIST", "Listing snapshots in the storage")
|
||||||
|
for len(directories) > 0 {
|
||||||
|
|
||||||
|
dir := directories[len(directories)-1]
|
||||||
|
directories = directories[:len(directories)-1]
|
||||||
|
|
||||||
|
files, _, err := storage.ListFiles(0, dir)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("STORAGE_LIST", "Failed to list the directory %s: %v", dir, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if len(file) > 0 && file[len(file)-1] == '/' {
|
||||||
|
directories = append(directories, dir+file)
|
||||||
|
} else {
|
||||||
|
snapshots = append(snapshots, dir+file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_DELETE", "Deleting %d snapshots in the storage", len(snapshots))
|
||||||
|
for _, snapshot := range snapshots {
|
||||||
|
storage.DeleteFile(0, snapshot)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, chunk := range listChunks(storage) {
|
||||||
|
storage.DeleteFile(0, "chunks/"+chunk)
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DeleteFile(0, "config")
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func listChunks(storage Storage) (chunks []string) {
|
||||||
|
|
||||||
|
directories := make([]string, 0, 1024)
|
||||||
|
|
||||||
|
directories = append(directories, "chunks/")
|
||||||
|
|
||||||
|
for len(directories) > 0 {
|
||||||
|
|
||||||
|
dir := directories[len(directories)-1]
|
||||||
|
directories = directories[:len(directories)-1]
|
||||||
|
|
||||||
|
files, _, err := storage.ListFiles(0, dir)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("CHUNK_LIST", "Failed to list the directory %s: %v", dir, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if len(file) > 0 && file[len(file)-1] == '/' {
|
||||||
|
directories = append(directories, dir+file)
|
||||||
|
} else {
|
||||||
|
chunk := dir + file
|
||||||
|
chunk = chunk[len("chunks/"):]
|
||||||
|
chunks = append(chunks, chunk)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func moveChunk(t *testing.T, storage Storage, chunkID string, isFossil bool, delay int) {
|
||||||
|
|
||||||
|
filePath, exist, _, err := storage.FindChunk(0, chunkID, isFossil)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error find chunk %s: %v", chunkID, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
to := filePath + ".fsl"
|
||||||
|
if isFossil {
|
||||||
|
to = filePath[:len(filePath)-len(".fsl")]
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.MoveFile(0, filePath, to)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error renaming file %s to %s: %v", filePath, to, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
_, exist, _, err = storage.FindChunk(0, chunkID, isFossil)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error get file info for chunk %s: %v", chunkID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if exist {
|
||||||
|
t.Errorf("File %s still exists after renaming", filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
_, exist, _, err = storage.FindChunk(0, chunkID, !isFossil)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error get file info for %s: %v", to, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !exist {
|
||||||
|
t.Errorf("File %s doesn't exist", to)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStorage(t *testing.T) {
|
||||||
|
|
||||||
|
rand.Seed(time.Now().UnixNano())
|
||||||
|
setTestingT(t)
|
||||||
|
SetLoggingLevel(INFO)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
switch e := r.(type) {
|
||||||
|
case Exception:
|
||||||
|
t.Errorf("%s %s", e.LogID, e.Message)
|
||||||
|
debug.PrintStack()
|
||||||
|
default:
|
||||||
|
t.Errorf("%v", e)
|
||||||
|
debug.PrintStack()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
os.MkdirAll(testDir, 0700)
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
|
||||||
|
|
||||||
|
threads := 8
|
||||||
|
storage, err := loadStorage(testDir, threads)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create storage: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
storage.EnableTestMode()
|
||||||
|
storage.SetRateLimits(testRateLimit, testRateLimit)
|
||||||
|
|
||||||
|
delay := 0
|
||||||
|
if _, ok := storage.(*ACDStorage); ok {
|
||||||
|
delay = 5
|
||||||
|
}
|
||||||
|
if _, ok := storage.(*HubicStorage); ok {
|
||||||
|
delay = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, dir := range []string{"chunks", "snapshots"} {
|
||||||
|
err = storage.CreateDirectory(0, dir)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create directory %s: %v", dir, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.CreateDirectory(0, "snapshots/repository1")
|
||||||
|
storage.CreateDirectory(0, "snapshots/repository2")
|
||||||
|
|
||||||
|
storage.CreateDirectory(0, "shared")
|
||||||
|
|
||||||
|
// Upload to the same directory by multiple goroutines
|
||||||
|
count := threads
|
||||||
|
finished := make(chan int, count)
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
go func(threadIndex int, name string) {
|
||||||
|
err := storage.UploadFile(threadIndex, name, []byte("this is a test file"))
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error to upload '%s': %v", name, err)
|
||||||
|
}
|
||||||
|
finished <- 0
|
||||||
|
}(i, fmt.Sprintf("shared/a/b/c/%d", i))
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
<-finished
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < count; i++ {
|
||||||
|
storage.DeleteFile(0, fmt.Sprintf("shared/a/b/c/%d", i))
|
||||||
|
}
|
||||||
|
storage.DeleteFile(0, "shared/a/b/c")
|
||||||
|
storage.DeleteFile(0, "shared/a/b")
|
||||||
|
storage.DeleteFile(0, "shared/a")
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
{
|
||||||
|
|
||||||
|
// Upload fake snapshot files so that for storages having no concept of directories,
|
||||||
|
// ListFiles("snapshots") still returns correct snapshot IDs.
|
||||||
|
|
||||||
|
// Create a random file not a text file to make ACD Storage happy.
|
||||||
|
content := make([]byte, 100)
|
||||||
|
_, err = crypto_rand.Read(content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error generating random content: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.UploadFile(0, "snapshots/repository1/1", content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error to upload snapshots/repository1/1: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.UploadFile(0, "snapshots/repository2/1", content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error to upload snapshots/repository2/1: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
snapshotDirs, _, err := storage.ListFiles(0, "snapshots/")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list snapshot ids: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
snapshotIDs := []string{}
|
||||||
|
for _, snapshotDir := range snapshotDirs {
|
||||||
|
if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
|
||||||
|
snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(snapshotIDs) < 2 {
|
||||||
|
t.Errorf("Snapshot directories not created")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, snapshotID := range snapshotIDs {
|
||||||
|
snapshots, _, err := storage.ListFiles(0, "snapshots/"+snapshotID)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list snapshots for %s: %v", snapshotID, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for _, snapshot := range snapshots {
|
||||||
|
storage.DeleteFile(0, "snapshots/"+snapshotID+"/"+snapshot)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
|
storage.DeleteFile(0, "config")
|
||||||
|
|
||||||
|
for _, file := range []string{"snapshots/repository1/1", "snapshots/repository2/1"} {
|
||||||
|
exist, _, _, err := storage.GetFileInfo(0, file)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to get file info for %s: %v", file, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if exist {
|
||||||
|
t.Errorf("File %s still exists after deletion", file)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
numberOfFiles := 10
|
||||||
|
maxFileSize := 64 * 1024
|
||||||
|
|
||||||
|
if testQuickMode {
|
||||||
|
numberOfFiles = 2
|
||||||
|
}
|
||||||
|
|
||||||
|
chunks := []string{}
|
||||||
|
|
||||||
|
for i := 0; i < numberOfFiles; i++ {
|
||||||
|
content := make([]byte, rand.Int()%maxFileSize+1)
|
||||||
|
_, err = crypto_rand.Read(content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error generating random content: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
hasher.Write(content)
|
||||||
|
chunkID := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
chunks = append(chunks, chunkID)
|
||||||
|
|
||||||
|
filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to list the chunk %s: %v", chunkID, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if exist {
|
||||||
|
t.Errorf("Chunk %s already exists", chunkID)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = storage.UploadFile(0, filePath, content)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to upload the file %s: %v", filePath, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
LOG_INFO("STORAGE_CHUNK", "Uploaded chunk: %s, size: %d", filePath, len(content))
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[0])
|
||||||
|
moveChunk(t, storage, chunks[0], false, delay)
|
||||||
|
LOG_INFO("STORAGE_FOSSIL", "Making %s a chunk", chunks[0])
|
||||||
|
moveChunk(t, storage, chunks[0], true, delay)
|
||||||
|
|
||||||
|
config := CreateConfig()
|
||||||
|
config.MinimumChunkSize = 100
|
||||||
|
config.chunkPool = make(chan *Chunk, numberOfFiles*2)
|
||||||
|
|
||||||
|
chunk := CreateChunk(config, true)
|
||||||
|
|
||||||
|
for _, chunkID := range chunks {
|
||||||
|
|
||||||
|
chunk.Reset(false)
|
||||||
|
filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error getting file info for chunk %s: %v", chunkID, err)
|
||||||
|
continue
|
||||||
|
} else if !exist {
|
||||||
|
t.Errorf("Chunk %s does not exist", chunkID)
|
||||||
|
continue
|
||||||
|
} else {
|
||||||
|
err = storage.DownloadFile(0, filePath, chunk)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error downloading file %s: %v", filePath, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
LOG_INFO("STORAGE_CHUNK", "Downloaded chunk: %s, size: %d", filePath, chunk.GetLength())
|
||||||
|
}
|
||||||
|
|
||||||
|
hasher := sha256.New()
|
||||||
|
hasher.Write(chunk.GetBytes())
|
||||||
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
|
|
||||||
|
if hash != chunkID {
|
||||||
|
t.Errorf("File %s, hash %s, size %d", chunkID, hash, chunk.GetBytes())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[1])
|
||||||
|
moveChunk(t, storage, chunks[1], false, delay)
|
||||||
|
|
||||||
|
filePath, exist, _, err := storage.FindChunk(0, chunks[1], true)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error getting file info for fossil %s: %v", chunks[1], err)
|
||||||
|
} else if !exist {
|
||||||
|
t.Errorf("Fossil %s does not exist", chunks[1])
|
||||||
|
} else {
|
||||||
|
err = storage.DeleteFile(0, filePath)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to delete file %s: %v", filePath, err)
|
||||||
|
} else {
|
||||||
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
filePath, exist, _, err = storage.FindChunk(0, chunks[1], true)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Error get file info for deleted fossil %s: %v", chunks[1], err)
|
||||||
|
} else if exist {
|
||||||
|
t.Errorf("Fossil %s still exists after deletion", chunks[1])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
allChunks := []string{}
|
||||||
|
for _, file := range listChunks(storage) {
|
||||||
|
allChunks = append(allChunks, file)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range allChunks {
|
||||||
|
|
||||||
|
err = storage.DeleteFile(0, "chunks/"+file)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to delete the file %s: %v", file, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestCleanStorage(t *testing.T) {
|
||||||
|
setTestingT(t)
|
||||||
|
SetLoggingLevel(INFO)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
switch e := r.(type) {
|
||||||
|
case Exception:
|
||||||
|
t.Errorf("%s %s", e.LogID, e.Message)
|
||||||
|
debug.PrintStack()
|
||||||
|
default:
|
||||||
|
t.Errorf("%v", e)
|
||||||
|
debug.PrintStack()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
|
||||||
|
os.RemoveAll(testDir)
|
||||||
|
os.MkdirAll(testDir, 0700)
|
||||||
|
|
||||||
|
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
|
||||||
|
|
||||||
|
storage, err := loadStorage(testDir, 1)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("Failed to create storage: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
directories := make([]string, 0, 1024)
|
||||||
|
directories = append(directories, "snapshots/")
|
||||||
|
directories = append(directories, "chunks/")
|
||||||
|
|
||||||
|
for len(directories) > 0 {
|
||||||
|
|
||||||
|
dir := directories[len(directories)-1]
|
||||||
|
directories = directories[:len(directories)-1]
|
||||||
|
|
||||||
|
LOG_INFO("LIST_FILES", "Listing %s", dir)
|
||||||
|
|
||||||
|
files, _, err := storage.ListFiles(0, dir)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, file := range files {
|
||||||
|
if len(file) > 0 && file[len(file)-1] == '/' {
|
||||||
|
directories = append(directories, dir+file)
|
||||||
|
} else {
|
||||||
|
storage.DeleteFile(0, dir+file)
|
||||||
|
LOG_INFO("DELETE_FILE", "Deleted file %s", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DeleteFile(0, "config")
|
||||||
|
LOG_INFO("DELETE_FILE", "Deleted config")
|
||||||
|
|
||||||
|
files, _, err := storage.ListFiles(0, "chunks/")
|
||||||
|
for _, file := range files {
|
||||||
|
if len(file) > 0 && file[len(file)-1] != '/' {
|
||||||
|
LOG_DEBUG("FILE_EXIST", "File %s exists after deletion", file)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
256
src/duplicacy_swiftstorage.go
Normal file
256
src/duplicacy_swiftstorage.go
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/ncw/swift"
|
||||||
|
)
|
||||||
|
|
||||||
|
// SwiftStorage is a storage backend for OpenStack Swift object stores.
type SwiftStorage struct {
	StorageBase

	connection *swift.Connection // authenticated connection to the Swift cluster
	container  string            // name of the container holding all objects
	storageDir string            // object name prefix; either "" or ends with '/'
	threads    int               // number of threads; used to divide the rate limits
}
|
||||||
|
|
||||||
|
// CreateSwiftStorage creates an OpenStack Swift storage object. storageURL is in the form of
|
||||||
|
// `user@authURL/container/path?arg1=value1&arg2=value2``
|
||||||
|
func CreateSwiftStorage(storageURL string, key string, threads int) (storage *SwiftStorage, err error) {
|
||||||
|
|
||||||
|
// This is the map to store all arguments
|
||||||
|
arguments := make(map[string]string)
|
||||||
|
|
||||||
|
// Check if there are arguments provided as a query string
|
||||||
|
if strings.Contains(storageURL, "?") {
|
||||||
|
urlAndArguments := strings.SplitN(storageURL, "?", 2)
|
||||||
|
storageURL = urlAndArguments[0]
|
||||||
|
for _, pair := range strings.Split(urlAndArguments[1], "&") {
|
||||||
|
if strings.Contains(pair, "=") {
|
||||||
|
keyAndValue := strings.Split(pair, "=")
|
||||||
|
arguments[keyAndValue[0]] = keyAndValue[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take out the user name if there is one
|
||||||
|
if strings.Contains(storageURL, "@") {
|
||||||
|
userAndURL := strings.Split(storageURL, "@")
|
||||||
|
arguments["user"] = userAndURL[0]
|
||||||
|
storageURL = userAndURL[1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// The version is used to split authURL and container/path
|
||||||
|
versions := []string{"/v1/", "/v1.0/", "/v2/", "/v2.0/", "/v3/", "/v3.0/", "/v4/", "/v4.0/"}
|
||||||
|
storageDir := ""
|
||||||
|
for _, version := range versions {
|
||||||
|
if strings.Contains(storageURL, version) {
|
||||||
|
urlAndStorageDir := strings.SplitN(storageURL, version, 2)
|
||||||
|
storageURL = urlAndStorageDir[0] + version[0:len(version)-1]
|
||||||
|
storageDir = urlAndStorageDir[1]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// If no container/path is specified, find them from the arguments
|
||||||
|
if storageDir == "" {
|
||||||
|
storageDir = arguments["storage_dir"]
|
||||||
|
}
|
||||||
|
|
||||||
|
// Now separate the container name from the storage path
|
||||||
|
container := ""
|
||||||
|
if strings.Contains(storageDir, "/") {
|
||||||
|
containerAndStorageDir := strings.SplitN(storageDir, "/", 2)
|
||||||
|
container = containerAndStorageDir[0]
|
||||||
|
storageDir = containerAndStorageDir[1]
|
||||||
|
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||||
|
storageDir += "/"
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
container = storageDir
|
||||||
|
storageDir = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
// Number of retries on err
|
||||||
|
retries := 4
|
||||||
|
if value, ok := arguments["retries"]; ok {
|
||||||
|
retries, _ = strconv.Atoi(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Connect channel timeout
|
||||||
|
connectionTimeout := 10
|
||||||
|
if value, ok := arguments["connection_timeout"]; ok {
|
||||||
|
connectionTimeout, _ = strconv.Atoi(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Data channel timeout
|
||||||
|
timeout := 60
|
||||||
|
if value, ok := arguments["timeout"]; ok {
|
||||||
|
timeout, _ = strconv.Atoi(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Auth version; default to auto-detect
|
||||||
|
authVersion := 0
|
||||||
|
if value, ok := arguments["auth_version"]; ok {
|
||||||
|
authVersion, _ = strconv.Atoi(value)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allow http to be used by setting "protocol=http" in arguments
|
||||||
|
if _, ok := arguments["protocol"]; !ok {
|
||||||
|
arguments["protocol"] = "https"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Please refer to https://godoc.org/github.com/ncw/swift#Connection
|
||||||
|
connection := swift.Connection{
|
||||||
|
Domain: arguments["domain"],
|
||||||
|
DomainId: arguments["domain_id"],
|
||||||
|
UserName: arguments["user"],
|
||||||
|
UserId: arguments["user_id"],
|
||||||
|
ApiKey: key,
|
||||||
|
AuthUrl: arguments["protocol"] + "://" + storageURL,
|
||||||
|
Retries: retries,
|
||||||
|
UserAgent: arguments["user_agent"],
|
||||||
|
ConnectTimeout: time.Duration(connectionTimeout) * time.Second,
|
||||||
|
Timeout: time.Duration(timeout) * time.Second,
|
||||||
|
Region: arguments["region"],
|
||||||
|
AuthVersion: authVersion,
|
||||||
|
Internal: false,
|
||||||
|
Tenant: arguments["tenant"],
|
||||||
|
TenantId: arguments["tenant_id"],
|
||||||
|
EndpointType: swift.EndpointType(arguments["endpiont_type"]),
|
||||||
|
TenantDomain: arguments["tenant_domain"],
|
||||||
|
TenantDomainId: arguments["tenant_domain_id"],
|
||||||
|
TrustId: arguments["trust_id"],
|
||||||
|
}
|
||||||
|
|
||||||
|
err = connection.Authenticate()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, err = connection.Container(container)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &SwiftStorage{
|
||||||
|
connection: &connection,
|
||||||
|
container: container,
|
||||||
|
storageDir: storageDir,
|
||||||
|
threads: threads,
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{1}, 1)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
|
func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
|
if len(dir) > 0 && dir[len(dir)-1] != '/' {
|
||||||
|
dir += "/"
|
||||||
|
}
|
||||||
|
isSnapshotDir := dir == "snapshots/"
|
||||||
|
dir = storage.storageDir + dir
|
||||||
|
|
||||||
|
options := swift.ObjectsOpts{
|
||||||
|
Prefix: dir,
|
||||||
|
Limit: 1000,
|
||||||
|
}
|
||||||
|
|
||||||
|
if isSnapshotDir {
|
||||||
|
options.Delimiter = '/'
|
||||||
|
}
|
||||||
|
|
||||||
|
objects, err := storage.connection.ObjectsAll(storage.container, &options)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, obj := range objects {
|
||||||
|
if isSnapshotDir {
|
||||||
|
if obj.SubDir != "" {
|
||||||
|
files = append(files, obj.SubDir[len(dir):])
|
||||||
|
sizes = append(sizes, 0)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
files = append(files, obj.Name[len(dir):])
|
||||||
|
sizes = append(sizes, obj.Bytes)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return files, sizes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
// The path is resolved relative to the configured storage prefix.
func (storage *SwiftStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	return storage.connection.ObjectDelete(storage.container, storage.storageDir+filePath)
}
|
||||||
|
|
||||||
|
// MoveFile renames the file within the same container by a server-side
// object move from 'from' to 'to' (both relative to the storage prefix).
func (storage *SwiftStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	return storage.connection.ObjectMove(storage.container, storage.storageDir+from,
		storage.container, storage.storageDir+to)
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
func (storage *SwiftStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	// Does nothing as directories do not exist in OpenStack Swift
	return nil
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
|
func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
object, _, err := storage.connection.Object(storage.container, storage.storageDir+filePath)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
if err == swift.ObjectNotFound {
|
||||||
|
return false, false, 0, nil
|
||||||
|
} else {
|
||||||
|
return false, false, 0, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, false, object.Bytes, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
|
||||||
|
file, _, err := storage.connection.ObjectOpen(storage.container, storage.storageDir+filePath, false, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.threads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	// Divide the upload rate limit among the configured threads.
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads)
	_, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
	return err
}
|
||||||
|
|
||||||
|
// IsCacheNeeded reports whether a local snapshot cache is needed for the
// storage to avoid downloading/uploading chunks too often when managing
// snapshots; always true for this backend.
func (storage *SwiftStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// IsMoveFileImplemented reports whether the 'MoveFile' method is implemented;
// always true for this backend.
func (storage *SwiftStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// IsStrongConsistent reports whether the storage can guarantee strong
// consistency; always false for this backend.
func (storage *SwiftStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
|
// IsFastListing reports whether the storage supports fast listing of file
// names; always true for this backend.
func (storage *SwiftStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
|
// EnableTestMode enables the test mode.
func (storage *SwiftStorage) EnableTestMode() {
	// No special behavior is needed when testing this backend.
}
|
||||||
462
src/duplicacy_utils.go
Normal file
462
src/duplicacy_utils.go
Normal file
@@ -0,0 +1,462 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"crypto/sha256"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
|
"regexp"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gilbertchen/gopass"
|
||||||
|
"golang.org/x/crypto/pbkdf2"
|
||||||
|
)
|
||||||
|
|
||||||
|
// RunInBackground indicates that no user is present to respond to prompts;
// password lookup code checks it to avoid interactive input.
var RunInBackground bool = false
||||||
|
|
||||||
|
// RateLimitedReader is a reader over an in-memory buffer that throttles
// reads to a fixed byte rate.
type RateLimitedReader struct {
	Content   []byte    // the data to be read
	Rate      float64   // maximum rate in bytes per second; <= 0 disables limiting
	Next      int       // offset of the next byte to hand out
	StartTime time.Time // time of the first rate-limited read; zero until then
}
|
||||||
|
|
||||||
|
var RegexMap map[string]*regexp.Regexp
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
|
||||||
|
if RegexMap == nil {
|
||||||
|
RegexMap = make(map[string]*regexp.Regexp)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func CreateRateLimitedReader(content []byte, rate int) *RateLimitedReader {
|
||||||
|
return &RateLimitedReader{
|
||||||
|
Content: content,
|
||||||
|
Rate: float64(rate * 1024),
|
||||||
|
Next: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsEmptyFilter reports whether 'pattern' is a bare filter prefix with no
// actual pattern attached.
func IsEmptyFilter(pattern string) bool {
	switch pattern {
	case "+", "-", "i:", "e:":
		return true
	}
	return false
}
|
||||||
|
|
||||||
|
// IsUnspecifiedFilter reports whether 'pattern' carries none of the known
// filter prefixes ("+", "-", "i:", "e:").
func IsUnspecifiedFilter(pattern string) bool {
	// Guard the empty pattern; indexing pattern[0] used to panic on "".
	if len(pattern) == 0 {
		return true
	}
	if pattern[0] == '+' || pattern[0] == '-' {
		return false
	}
	return !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:")
}
|
||||||
|
|
||||||
|
func IsValidRegex(pattern string) (valid bool, err error) {
|
||||||
|
|
||||||
|
var re *regexp.Regexp = nil
|
||||||
|
|
||||||
|
if re, valid = RegexMap[pattern]; valid && re != nil {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
re, err = regexp.Compile(pattern)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
} else {
|
||||||
|
RegexMap[pattern] = re
|
||||||
|
LOG_DEBUG("REGEX_STORED", "Saved compiled regex for pattern \"%s\", regex=%#v", pattern, re)
|
||||||
|
return true, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Length returns the total number of bytes in the underlying content.
func (reader *RateLimitedReader) Length() int64 {
	return int64(len(reader.Content))
}
|
||||||
|
|
||||||
|
// Reset rewinds the reader to the beginning of the content so it can be
// read again.
func (reader *RateLimitedReader) Reset() {
	reader.Next = 0
}
|
||||||
|
|
||||||
|
func (reader *RateLimitedReader) Seek(offset int64, whence int) (int64, error) {
|
||||||
|
if whence == io.SeekStart {
|
||||||
|
reader.Next = int(offset)
|
||||||
|
} else if whence == io.SeekCurrent {
|
||||||
|
reader.Next += int(offset)
|
||||||
|
} else {
|
||||||
|
reader.Next = len(reader.Content) - int(offset)
|
||||||
|
}
|
||||||
|
return int64(reader.Next), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Read implements io.Reader, pacing reads so that the cumulative bytes
// delivered stay close to reader.Rate bytes per second.
func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {

	if reader.Next >= len(reader.Content) {
		return 0, io.EOF
	}

	// No rate limit: hand out as much as fits in p, signaling EOF together
	// with the final bytes.
	if reader.Rate <= 0 {
		n := copy(p, reader.Content[reader.Next:])
		reader.Next += n
		if reader.Next >= len(reader.Content) {
			return n, io.EOF
		}
		return n, nil
	}

	// The pacing clock starts on the first rate-limited read.
	if reader.StartTime.IsZero() {
		reader.StartTime = time.Now()
	}

	// If we're ahead of schedule (bytes already delivered exceed the target
	// for the elapsed time), sleep the difference; if behind, enlarge this
	// read beyond the per-call quota (Rate/5) to catch up.
	elapsed := time.Since(reader.StartTime).Seconds()
	delay := float64(reader.Next)/reader.Rate - elapsed
	end := reader.Next + int(reader.Rate/5)
	if delay > 0 {
		time.Sleep(time.Duration(delay * float64(time.Second)))
	} else {
		end += -int(delay * reader.Rate)
	}

	if end > len(reader.Content) {
		end = len(reader.Content)
	}

	n = copy(p, reader.Content[reader.Next:end])
	reader.Next += n
	return n, nil
}
|
||||||
|
|
||||||
|
func RateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int64, err error) {
|
||||||
|
if rate <= 0 {
|
||||||
|
return io.Copy(writer, reader)
|
||||||
|
}
|
||||||
|
for range time.Tick(time.Second / 5) {
|
||||||
|
n, err := io.CopyN(writer, reader, int64(rate*1024/5))
|
||||||
|
written += n
|
||||||
|
if err != nil {
|
||||||
|
if err == io.EOF {
|
||||||
|
return written, nil
|
||||||
|
} else {
|
||||||
|
return written, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return written, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenerateKeyFromPassword derives a 32-byte key from the password using
// PBKDF2 with SHA-256, the given salt, and the given iteration count.
func GenerateKeyFromPassword(password string, salt []byte, iterations int) []byte {
	return pbkdf2.Key([]byte(password), salt, iterations, 32, sha256.New)
}
|
||||||
|
|
||||||
|
// GetPasswordFromPreference looks up a password of the given type for the
// given preference, checking (in order) environment variables and the keys
// stored in the preference itself; it never starts a keyring request.
// It returns "" when no password is found.
func GetPasswordFromPreference(preference Preference, passwordType string) string {
	// Non-default storages get their own password namespace by prefixing
	// the preference name.
	passwordID := passwordType
	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
	}

	{
		// First try the DUPLICACY_* environment variable for this ID.
		name := strings.ToUpper("duplicacy_" + passwordID)
		LOG_DEBUG("PASSWORD_ENV_VAR", "Reading the environment variable %s", name)
		if password, found := os.LookupEnv(name); found && password != "" {
			return password
		}

		// Retry with non-alphanumeric characters replaced by '_', since such
		// characters may not be usable in environment variable names.
		re := regexp.MustCompile(`[^a-zA-Z0-9_]`)
		namePlain := re.ReplaceAllString(name, "_")
		if namePlain != name {
			LOG_DEBUG("PASSWORD_ENV_VAR", "Reading the environment variable %s", namePlain)
			if password, found := os.LookupEnv(namePlain); found && password != "" {
				return password
			}
		}
	}

	// If the password is stored in the preference, there is no need to include the storage name
	// (i.e., preference.Name) in the key, so the key name should really be passwordType rather
	// than passwordID; we're using passwordID here only for backward compatibility
	if len(preference.Keys) > 0 && len(preference.Keys[passwordID]) > 0 {
		LOG_DEBUG("PASSWORD_PREFERENCE", "Reading %s from preferences", passwordID)
		return preference.Keys[passwordID]
	}

	if len(preference.Keys) > 0 && len(preference.Keys[passwordType]) > 0 {
		LOG_DEBUG("PASSWORD_PREFERENCE", "Reading %s from preferences", passwordType)
		return preference.Keys[passwordType]
	}

	return ""
}
|
||||||
|
|
||||||
|
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
|
||||||
|
func GetPassword(preference Preference, passwordType string, prompt string,
|
||||||
|
showPassword bool, resetPassword bool) string {
|
||||||
|
passwordID := passwordType
|
||||||
|
|
||||||
|
preferencePassword := GetPasswordFromPreference(preference, passwordType)
|
||||||
|
if preferencePassword != "" {
|
||||||
|
return preferencePassword
|
||||||
|
}
|
||||||
|
|
||||||
|
if preference.Name != "default" {
|
||||||
|
passwordID = preference.Name + "_" + passwordID
|
||||||
|
}
|
||||||
|
|
||||||
|
if resetPassword && !RunInBackground {
|
||||||
|
keyringSet(passwordID, "")
|
||||||
|
} else {
|
||||||
|
password := keyringGet(passwordID)
|
||||||
|
if password != "" {
|
||||||
|
LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from keychain/keyring", passwordType)
|
||||||
|
return password
|
||||||
|
}
|
||||||
|
|
||||||
|
if RunInBackground {
|
||||||
|
LOG_INFO("PASSWORD_MISSING", "%s is not found in Keychain/Keyring", passwordID)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
password := ""
|
||||||
|
fmt.Printf("%s", prompt)
|
||||||
|
if showPassword {
|
||||||
|
scanner := bufio.NewScanner(os.Stdin)
|
||||||
|
scanner.Scan()
|
||||||
|
password = scanner.Text()
|
||||||
|
} else {
|
||||||
|
passwordInBytes, err := gopass.GetPasswdMasked()
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("PASSWORD_READ", "Failed to read the password: %v", err)
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
password = string(passwordInBytes)
|
||||||
|
}
|
||||||
|
|
||||||
|
return password
|
||||||
|
}
|
||||||
|
|
||||||
|
// SavePassword saves the specified password in the keyring/keychain.
|
||||||
|
func SavePassword(preference Preference, passwordType string, password string) {
|
||||||
|
|
||||||
|
if password == "" || RunInBackground {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if preference.DoNotSavePassword {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the password is retrieved from env or preference, don't save it to keyring
|
||||||
|
if GetPasswordFromPreference(preference, passwordType) == password {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
passwordID := passwordType
|
||||||
|
if preference.Name != "default" {
|
||||||
|
passwordID = preference.Name + "_" + passwordID
|
||||||
|
}
|
||||||
|
keyringSet(passwordID, password)
|
||||||
|
}
|
||||||
|
|
||||||
|
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
// Dr. Dobb's, August 26, 2008. However, the version in the article doesn't handle cases like matching 'abcccd'
// against '*ccd', and the version here fixed that issue.
//
// matchPattern reports whether 'text' matches 'pattern', where '?' matches any
// single character and '*' matches any (possibly empty) run of characters.
func matchPattern(text string, pattern string) bool {

	textLength := len(text)
	patternLength := len(pattern)

	// Backtracking state: the pattern position just after the most recent
	// '*', and the text position where that '*' last stopped matching.
	restartPattern := 0
	restartText := 0

	ti := 0
	pi := 0

	for {
		if ti >= textLength {
			if pi >= patternLength {
				return true // "x" matches "x"
			} else if pattern[pi] == '*' {
				pi++
				continue // "x*" matches "x" or "xy"
			}
			return false // "x" doesn't match "xy"
		}

		current := byte(0)
		if pi < patternLength {
			current = pattern[pi]
		}

		if text[ti] != current {
			switch {
			case current == '?':
				ti++
				pi++
				continue
			case current == '*':
				pi++
				restartPattern = pi
				if pi >= patternLength {
					return true
				}
			case restartPattern > 0:
				// Mismatch after a '*': resume just after the wildcard,
				// one character further into the text.
				pi = restartPattern
				ti = restartText
				ti++
			default:
				return false
			}

			// Skip text characters until one can match the pattern
			// character that follows the wildcard.
			for ti < textLength && text[ti] != pattern[pi] && pattern[pi] != '?' {
				ti++
			}

			if ti >= textLength {
				return false
			}
			restartText = ti
		}
		ti++
		pi++
	}
}
|
||||||
|
|
||||||
|
// MatchPath returns 'true' if the file 'filePath' is excluded by the specified 'patterns'. Each pattern starts with
|
||||||
|
// either '+' or '-', whereas '-' indicates exclusion and '+' indicates inclusion. Wildcards like '*' and '?' may
|
||||||
|
// appear in the patterns. In case no matching pattern is found, the file will be excluded if all patterns are
|
||||||
|
// include patterns, and included otherwise.
|
||||||
|
func MatchPath(filePath string, patterns []string) (included bool) {
|
||||||
|
|
||||||
|
var re *regexp.Regexp = nil
|
||||||
|
var found bool
|
||||||
|
var matched bool
|
||||||
|
|
||||||
|
allIncludes := true
|
||||||
|
|
||||||
|
for _, pattern := range patterns {
|
||||||
|
if pattern[0] == '+' {
|
||||||
|
if matchPattern(filePath, pattern[1:]) {
|
||||||
|
LOG_DEBUG("PATTERN_INCLUDE", "%s is included by pattern %s", filePath, pattern)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
} else if pattern[0] == '-' {
|
||||||
|
allIncludes = false
|
||||||
|
if matchPattern(filePath, pattern[1:]) {
|
||||||
|
LOG_DEBUG("PATTERN_EXCLUDE", "%s is excluded by pattern %s", filePath, pattern)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
} else if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
|
||||||
|
if re, found = RegexMap[pattern[2:]]; found {
|
||||||
|
matched = re.MatchString(filePath)
|
||||||
|
} else {
|
||||||
|
re, err := regexp.Compile(pattern)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("REGEX_ERROR", "Invalid regex encountered for pattern \"%s\" - %v", pattern[2:], err)
|
||||||
|
}
|
||||||
|
RegexMap[pattern] = re
|
||||||
|
matched = re.MatchString(filePath)
|
||||||
|
}
|
||||||
|
if matched {
|
||||||
|
if strings.HasPrefix(pattern, "i:") {
|
||||||
|
LOG_DEBUG("PATTERN_INCLUDE", "%s is included by pattern %s", filePath, pattern)
|
||||||
|
return true
|
||||||
|
} else {
|
||||||
|
LOG_DEBUG("PATTERN_EXCLUDE", "%s is excluded by pattern %s", filePath, pattern)
|
||||||
|
return false
|
||||||
|
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if strings.HasPrefix(pattern, "e:") {
|
||||||
|
allIncludes = false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if allIncludes {
|
||||||
|
LOG_DEBUG("PATTERN_EXCLUDE", "%s is excluded", filePath)
|
||||||
|
return false
|
||||||
|
} else {
|
||||||
|
LOG_DEBUG("PATTERN_INCLUDE", "%s is included", filePath)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrettyNumber formats a byte count using G/M/K suffixes, inserting a
// thousands separator in the mid-range forms (e.g. "2,048M").
func PrettyNumber(number int64) string {

	const (
		K = int64(1024)
		M = 1024 * K
		G = 1024 * M
	)

	switch {
	case number > 1000*G:
		return fmt.Sprintf("%dG", number/G)
	case number > G:
		return fmt.Sprintf("%d,%03dM", number/(1000*M), (number/M)%1000)
	case number > M:
		return fmt.Sprintf("%d,%03dK", number/(1000*K), (number/K)%1000)
	case number > K:
		return fmt.Sprintf("%dK", number/K)
	default:
		return fmt.Sprintf("%d", number)
	}
}
|
||||||
|
|
||||||
|
// PrettySize formats a byte count with a fractional megabyte suffix above
// 1M, a whole kilobyte suffix above 1K, and a plain number otherwise.
func PrettySize(size int64) string {
	const (
		kilo = 1024.0
		mega = kilo * kilo
	)
	switch {
	case size > 1024*1024:
		return fmt.Sprintf("%.2fM", float64(size)/mega)
	case size > 1024:
		return fmt.Sprintf("%.0fK", float64(size)/kilo)
	default:
		return fmt.Sprintf("%d", size)
	}
}
|
||||||
|
|
||||||
|
// PrettyTime formats a duration given in seconds as "[N days ]HH:MM:SS";
// negative durations produce "n/a".
func PrettyTime(seconds int64) string {

	const day = int64(3600 * 24)

	// hms splits an intra-day second count into hours, minutes, seconds.
	hms := func(s int64) (int64, int64, int64) {
		return s / 3600, (s % 3600) / 60, s % 60
	}

	switch {
	case seconds > day*2:
		h, m, s := hms(seconds % day)
		return fmt.Sprintf("%d days %02d:%02d:%02d", seconds/day, h, m, s)
	case seconds > day:
		h, m, s := hms(seconds % day)
		return fmt.Sprintf("1 day %02d:%02d:%02d", h, m, s)
	case seconds >= 0:
		h, m, s := hms(seconds)
		return fmt.Sprintf("%02d:%02d:%02d", h, m, s)
	default:
		return "n/a"
	}
}
|
||||||
|
|
||||||
|
// sizePattern matches a decimal number optionally followed by an 'm'
// (mebibytes) or 'k' (kibibytes) suffix.  Compiled once at package level
// instead of on every AtoSize call.
var sizePattern = regexp.MustCompile(`^([0-9]+)([mk])?$`)

// AtoSize parses a human-readable size string such as "4m" or "512K"
// (case-insensitive) into a number of bytes.  It returns 0 when the string
// is not a valid size.
func AtoSize(sizeString string) int {
	matched := sizePattern.FindStringSubmatch(strings.ToLower(sizeString))
	if matched == nil {
		return 0
	}

	// The regex guarantees matched[1] is all digits, so Atoi cannot fail.
	size, _ := strconv.Atoi(matched[1])

	switch matched[2] {
	case "m":
		size *= 1024 * 1024
	case "k":
		size *= 1024
	}

	return size
}
|
||||||
14
src/duplicacy_utils_darwin.go
Normal file
14
src/duplicacy_utils_darwin.go
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// excludedByAttribute reports whether a file carries the macOS Time Machine
// exclusion attribute set by backupd.
func excludedByAttribute(attributes map[string][]byte) bool {
	value, present := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
	if !present {
		return false
	}
	return strings.Contains(string(value), "com.apple.backupd")
}
|
||||||
13
src/duplicacy_utils_freebsd.go
Normal file
13
src/duplicacy_utils_freebsd.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
)
|
||||||
|
|
||||||
|
// excludedByAttribute reports whether the file has been tagged with the
// "duplicacy_exclude" extended attribute.
func excludedByAttribute(attributes map[string][]byte) bool {
	_, excluded := attributes["duplicacy_exclude"]
	return excluded
}
|
||||||
13
src/duplicacy_utils_linux.go
Normal file
13
src/duplicacy_utils_linux.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
)
|
||||||
|
|
||||||
|
// excludedByAttribute reports whether the file has been tagged with the
// "duplicacy_exclude" extended attribute.
func excludedByAttribute(attributes map[string][]byte) bool {
	_, excluded := attributes["duplicacy_exclude"]
	return excluded
}
|
||||||
95
src/duplicacy_utils_others.go
Normal file
95
src/duplicacy_utils_others.go
Normal file
@@ -0,0 +1,95 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
// +build !windows
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"path/filepath"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/pkg/xattr"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Readlink returns the destination of the named symbolic link.  On
// non-Windows platforms a link is never a "regular" reparse file, so the
// first return value is always false.
func Readlink(path string) (isRegular bool, s string, err error) {
	target, readErr := os.Readlink(path)
	return false, target, readErr
}
|
||||||
|
|
||||||
|
func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
|
||||||
|
stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
|
||||||
|
if ok && stat != nil {
|
||||||
|
entry.UID = int(stat.Uid)
|
||||||
|
entry.GID = int(stat.Gid)
|
||||||
|
} else {
|
||||||
|
entry.UID = -1
|
||||||
|
entry.GID = -1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
|
||||||
|
stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
|
||||||
|
if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
|
||||||
|
if entry.UID != -1 && entry.GID != -1 {
|
||||||
|
err := os.Lchown(fullPath, entry.UID, entry.GID)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("RESTORE_CHOWN", "Failed to change uid or gid: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) ReadAttributes(top string) {
|
||||||
|
|
||||||
|
fullPath := filepath.Join(top, entry.Path)
|
||||||
|
attributes, _ := xattr.List(fullPath)
|
||||||
|
if len(attributes) > 0 {
|
||||||
|
entry.Attributes = make(map[string][]byte)
|
||||||
|
for _, name := range attributes {
|
||||||
|
attribute, err := xattr.Get(fullPath, name)
|
||||||
|
if err == nil {
|
||||||
|
entry.Attributes[name] = attribute
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) SetAttributesToFile(fullPath string) {
|
||||||
|
names, _ := xattr.List(fullPath)
|
||||||
|
|
||||||
|
for _, name := range names {
|
||||||
|
|
||||||
|
|
||||||
|
newAttribute, found := entry.Attributes[name]
|
||||||
|
if found {
|
||||||
|
oldAttribute, _ := xattr.Get(fullPath, name)
|
||||||
|
if !bytes.Equal(oldAttribute, newAttribute) {
|
||||||
|
xattr.Set(fullPath, name, newAttribute)
|
||||||
|
}
|
||||||
|
delete(entry.Attributes, name)
|
||||||
|
} else {
|
||||||
|
xattr.Remove(fullPath, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for name, attribute := range entry.Attributes {
|
||||||
|
xattr.Set(fullPath, name, attribute)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// joinPath joins path components with forward slashes.
func joinPath(components ...string) string {
	joined := path.Join(components...)
	return joined
}
|
||||||
|
|
||||||
|
// SplitDir splits a slash-separated path into its directory component
// (including the trailing slash) and the file name.
func SplitDir(fullPath string) (dir string, file string) {
	dir, file = path.Split(fullPath)
	return dir, file
}
|
||||||
149
src/duplicacy_utils_test.go
Normal file
149
src/duplicacy_utils_test.go
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
crypto_rand "crypto/rand"
|
||||||
|
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestMatchPattern exercises matchPattern against a table of wildcard cases
// and then checks IsUnspecifiedFilter against prefixed/unprefixed patterns.
func TestMatchPattern(t *testing.T) {

	// Test cases were copied from Matching Wildcards: An Empirical Way to Tame an Algorithm
	// By Kirk J. Krauss, October 07, 2014

	DATA := []struct {
		text    string
		pattern string
		matched bool
	}{
		// Cases with repeating character sequences.
		{"abcccd", "*ccd", true},
		{"mississipissippi", "*issip*ss*", true},
		{"xxxx*zzzzzzzzy*f", "xxxx*zzy*fffff", false},
		{"xxxx*zzzzzzzzy*f", "xxx*zzy*f", true},
		{"xxxxzzzzzzzzyf", "xxxx*zzy*fffff", false},
		{"xxxxzzzzzzzzyf", "xxxx*zzy*f", true},
		{"xyxyxyzyxyz", "xy*z*xyz", true},
		{"mississippi", "*sip*", true},
		{"xyxyxyxyz", "xy*xyz", true},
		{"mississippi", "mi*sip*", true},
		{"ababac", "*abac*", true},
		{"ababac", "*abac*", true},
		{"aaazz", "a*zz*", true},
		{"a12b12", "*12*23", false},
		{"a12b12", "a12b", false},
		{"a12b12", "*12*12*", true},

		// More double wildcard scenarios.
		{"XYXYXYZYXYz", "XY*Z*XYz", true},
		{"missisSIPpi", "*SIP*", true},
		{"mississipPI", "*issip*PI", true},
		{"xyxyxyxyz", "xy*xyz", true},
		{"miSsissippi", "mi*sip*", true},
		{"miSsissippi", "mi*Sip*", false},
		{"abAbac", "*Abac*", true},
		{"abAbac", "*Abac*", true},
		{"aAazz", "a*zz*", true},
		{"A12b12", "*12*23", false},
		{"a12B12", "*12*12*", true},
		{"oWn", "*oWn*", true},

		// Completely tame (no wildcards) cases.
		{"bLah", "bLah", true},
		{"bLah", "bLaH", false},

		// Simple mixed wildcard tests suggested by IBMer Marlin Deckert.
		{"a", "*?", true},
		{"ab", "*?", true},
		{"abc", "*?", true},

		// More mixed wildcard tests including coverage for false positives.
		{"a", "??", false},
		{"ab", "?*?", true},
		{"ab", "*?*?*", true},
		{"abc", "?*?*?", true},
		{"abc", "?*?*&?", false},
		{"abcd", "?b*??", true},
		{"abcd", "?a*??", false},
		{"abcd", "?*?c?", true},
		{"abcd", "?*?d?", false},
		{"abcde", "?*b*?*d*?", true},

		// Single-character-match cases.
		{"bLah", "bL?h", true},
		{"bLaaa", "bLa?", false},
		{"bLah", "bLa?", true},
		{"bLaH", "?Lah", false},
		{"bLaH", "?LaH", true},
	}

	for _, data := range DATA {
		if matchPattern(data.text, data.pattern) != data.matched {
			t.Errorf("text: %s, pattern %s, expected: %t", data.text, data.pattern, data.matched)
		}
	}

	// Patterns carrying an explicit '+', '-', 'i:', or 'e:' prefix are
	// "specified" filters and must not be reported as unspecified.
	for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
		if IsUnspecifiedFilter(pattern) {
			t.Errorf("pattern %s has a specified filter", pattern)
		}
	}

	// Patterns without a recognized prefix are unspecified.
	for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} {
		if !IsUnspecifiedFilter(pattern) {
			t.Errorf("pattern %s does not have a specified filter", pattern)
		}
	}
}
|
||||||
|
|
||||||
|
// TestRateLimit verifies that both the rate-limited reader and
// RateLimitedCopy transfer the complete content, and logs the achieved
// throughput so it can be compared with the configured limit.
func TestRateLimit(t *testing.T) {
	// 100KB of random content; at 10 kB/s each direction should take
	// roughly ten seconds.
	content := make([]byte, 100*1024)
	_, err := crypto_rand.Read(content)
	if err != nil {
		t.Errorf("Error generating random content: %v", err)
		return
	}

	expectedRate := 10
	rateLimiter := CreateRateLimitedReader(content, expectedRate)

	// Read side: drain the limited reader and check the byte count.
	startTime := time.Now()
	n, err := io.Copy(ioutil.Discard, rateLimiter)
	if err != nil {
		t.Errorf("Error reading from the rate limited reader: %v", err)
		return
	}
	if int(n) != len(content) {
		t.Errorf("Wrote %d bytes instead of %d", n, len(content))
		return
	}

	elapsed := time.Since(startTime)
	actualRate := float64(len(content)) / elapsed.Seconds() / 1024
	t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate)

	// Write side: repeat the measurement through RateLimitedCopy.
	startTime = time.Now()
	n, err = RateLimitedCopy(ioutil.Discard, bytes.NewBuffer(content), expectedRate)
	if err != nil {
		t.Errorf("Error writing with rate limit: %v", err)
		return
	}
	if int(n) != len(content) {
		t.Errorf("Copied %d bytes instead of %d", n, len(content))
		return
	}

	elapsed = time.Since(startTime)
	actualRate = float64(len(content)) / elapsed.Seconds() / 1024
	t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate)

}
|
||||||
137
src/duplicacy_utils_windows.go
Normal file
137
src/duplicacy_utils_windows.go
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
)
|
||||||
|
|
||||||
|
// symbolicLinkReparseBuffer mirrors the layout of the Windows
// SYMBOLIC_LINK_REPARSE_BUFFER structure.  The name offsets/lengths are
// byte counts into PathBuffer, which is a variable-length UTF-16 array
// declared here with a single element.
type symbolicLinkReparseBuffer struct {
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
	Flags                uint32
	PathBuffer           [1]uint16
}

// mountPointReparseBuffer mirrors MOUNT_POINT_REPARSE_BUFFER (junctions);
// same conventions as symbolicLinkReparseBuffer but without the Flags field.
type mountPointReparseBuffer struct {
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
	PathBuffer           [1]uint16
}

// reparseDataBuffer is the fixed header of REPARSE_DATA_BUFFER; the
// tag-specific payload begins at reparseBuffer and is reinterpreted via
// unsafe.Pointer according to ReparseTag (see Readlink below).
type reparseDataBuffer struct {
	ReparseTag        uint32
	ReparseDataLength uint16
	Reserved          uint16

	// GenericReparseBuffer
	reparseBuffer byte
}
|
||||||
|
|
||||||
|
// Windows constants for opening and decoding reparse points (symbolic links,
// junctions, and deduplicated files); values match the Windows SDK headers.
const (
	FSCTL_GET_REPARSE_POINT          = 0x900A8
	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
	IO_REPARSE_TAG_MOUNT_POINT       = 0xA0000003
	IO_REPARSE_TAG_SYMLINK           = 0xA000000C
	IO_REPARSE_TAG_DEDUP             = 0x80000013
	SYMBOLIC_LINK_FLAG_DIRECTORY     = 0x1

	// FILE_READ_ATTRIBUTES is used instead of GENERIC_READ when opening a
	// link, to avoid Access Denied on links like "C:\Documents and Settings".
	FILE_READ_ATTRIBUTES = 0x0080
)
|
||||||
|
|
||||||
|
// We copied golang source code for Readlink but made a simple modification here: use FILE_READ_ATTRIBUTES instead of
|
||||||
|
// GENERIC_READ to read the symlink, because the latter would cause a Access Denied error on links such as
|
||||||
|
// C:\Documents and Settings
|
||||||
|
|
||||||
|
// Readlink returns the destination of the named symbolic link or junction.
// isRegular is true only for deduplicated files (IO_REPARSE_TAG_DEDUP),
// which callers should treat as regular files rather than links.
func Readlink(path string) (isRegular bool, s string, err error) {
	// Open the reparse point itself (FILE_FLAG_OPEN_REPARSE_POINT) with
	// FILE_READ_ATTRIBUTES rather than GENERIC_READ; see the comment above.
	fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), FILE_READ_ATTRIBUTES,
		syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING,
		syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if err != nil {
		return false, "", err
	}
	defer syscall.CloseHandle(fd)

	// Fetch the raw reparse data for the link.
	rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
	var bytesReturned uint32
	err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0],
		uint32(len(rdbbuf)), &bytesReturned, nil)
	if err != nil {
		return false, "", err
	}

	// Interpret the buffer according to its reparse tag.  Offsets and
	// lengths below are byte counts, so they are halved to index the
	// UTF-16 PathBuffer.
	rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
	switch rdb.ReparseTag {
	case IO_REPARSE_TAG_SYMLINK:
		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		// Prefer the print name; fall back to the substitute name.
		if data.PrintNameLength > 0 {
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
		} else {
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
		}
	case IO_REPARSE_TAG_MOUNT_POINT:
		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		if data.PrintNameLength > 0 {
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
		} else {
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
		}
	case IO_REPARSE_TAG_DEDUP:
		return true, "", nil
	default:
		// the path is not a symlink or junction but another type of reparse
		// point
		return false, "", fmt.Errorf("Unhandled reparse point type %x", rdb.ReparseTag)
	}

	return false, s, nil
}
|
||||||
|
|
||||||
|
// GetOwner is a stub on Windows: file ownership is not backed up, so the
// uid/gid are recorded as the unknown value -1.
func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
	entry.UID = -1
	entry.GID = -1
}

// SetOwner is a no-op on Windows and always reports success.
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
	return true
}

// ReadAttributes is a no-op on Windows: extended attributes are not backed up.
func (entry *Entry) ReadAttributes(top string) {
}

// SetAttributesToFile is a no-op on Windows.
func (entry *Entry) SetAttributesToFile(fullPath string) {
}
|
||||||
|
|
||||||
|
func joinPath(components ...string) string {
|
||||||
|
|
||||||
|
combinedPath := `\\?\` + filepath.Join(components...)
|
||||||
|
// If the path is on a samba drive we must use the UNC format
|
||||||
|
if strings.HasPrefix(combinedPath, `\\?\\\`) {
|
||||||
|
combinedPath = `\\?\UNC\` + combinedPath[6:]
|
||||||
|
}
|
||||||
|
return combinedPath
|
||||||
|
}
|
||||||
|
|
||||||
|
// SplitDir splits a Windows path at the last backslash, returning the
// directory (including the trailing backslash) and the file name.
func SplitDir(fullPath string) (dir string, file string) {
	split := strings.LastIndex(fullPath, "\\") + 1
	return fullPath[:split], fullPath[split:]
}
|
||||||
|
|
||||||
|
// excludedByAttribute always reports false on Windows, where exclusion by
// extended attribute is not supported.
func excludedByAttribute(attributes map[string][]byte) bool {
	return false
}
|
||||||
192
src/duplicacy_wasabistorage.go
Normal file
192
src/duplicacy_wasabistorage.go
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
//
|
||||||
|
// Storage module for Wasabi (https://www.wasabi.com)
|
||||||
|
//
|
||||||
|
|
||||||
|
// Wasabi is nominally compatible with AWS S3, but the copy-and-delete
|
||||||
|
// method used for renaming objects creates additional expense under
|
||||||
|
// Wasabi's billing system. This module is a pass-through to the
|
||||||
|
// existing S3 module for everything other than that one operation.
|
||||||
|
//
|
||||||
|
// This module copyright 2017 Mark Feit (https://github.com/markfeit)
|
||||||
|
// and may be distributed under the same terms as Duplicacy.
|
||||||
|
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/hmac"
|
||||||
|
"crypto/sha1"
|
||||||
|
"encoding/base64"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WasabiStorage is a storage backend for Wasabi (https://www.wasabi.com).
// It wraps an S3Storage for every operation except MoveFile, which is issued
// directly against Wasabi's HTTP API to avoid S3's copy-and-delete rename.
type WasabiStorage struct {
	StorageBase

	s3         *S3Storage   // pass-through backend for all standard operations
	region     string       // region name, used when building the MOVE request URL
	endpoint   string       // service endpoint host
	bucket     string       // bucket name
	storageDir string       // directory within the bucket ("" for the bucket root)
	key        string       // access key, used to sign MOVE requests
	secret     string       // secret key, used to sign MOVE requests
	client     *http.Client // shared HTTP client for MOVE requests
}
|
||||||
|
|
||||||
|
// See the Storage interface in duplicacy_storage.go for function
|
||||||
|
// descriptions.
|
||||||
|
|
||||||
|
func CreateWasabiStorage(
|
||||||
|
regionName string, endpoint string,
|
||||||
|
bucketName string, storageDir string,
|
||||||
|
accessKey string, secretKey string,
|
||||||
|
threads int,
|
||||||
|
) (storage *WasabiStorage, err error) {
|
||||||
|
|
||||||
|
s3storage, error := CreateS3Storage(regionName, endpoint, bucketName,
|
||||||
|
storageDir, accessKey, secretKey, threads,
|
||||||
|
true, // isSSLSupported
|
||||||
|
false, // isMinioCompatible
|
||||||
|
)
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
return nil, error
|
||||||
|
}
|
||||||
|
|
||||||
|
wasabi := &WasabiStorage{
|
||||||
|
|
||||||
|
// Pass-through to existing S3 module
|
||||||
|
s3: s3storage,
|
||||||
|
|
||||||
|
// Local copies required for renaming
|
||||||
|
region: regionName,
|
||||||
|
endpoint: endpoint,
|
||||||
|
bucket: bucketName,
|
||||||
|
storageDir: storageDir,
|
||||||
|
key: accessKey,
|
||||||
|
secret: secretKey,
|
||||||
|
client: &http.Client{},
|
||||||
|
}
|
||||||
|
|
||||||
|
wasabi.DerivedStorage = wasabi
|
||||||
|
wasabi.SetDefaultNestingLevels([]int{0}, 0)
|
||||||
|
|
||||||
|
return wasabi, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles delegates to the underlying S3 backend.
func (storage *WasabiStorage) ListFiles(
	threadIndex int, dir string,
) (files []string, sizes []int64, err error) {
	return storage.s3.ListFiles(threadIndex, dir)
}

// DeleteFile delegates to the underlying S3 backend.
func (storage *WasabiStorage) DeleteFile(
	threadIndex int, filePath string,
) (err error) {
	return storage.s3.DeleteFile(threadIndex, filePath)
}
|
||||||
|
|
||||||
|
// This is a lightweight implementation of a call to Wasabi for a
// rename. It's designed to get the job done with as few dependencies
// on other packages as possible rather than being something
// general-purpose and reusable.
func (storage *WasabiStorage) MoveFile(threadIndex int, from string, to string) (err error) {

	var fromPath string
	// The from path includes the bucket. Take care not to include an empty storageDir
	// string as Wasabi's backend will return 404 on URLs with double slashes.
	if storage.storageDir == "" {
		fromPath = fmt.Sprintf("/%s/%s", storage.bucket, from)
	} else {
		fromPath = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
	}

	// NOTE(review): the region is placed in the URL's userinfo position;
	// presumably Wasabi ignores it — confirm against the Wasabi API docs.
	object := fmt.Sprintf("https://%s@%s%s", storage.region, storage.endpoint, fromPath)

	toPath := to
	// The object's new name is relative to the top of the bucket.
	if storage.storageDir != "" {
		toPath = fmt.Sprintf("%s/%s", storage.storageDir, to)
	}

	timestamp := time.Now().Format(time.RFC1123Z)

	// AWS-v2-style string to sign: method, empty MD5/content-type lines,
	// date, and the resource path, signed with HMAC-SHA1.
	signingString := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)

	signer := hmac.New(sha1.New, []byte(storage.secret))
	signer.Write([]byte(signingString))

	signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))

	authorization := fmt.Sprintf("AWS %s:%s", storage.key, signature)

	request, err := http.NewRequest("MOVE", object, nil)
	if err != nil {
		return err
	}
	request.Header.Add("Authorization", authorization)
	request.Header.Add("Date", timestamp)
	request.Header.Add("Destination", toPath)
	request.Header.Add("Host", storage.endpoint)
	request.Header.Add("Overwrite", "true")

	response, err := storage.client.Do(request)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	// Any non-200 status (including other 2xx codes) is treated as failure.
	if response.StatusCode != 200 {
		return errors.New(response.Status)
	}

	return nil
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) CreateDirectory(
|
||||||
|
threadIndex int, dir string,
|
||||||
|
) (err error) {
|
||||||
|
return storage.s3.CreateDirectory(threadIndex, dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) GetFileInfo(
|
||||||
|
threadIndex int, filePath string,
|
||||||
|
) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
return storage.s3.GetFileInfo(threadIndex, filePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) DownloadFile(
|
||||||
|
threadIndex int, filePath string, chunk *Chunk,
|
||||||
|
) (err error) {
|
||||||
|
return storage.s3.DownloadFile(threadIndex, filePath, chunk)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) UploadFile(
|
||||||
|
threadIndex int, filePath string, content []byte,
|
||||||
|
) (err error) {
|
||||||
|
return storage.s3.UploadFile(threadIndex, filePath, content)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) IsCacheNeeded() bool {
|
||||||
|
return storage.s3.IsCacheNeeded()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) IsMoveFileImplemented() bool {
|
||||||
|
// This is implemented locally since S3 does a copy and delete
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) IsStrongConsistent() bool {
|
||||||
|
// Wasabi has it, S3 doesn't.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WasabiStorage) IsFastListing() bool {
|
||||||
|
return storage.s3.IsFastListing()
|
||||||
|
}
|
||||||
|
|
||||||
|
// EnableTestMode enables the test mode; nothing is needed for this backend.
func (storage *WasabiStorage) EnableTestMode() {
}
|
||||||
485
src/duplicacy_webdavstorage.go
Normal file
485
src/duplicacy_webdavstorage.go
Normal file
@@ -0,0 +1,485 @@
|
|||||||
|
// Copyright (c) Acrosync LLC. All rights reserved.
|
||||||
|
// Free for personal use and commercial trial
|
||||||
|
// Commercial use requires per-user licenses available from https://duplicacy.com
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// This storage backend is based on the work by Yuri Karamani from https://github.com/karamani/webdavclnt,
|
||||||
|
// released under the MIT license.
|
||||||
|
//
|
||||||
|
package duplicacy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/xml"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
//"net/http/httputil"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
"io/ioutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WebDAVStorage is a storage backend that talks to a WebDAV server over
// HTTP or HTTPS using the standard library HTTP client.
type WebDAVStorage struct {
	StorageBase

	host       string // server host name
	port       int    // server port; 0 means the scheme's default port
	username   string // basic-auth user name; empty disables authentication
	password   string // basic-auth password
	storageDir string // path prefix on the server; ends with '/' once initialized
	useHTTP    bool   // use plain http:// instead of https://

	client  *http.Client // HTTP client used for all requests
	threads int          // thread count; used to split the rate limits per thread
	directoryCache     map[string]int // stores directories known to exist by this backend
	directoryCacheLock sync.Mutex     // lock for accessing directoryCache
}
|
||||||
|
|
||||||
|
// Sentinel errors returned by sendRequest so callers can distinguish
// specific WebDAV/HTTP failure conditions.
var (
	errWebDAVAuthorizationFailure = errors.New("Authentication failed")
	errWebDAVMovedPermanently     = errors.New("Moved permanently")
	errWebDAVNotExist             = errors.New("Path does not exist")
	errWebDAVMaximumBackoff       = errors.New("Maximum backoff reached")
	errWebDAVMethodNotAllowed     = errors.New("Method not allowed")
)
|
||||||
|
|
||||||
|
func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
|
||||||
|
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||||
|
storageDir += "/"
|
||||||
|
}
|
||||||
|
|
||||||
|
storage = &WebDAVStorage{
|
||||||
|
host: host,
|
||||||
|
port: port,
|
||||||
|
username: username,
|
||||||
|
password: password,
|
||||||
|
storageDir: "",
|
||||||
|
useHTTP: useHTTP,
|
||||||
|
|
||||||
|
client: http.DefaultClient,
|
||||||
|
threads: threads,
|
||||||
|
directoryCache: make(map[string]int),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make sure it doesn't follow redirect
|
||||||
|
storage.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
||||||
|
return http.ErrUseLastResponse
|
||||||
|
}
|
||||||
|
|
||||||
|
exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if !exist {
|
||||||
|
return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
|
||||||
|
}
|
||||||
|
if !isDir {
|
||||||
|
return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
|
||||||
|
}
|
||||||
|
storage.storageDir = storageDir
|
||||||
|
|
||||||
|
for _, dir := range []string{"snapshots", "chunks"} {
|
||||||
|
storage.CreateDirectory(0, dir)
|
||||||
|
}
|
||||||
|
|
||||||
|
storage.DerivedStorage = storage
|
||||||
|
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||||
|
return storage, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WebDAVStorage) createConnectionString(uri string) string {
|
||||||
|
|
||||||
|
url := storage.host
|
||||||
|
|
||||||
|
if storage.useHTTP {
|
||||||
|
url = "http://" + url
|
||||||
|
} else {
|
||||||
|
url = "https://" + url
|
||||||
|
}
|
||||||
|
|
||||||
|
if storage.port > 0 {
|
||||||
|
url += fmt.Sprintf(":%d", storage.port)
|
||||||
|
}
|
||||||
|
return url + "/" + storage.storageDir + uri
|
||||||
|
}
|
||||||
|
|
||||||
|
func (storage *WebDAVStorage) retry(backoff int) int {
|
||||||
|
delay := rand.Intn(backoff*500) + backoff*500
|
||||||
|
time.Sleep(time.Duration(delay) * time.Millisecond)
|
||||||
|
backoff *= 2
|
||||||
|
return backoff
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendRequest issues one WebDAV/HTTP request with up to 8 attempts and
// randomized exponential backoff. The meaning of 'data' depends on 'method':
// the PROPFIND XML body, the PUT payload, or (for MOVE) the destination path,
// which is converted into a Destination header. On a 2xx response the caller
// receives the response body and owns closing it; specific failure statuses
// map to the errWebDAV* sentinel errors.
func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int, data []byte) (io.ReadCloser, http.Header, error) {

	backoff := 1
	for i := 0; i < 8; i++ {

		var dataReader io.Reader
		headers := make(map[string]string)
		if method == "PROPFIND" {
			// Depth controls how deep the property listing recurses.
			headers["Content-Type"] = "application/xml"
			headers["Depth"] = fmt.Sprintf("%d", depth)
			dataReader = bytes.NewReader(data)
		} else if method == "PUT" {
			headers["Content-Type"] = "application/octet-stream"
			headers["Content-Length"] = fmt.Sprintf("%d", len(data))
			if storage.UploadRateLimit <= 0 {
				dataReader = bytes.NewReader(data)
			} else {
				// Split the global upload limit evenly across threads.
				dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
			}
		} else if method == "MOVE" {
			// 'data' carries the destination path, sent as a header per RFC 4918.
			headers["Destination"] = storage.createConnectionString(string(data))
			headers["Content-Type"] = "application/octet-stream"
			dataReader = bytes.NewReader([]byte(""))
		} else {
			headers["Content-Type"] = "application/octet-stream"
			dataReader = bytes.NewReader(data)
		}

		request, err := http.NewRequest(method, storage.createConnectionString(uri), dataReader)
		if err != nil {
			return nil, nil, err
		}

		if len(storage.username) > 0 {
			request.SetBasicAuth(storage.username, storage.password)
		}

		for key, value := range headers {
			request.Header.Set(key, value)
		}

		if method == "PUT" {
			// Set ContentLength explicitly so the request is not sent chunked.
			request.ContentLength = int64(len(data))
		}

		//requestDump, err := httputil.DumpRequest(request, true)
		//LOG_INFO("debug", "Request: %s", requestDump)

		response, err := storage.client.Do(request)
		if err != nil {
			// Transport-level failure: back off and retry.
			LOG_TRACE("WEBDAV_ERROR", "URL request '%s %s' returned an error (%v)", method, uri, err)
			backoff = storage.retry(backoff)
			continue
		}

		if response.StatusCode < 300 {
			// Success: the caller takes ownership of the response body.
			return response.Body, response.Header, nil
		}

		// Drain and close the body so the connection can be reused.
		io.Copy(ioutil.Discard, response.Body)
		response.Body.Close()

		if response.StatusCode == 301 {
			return nil, nil, errWebDAVMovedPermanently
		}

		if response.StatusCode == 404 {
			// Retry if it is UPLOAD, otherwise return immediately
			if method != "PUT" {
				return nil, nil, errWebDAVNotExist
			}
		} else if response.StatusCode == 405 {
			return nil, nil, errWebDAVMethodNotAllowed
		}

		// Any other non-2xx status: log and retry with backoff.
		LOG_INFO("WEBDAV_RETRY", "URL request '%s %s' returned status code %d", method, uri, response.StatusCode)
		backoff = storage.retry(backoff)
	}
	return nil, nil, errWebDAVMaximumBackoff
}
|
||||||
|
|
||||||
|
// WebDAVProperties maps a DAV property name (its local XML name) to its raw value.
type WebDAVProperties map[string]string

// WebDAVPropValue is one property element inside a <prop> block; the element
// name is captured in XMLName and its inner XML in Value.
type WebDAVPropValue struct {
	XMLName xml.Name `xml:""`
	Value   string   `xml:",innerxml"`
}

// WebDAVProp is the <prop> element: the list of returned property values.
type WebDAVProp struct {
	PropList []WebDAVPropValue `xml:",any"`
}

// WebDAVPropStat is the <propstat> element wrapping a <prop>.
type WebDAVPropStat struct {
	Prop *WebDAVProp `xml:"prop"`
}

// WebDAVResponse is a single <response> entry of a PROPFIND multistatus reply.
type WebDAVResponse struct {
	Href     string          `xml:"href"`
	PropStat *WebDAVPropStat `xml:"propstat"`
}

// WebDAVMultiStatus is the top-level <multistatus> document returned by PROPFIND.
type WebDAVMultiStatus struct {
	Responses []WebDAVResponse `xml:"response"`
}
|
||||||
|
|
||||||
|
func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {
|
||||||
|
|
||||||
|
maxTries := 3
|
||||||
|
for tries := 0; ; tries++ {
|
||||||
|
propfind := "<prop>"
|
||||||
|
for _, p := range properties {
|
||||||
|
propfind += fmt.Sprintf("<%s/>", p)
|
||||||
|
}
|
||||||
|
propfind += "</prop>"
|
||||||
|
|
||||||
|
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
|
||||||
|
|
||||||
|
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
defer readCloser.Close()
|
||||||
|
defer io.Copy(ioutil.Discard, readCloser)
|
||||||
|
|
||||||
|
object := WebDAVMultiStatus{}
|
||||||
|
err = xml.NewDecoder(readCloser).Decode(&object)
|
||||||
|
if err != nil {
|
||||||
|
if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
|
||||||
|
LOG_WARN("WEBDAV_RETRY", "Retrying on %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if object.Responses == nil || len(object.Responses) == 0 {
|
||||||
|
return nil, errors.New("no WebDAV responses")
|
||||||
|
}
|
||||||
|
|
||||||
|
responses := make(map[string]WebDAVProperties)
|
||||||
|
|
||||||
|
for _, responseTag := range object.Responses {
|
||||||
|
|
||||||
|
if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
|
||||||
|
return nil, errors.New("no WebDAV properties")
|
||||||
|
}
|
||||||
|
|
||||||
|
properties := make(WebDAVProperties)
|
||||||
|
for _, prop := range responseTag.PropStat.Prop.PropList {
|
||||||
|
properties[prop.XMLName.Local] = prop.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
responseKey := responseTag.Href
|
||||||
|
responses[responseKey] = properties
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return responses, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListFiles return the list of files and subdirectories under 'dir'. A subdirectories returned must have a trailing '/', with
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recusively or non-recusively.
func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if dir[len(dir)-1] != '/' {
		dir += "/"
	}
	// Depth 1 lists the directory itself plus its immediate children.
	properties, err := storage.getProperties(dir, 1, "getcontentlength", "resourcetype")
	if err != nil {
		return nil, nil, err
	}

	// Response hrefs are assumed to have the form "/<storageDir><dir>...";
	// the +1 accounts for the leading '/'. Entries no longer than the prefix
	// (e.g. the listed directory itself) are skipped below.
	prefixLength := len(storage.storageDir) + len(dir) + 1

	for file, m := range properties {
		if len(file) <= prefixLength {
			continue
		}

		isDir := false
		size := 0
		if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
			isDir = true
		} else if length, exist := m["getcontentlength"]; exist {
			if length == "" {
				// NOTE(review): an empty getcontentlength is treated as a
				// directory — presumably for servers that omit the length
				// on collections; verify against real servers.
				isDir = true
			} else {
				size, _ = strconv.Atoi(length)
			}
		} else {
			// Neither property is present: skip the entry.
			continue
		}

		if !isDir {
			// Under 'snapshots' only subdirectories are reported.
			if dir != "snapshots/" {
				files = append(files, file[prefixLength:])
				sizes = append(sizes, int64(size))
			}
		} else {
			// This is a dir
			file := file[prefixLength:]
			if file[len(file)-1] != '/' {
				file += "/"
			}
			files = append(files, file)
			sizes = append(sizes, int64(0))

			// Add the directory to the directory cache
			storage.directoryCacheLock.Lock()
			storage.directoryCache[dir + file] = 1
			storage.directoryCacheLock.Unlock()

		}
	}

	return files, sizes, nil
}
|
||||||
|
|
||||||
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	properties, err := storage.getProperties(filePath, 0, "getcontentlength", "resourcetype")
	if err != nil {
		if err == errWebDAVNotExist {
			return false, false, 0, nil
		}
		if err == errWebDAVMovedPermanently {
			// This must be a directory
			return true, true, 0, nil
		}
		return false, false, 0, err
	}

	// Responses are keyed by absolute href: "/<storageDir><filePath>".
	m, exist := properties["/"+storage.storageDir+filePath]

	// If no properties exist for the given filePath, remove the trailing / from filePath and search again
	if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' {
		m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]]
	}

	if !exist {
		return false, false, 0, nil
	} else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
		return true, true, 0, nil
	} else if length, exist := m["getcontentlength"]; exist && length != "" {
		value, _ := strconv.Atoi(length)
		return true, false, int64(value), nil
	} else {
		// No length and not marked as a collection: treated as a directory.
		return true, true, 0, nil
	}
}
|
||||||
|
|
||||||
|
// DeleteFile deletes the file or directory at 'filePath'.
|
||||||
|
func (storage *WebDAVStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||||
|
readCloser, _, err := storage.sendRequest("DELETE", filePath, 0, []byte(""))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, readCloser)
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveFile renames the file.
|
||||||
|
func (storage *WebDAVStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
|
readCloser, _, err := storage.sendRequest("MOVE", from, 0, []byte(to))
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, readCloser)
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// createParentDirectory creates the parent directory if it doesn't exist in the cache
|
||||||
|
func (storage *WebDAVStorage) createParentDirectory(threadIndex int, dir string) (err error) {
|
||||||
|
|
||||||
|
found := strings.LastIndex(dir, "/")
|
||||||
|
if found == -1 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
parent := dir[:found]
|
||||||
|
|
||||||
|
return storage.CreateDirectory(threadIndex, parent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CreateDirectory creates a new directory.
func (storage *WebDAVStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	// Normalize: strip all trailing slashes so cache keys are consistent.
	for dir != "" && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	if dir == "" {
		return nil
	}

	// Skip the MKCOL round trip for directories already known to exist.
	storage.directoryCacheLock.Lock()
	_, exist := storage.directoryCache[dir]
	storage.directoryCacheLock.Unlock()

	if exist {
		return nil
	}

	// If there is an error in creating the parent directory, proceed anyway
	storage.createParentDirectory(threadIndex, dir)

	readCloser, _, err := storage.sendRequest("MKCOL", dir, 0, []byte(""))
	if err != nil {
		if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently || err == io.EOF {
			// We simply ignore these errors and assume that the directory already exists
			LOG_TRACE("WEBDAV_MKDIR", "Can't create directory %s: %v; error ignored", dir, err)
			storage.directoryCacheLock.Lock()
			storage.directoryCache[dir] = 1
			storage.directoryCacheLock.Unlock()
			return nil
		}
		return err
	}
	// Drain and close so the connection can be reused.
	io.Copy(ioutil.Discard, readCloser)
	readCloser.Close()

	// Remember the directory so later calls return without a request.
	storage.directoryCacheLock.Lock()
	storage.directoryCache[dir] = 1
	storage.directoryCacheLock.Unlock()
	return nil
}
|
||||||
|
|
||||||
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
|
func (storage *WebDAVStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
|
readCloser, _, err := storage.sendRequest("GET", filePath, 0, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
|
func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
|
|
||||||
|
// If there is an error in creating the parent directory, proceed anyway
|
||||||
|
storage.createParentDirectory(threadIndex, filePath)
|
||||||
|
|
||||||
|
readCloser, _, err := storage.sendRequest("PUT", filePath, 0, content)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
io.Copy(ioutil.Discard, readCloser)
|
||||||
|
readCloser.Close()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
|
// managing snapshots.
|
||||||
|
func (storage *WebDAVStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
|
// If the 'MoveFile' method is implemented.
|
||||||
|
func (storage *WebDAVStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
|
// If the storage can guarantee strong consistency.
|
||||||
|
func (storage *WebDAVStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
|
// If the storage supports fast listing of files names.
|
||||||
|
func (storage *WebDAVStorage) IsFastListing() bool { return false }
|
||||||
|
|
||||||
|
// Enable the test mode.
|
||||||
|
func (storage *WebDAVStorage) EnableTestMode() {}
|
||||||
Reference in New Issue
Block a user