Mirror of https://github.com/gilbertchen/duplicacy (synced 2025-12-06 00:03:38 +00:00)

Compare commits: download_p ... v2.7.1 (157 commits)
Commit SHA1s:

7da58c6d49 4402be6763 3abec4e37a dd40d4cd2f 923e906b7e 0da55f95ab 2f407d6af9 bb680538ee
7e372edd68 836a785798 e0a72efb34 d839f26b5a 6ad698328f ace1ba5848 04a858b555 1fedfd1b1a
3fd3f6b267 e3e3e97046 3f29ec2ffb 947006411b 6841c989c6 d0b3b5dc2e 73ae3f809e 67a3103467
6ee01a2e74 b7d820195a 16d2c14c5a eecbb8fa99 97bae5f1a3 40243fb043 403df1fd06 4369bcfc0b
d2b08aebee 948994c2b6 ca4d004aca ce472fe375 923a6fbc5b 670cbcd776 fd469bae9e acef01770a
1eb1fb14a8 8b489f04eb 089e19f8e6 1da7e2b536 ed8b4393be 5e28dc4911 f2f07a120d 153f6a2d20
5d45999077 1adcf56890 09e3cdfebf fe854d469d 76f1274e13 9c3122b814 6ca8b8dff0 4ae16dec7f
dae040681d 51cbf73caa 835af11334 4c3557eb80 eebcece9e0 8c80470c29 bcb889272d 79d8654a12
6bf0d2265c 749db78a1f 0a51bd8d1a 7208adbce2 e827662869 57dd5ba927 01a37b7828 57cd20bb84
0e970da222 e880636502 810303ce25 ffac83dd80 05674871fe 22d6f3abfc d26ffe2cff a35f6c27be
808ae4eb75 6699e2f440 733b68be2c b61906c99e a0a07d18cc a6ce64e715 499b612a0d 46ce0ba1fb
cc88abd547 e888b6d7e5 aa07feeac0 d43fe1a282 7719bb9f29 504d07bd51 0abb4099f6 694494ea54
165152493c e02041f4ed a99f059b52 f022a6f684 791c61eecb 6ad27adaea 9abfbe1ee0 b32c3b2cd5
9baafdafa2 ca7d927840 426110e961 0ca9cd476e abf9a94fc9 9a0d60ca84 90833f9d86 58387c0951
81bb188211 5821cad8c5 662805fbbd fc35ddf7d1 6efcd37c5c 58558b8a2f 045be3905b 4da7f7b6f9
41668d4bbd 9d4ac34f4b eba5aa6eea 47c4c25d8b 37781f9540 282fe4edd2 33c71ca5f8 6e7d45caac
8e9caea201 18ba415f56 458687d543 57a408a577 a73ed462b6 e56efc1d3a bb58f42a37 22e8d9e60a
4eb174cec5 6fd3fbd568 a6fe3d785e 1da151f9d9 4b69c1162e abcb4d75c1 10d2058738 43a5ffe011
d16273fe2b 2eb8ea6094 a55ac1b7ad 2b56d576c7 82c6c15f1c df7487cc0b 5e8baab4ec e1fa39008d
aaebf4510c 96dd28995b 166f6e6266 86c89f43a0 a1efbe3b73
.github/ISSUE_TEMPLATE.md (vendored): 18 changed lines
@@ -1,5 +1,17 @@
Please submit an issue for bug reports or feature requests. If you have any questions please post them on https://forum.duplicacy.com.
---
name: Please use the official forum
about: Please use the official forum instead of Github
title: 'Please use the official forum'
labels: ''
assignees: ''

When you're reporting a bug, please specify the OS, version, command line arguments, or any info that you think is helpful for the diagnosis. If Duplicacy reports an error, please post the program output here.
---

Note that this repository hosts the CLI version of Duplicacy only. If you're reporting anything related to the GUI version, please visit https://forum.duplicacy.com.

Please **use the [Duplicacy Forum](https://forum.duplicacy.com/)** when reporting bugs, making feature requests, asking for help or simply praising Duplicacy for its ease of use.

We strongly encourage you to create an account on the forum and use that platform for discussion as there is a higher chance that someone there will talk to you.

There is a handful of people watching the Github Issues and we are in the process of moving **all** of them to the forum as well. Most likely you will not receive an answer here or it will be very slow and you will be pointed to the forum.

We have already created a comprehensive [Guide](https://forum.duplicacy.com/t/duplicacy-user-guide/1197), and a [How-To](https://forum.duplicacy.com/c/how-to) category which stores more wisdom than these issues on Github.
@@ -14,3 +14,4 @@ Duplicacy is based on the following open source projects:
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|https://github.com/go-ole/go-ole | MIT |
|https://github.com/ncw/swift | MIT |
Gopkg.lock (generated): 154 changed lines
@@ -7,17 +7,11 @@
|
||||
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
|
||||
version = "v0.16.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = ["version"]
|
||||
revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
|
||||
version = "v14.2.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Azure/go-autorest"
|
||||
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
|
||||
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
|
||||
version = "v10.2.0"
|
||||
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
|
||||
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
|
||||
version = "v10.15.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -27,9 +21,9 @@
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
|
||||
revision = "a32b1dcd091264b5dee7b386149b6cc3823395c9"
|
||||
version = "v1.12.31"
|
||||
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
|
||||
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
|
||||
version = "v1.30.7"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/bkaradzic/go-lz4"
|
||||
@@ -40,14 +34,14 @@
|
||||
[[projects]]
|
||||
name = "github.com/dgrijalva/jwt-go"
|
||||
packages = ["."]
|
||||
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
|
||||
version = "v3.1.0"
|
||||
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
|
||||
version = "v3.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/azure-sdk-for-go"
|
||||
packages = ["storage"]
|
||||
revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"
|
||||
packages = ["storage","version"]
|
||||
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -56,10 +50,9 @@
|
||||
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/go-dropbox"
|
||||
packages = ["."]
|
||||
revision = "90711b603312b1f973f3a5da3793ac4f1e5c2f2a"
|
||||
revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gilbertchen/go-ole"
|
||||
@@ -71,7 +64,7 @@
|
||||
branch = "master"
|
||||
name = "github.com/gilbertchen/go.dbus"
|
||||
packages = ["."]
|
||||
revision = "9e442e6378618c083fd3b85b703ffd202721fb17"
|
||||
revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
@@ -98,33 +91,45 @@
|
||||
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/go-ini/ini"
|
||||
packages = ["."]
|
||||
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
|
||||
version = "v1.32.0"
|
||||
branch = "master"
|
||||
name = "github.com/golang/groupcache"
|
||||
packages = ["lru"]
|
||||
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
|
||||
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
|
||||
revision = "84668698ea25b64748563aa20726db66a6b8d299"
|
||||
version = "v1.3.5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/googleapis/gax-go"
|
||||
packages = ["."]
|
||||
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
|
||||
version = "v2.0.0"
|
||||
packages = [".","v2"]
|
||||
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
|
||||
version = "v2.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/jmespath/go-jmespath"
|
||||
packages = ["."]
|
||||
revision = "0b12d6b5"
|
||||
revision = "c2b33e84"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/klauspost/cpuid"
|
||||
packages = ["."]
|
||||
revision = "750c0591dbbd50ef88371c665ad49e426a4b830b"
|
||||
version = "v1.3.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/klauspost/reedsolomon"
|
||||
packages = ["."]
|
||||
revision = "7daa20bf74337a939c54f892a2eca9d9b578eb7f"
|
||||
version = "v1.9.9"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/kr/fs"
|
||||
packages = ["."]
|
||||
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
|
||||
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/marstr/guid"
|
||||
@@ -138,23 +143,35 @@
|
||||
packages = ["."]
|
||||
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/minio/highwayhash"
|
||||
packages = ["."]
|
||||
revision = "86a2a969d04373bf05ca722517d30fb1c9a3e4f9"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/mmcloughlin/avo"
|
||||
packages = ["attr","build","buildtags","gotypes","internal/prnt","internal/stack","ir","operand","pass","printer","reg","src","x86"]
|
||||
revision = "443f81d771042b019379ae4bfcd0a591cb47c88a"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ncw/swift"
|
||||
packages = ["."]
|
||||
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
|
||||
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
|
||||
version = "v1.0.50"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
revision = "614d223910a179a466c1767a985424175c39b465"
|
||||
version = "v0.9.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/sftp"
|
||||
packages = ["."]
|
||||
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
|
||||
version = "1.0.0"
|
||||
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
|
||||
version = "v1.11.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/satori/go.uuid"
|
||||
@@ -168,63 +185,86 @@
|
||||
packages = ["."]
|
||||
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
|
||||
|
||||
[[projects]]
|
||||
name = "go.opencensus.io"
|
||||
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
|
||||
revision = "d835ff86be02193d324330acdb7d65546b05f814"
|
||||
version = "v0.22.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","ssh","ssh/agent","ssh/terminal"]
|
||||
revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94"
|
||||
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
|
||||
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/mod"
|
||||
packages = ["semver"]
|
||||
revision = "859b3ef565e237f9f1a0fb6b55385c497545680d"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
|
||||
revision = "9dfe39835686865bff950a07b394c12a98ddc811"
|
||||
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
|
||||
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [".","google","internal","jws","jwt"]
|
||||
revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
|
||||
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = ["unix","windows"]
|
||||
revision = "82aafbf43bf885069dc71b7e7c2f9d7a614d47da"
|
||||
packages = ["cpu","unix","windows"]
|
||||
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/text"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||
revision = "88f656faf3f37f690df1a32515b479415e1a6769"
|
||||
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
|
||||
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
|
||||
version = "v0.3.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/tools"
|
||||
packages = ["go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/event","internal/event/core","internal/event/keys","internal/event/label","internal/gocommand","internal/packagesinternal","internal/typesinternal"]
|
||||
revision = "5d1fdd8fa3469142b9369713b23d8413d6d83189"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/xerrors"
|
||||
packages = [".","internal"]
|
||||
revision = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/api"
|
||||
packages = ["drive/v3","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
|
||||
revision = "17b5f22a248d6d3913171c1a557552ace0d9c806"
|
||||
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
|
||||
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
|
||||
version = "v0.21.0"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
|
||||
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
|
||||
version = "v1.0.0"
|
||||
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
|
||||
version = "v1.6.5"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "google.golang.org/genproto"
|
||||
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
|
||||
revision = "891aceb7c239e72692819142dfca057bdcbfcb96"
|
||||
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
|
||||
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/grpc"
|
||||
packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
|
||||
revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
|
||||
version = "v1.8.0"
|
||||
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
|
||||
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
|
||||
version = "v1.28.1"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
|
||||
inputs-digest = "0e6ea2be64dedc36cb9192f1d410917ea72896302011e55b6df5e4c00c1c2f1c"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
||||
|
||||
Gopkg.toml: 14 changed lines
@@ -31,7 +31,7 @@

[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.12.31"
version = "1.30.7"

[[constraint]]
name = "github.com/bkaradzic/go-lz4"

@@ -46,8 +46,8 @@
name = "github.com/gilbertchen/cli"

[[constraint]]
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"

[[constraint]]
name = "github.com/gilbertchen/go-ole"

@@ -75,7 +75,7 @@

[[constraint]]
name = "github.com/pkg/sftp"
version = "1.0.0"
version = "1.10.1"

[[constraint]]
branch = "master"

@@ -86,9 +86,13 @@
name = "golang.org/x/net"

[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"

[[constraint]]
branch = "master"
name = "google.golang.org/api"
version = "0.21.0"

[[constraint]]
name = "google.golang.org/grpc"
version = "1.28.0"
@@ -1,8 +1,7 @@
Copyright © 2017 Acrosync LLC

* Free for personal use or commercial trial
* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
README.md: 15 changed lines
@@ -2,7 +2,7 @@

Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).

This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Web GUI frontend built for Windows, macOS, and Linux, available from https://duplicacy.com.

There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.

@@ -10,14 +10,15 @@ There is a special edition of Duplicacy developed for VMware vSphere (ESXi) name

There are 3 core advantages of Duplicacy over any other open-source or commercial backup tools:

* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature literally turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.
* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.

* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The lack of a centralized chunk database not only makes the implementation less error-prone, but also produces a highly maintainable piece of software with plenty of room for development of new features and usability enhancements.
* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The avoidance of a centralized chunk database not only produces a simpler and less error-prone implementation, but also makes it easier to develop advanced features, such as [Asymmetric Encryption](https://github.com/gilbertchen/duplicacy/wiki/RSA-encryption) for stronger encryption and [Erasure Coding](https://github.com/gilbertchen/duplicacy/wiki/Erasure-coding) for resilient data protection.

* Duplicacy is fast. While the performance wasn't the top-priority design goal, Duplicacy has been shown to outperform other backup tools by a considerable margin, as indicated by the following results obtained from a [benchmarking experiment](https://github.com/gilbertchen/benchmarking) backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other open-source backup tools.

[](https://github.com/gilbertchen/benchmarking)

## Getting Started

* [A brief introduction](https://github.com/gilbertchen/duplicacy/wiki/Quick-Start)

@@ -44,6 +45,7 @@ Duplicacy currently provides the following storage backends:

* WebDAV (under beta testing)
* pcloud (via WebDAV)
* Box.com (via WebDAV)
* File Fabric by [Storage Made Easy](https://storagemadeeasy.com/)

Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.

@@ -64,9 +66,9 @@ to find the differences from previous backups and only then uploading the differ

[Duplicati](https://duplicati.com) is one of the first backup tools that adopt the chunk-based approach to split files into chunks which are then uploaded to the storage. The chunk-based approach got the incremental backup model right in the sense that every incremental backup is actually a full snapshot. As Duplicati splits files into fixed-size chunks, deletions or insertions of a few bytes will foil the deduplication. Cloud support is extensive, but multiple clients can't back up to the same storage location.

[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati, but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup is also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup are also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.

[restic](https://restic.github.io) is a more recent addition. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks), and because a chunk database is used, deduplication isn't real-time (different clients sharing the same files will upload different copies of the same chunks). A prune operation will completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.

@@ -90,8 +92,7 @@ The following table compares the feature lists of all these backup tools:

## License

* Free for personal use or commercial trial
* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
@@ -159,6 +159,10 @@ func setGlobalOptions(context *cli.Context) {
|
||||
}()
|
||||
}
|
||||
|
||||
for _, logID := range context.GlobalStringSlice("suppress") {
|
||||
duplicacy.SuppressLog(logID)
|
||||
}
|
||||
|
||||
duplicacy.RunInBackground = context.GlobalBool("background")
|
||||
}
|
||||
|
||||
@@ -201,13 +205,29 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
duplicacy.LOG_WARN("SCRIPT_ERROR", "Failed to run script: %v", err)
|
||||
duplicacy.LOG_ERROR("SCRIPT_ERROR", "Failed to run %s script: %v", script, err)
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func loadRSAPrivateKey(keyFile string, passphrase string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
|
||||
if keyFile == "" {
|
||||
return
|
||||
}
|
||||
|
||||
prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
|
||||
if passphrase == "" {
|
||||
passphrase = duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
|
||||
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
|
||||
duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
|
||||
} else {
|
||||
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
|
||||
}
|
||||
|
||||
}
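The loadRSAPrivateKey helper above, together with the -key flags added further down, wires RSA key pairs into backup and restore. A minimal sketch of the underlying idea, using the Go standard library's RSA-OAEP rather than Duplicacy's actual chunk format: material encrypted with the public key (available to every backup client) can only be recovered with the private key, which is why listing snapshots works without -key while decrypting file contents does not.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"fmt"
)

func main() {
	// Generate a key pair; in practice the public key is given to `init -key`
	// and the private key only to commands that must decrypt chunks.
	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Assumption for illustration: some per-chunk symmetric key material.
	chunkKey := []byte("per-chunk encryption secret")

	// Anyone holding the public key can encrypt...
	encrypted, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, &privateKey.PublicKey, chunkKey, nil)
	if err != nil {
		panic(err)
	}

	// ...but only the private key holder can decrypt.
	decrypted, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, privateKey, encrypted, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("recovered %d bytes: %s\n", len(decrypted), decrypted)
}
```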
func initRepository(context *cli.Context) {
|
||||
configRepository(context, true)
|
||||
}
|
||||
@@ -319,6 +339,11 @@ func configRepository(context *cli.Context, init bool) {
|
||||
if preference.Encrypted {
|
||||
prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
|
||||
storagePassword = duplicacy.GetPassword(preference, "password", prompt, false, true)
|
||||
} else {
|
||||
if context.String("key") != "" {
|
||||
duplicacy.LOG_ERROR("STORAGE_CONFIG", "RSA encryption can't be enabled with an unencrypted storage")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
existingConfig, _, err := duplicacy.DownloadConfig(storage, storagePassword)
|
||||
@@ -433,8 +458,26 @@ func configRepository(context *cli.Context, init bool) {
|
||||
if iterations == 0 {
|
||||
iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
|
||||
}
|
||||
|
||||
dataShards := 0
|
||||
parityShards := 0
|
||||
shards := context.String("erasure-coding")
|
||||
if shards != "" {
|
||||
shardsRegex := regexp.MustCompile(`^([0-9]+):([0-9]+)$`)
|
||||
matched := shardsRegex.FindStringSubmatch(shards)
|
||||
if matched == nil {
|
||||
duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
|
||||
} else {
|
||||
dataShards, _ = strconv.Atoi(matched[1])
|
||||
parityShards, _ = strconv.Atoi(matched[2])
|
||||
if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
|
||||
duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
|
||||
minimumChunkSize, storagePassword, otherConfig, bitCopy)
|
||||
minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
|
||||
}
|
||||
|
||||
duplicacy.Preferences = append(duplicacy.Preferences, preference)
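The -erasure-coding <data shards>:<parity shards> option validated above maps onto Reed-Solomon coding, and this comparison pins github.com/klauspost/reedsolomon in Gopkg.lock. A hedged sketch of how such shards can be produced and repaired with that library; the split/rebuild shown here illustrates the general technique, not Duplicacy's chunk layout.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	// 5 data shards and 2 parity shards, as in `-erasure-coding 5:2`.
	enc, err := reedsolomon.New(5, 2)
	if err != nil {
		panic(err)
	}

	original := bytes.Repeat([]byte("chunk data "), 100)

	// Split the payload into 5 data shards, then compute 2 parity shards.
	shards, err := enc.Split(original)
	if err != nil {
		panic(err)
	}
	if err := enc.Encode(shards); err != nil {
		panic(err)
	}

	// Simulate losing up to 2 shards to storage corruption...
	shards[0], shards[6] = nil, nil

	// ...and rebuild them from the surviving shards.
	if err := enc.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, err := enc.Verify(shards)
	fmt.Println("recovered:", ok, err)
}
```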
@@ -532,7 +575,18 @@ func setPreference(context *cli.Context) {
|
||||
newPreference.DoNotSavePassword = triBool.IsTrue()
|
||||
}
|
||||
|
||||
newPreference.NobackupFile = context.String("nobackup-file")
|
||||
if context.String("nobackup-file") != "" {
|
||||
newPreference.NobackupFile = context.String("nobackup-file")
|
||||
}
|
||||
|
||||
if context.String("filters") != "" {
|
||||
newPreference.FiltersFile = context.String("filters")
|
||||
}
|
||||
|
||||
triBool = context.Generic("exclude-by-attribute").(*TriBool)
|
||||
if triBool.IsSet() {
|
||||
newPreference.ExcludeByAttribute = triBool.IsTrue()
|
||||
}
|
||||
|
||||
key := context.String("key")
|
||||
value := context.String("value")
|
||||
@@ -715,7 +769,7 @@ func backupRepository(context *cli.Context) {
|
||||
uploadRateLimit := context.Int("limit-rate")
|
||||
enumOnly := context.Bool("enum-only")
|
||||
storage.SetRateLimits(0, uploadRateLimit)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
@@ -768,6 +822,7 @@ func restoreRepository(context *cli.Context) {
|
||||
setOwner := !context.Bool("ignore-owner")
|
||||
|
||||
showStatistics := context.Bool("stats")
|
||||
persist := context.Bool("persist")
|
||||
|
||||
var patterns []string
|
||||
for _, pattern := range context.Args() {
|
||||
@@ -782,33 +837,27 @@ func restoreRepository(context *cli.Context) {
|
||||
pattern = pattern[1:]
|
||||
}
|
||||
|
||||
if duplicacy.IsUnspecifiedFilter(pattern) {
|
||||
pattern = "+" + pattern
|
||||
}
|
||||
|
||||
if duplicacy.IsEmptyFilter(pattern) {
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
|
||||
valid, err := duplicacy.IsValidRegex(pattern[2:])
|
||||
if !valid || err != nil {
|
||||
duplicacy.LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
|
||||
}
|
||||
}
|
||||
|
||||
patterns = append(patterns, pattern)
|
||||
|
||||
}
|
||||
|
||||
patterns = duplicacy.ProcessFilterLines(patterns, make([]string, 0))
|
||||
|
||||
duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))
|
||||
|
||||
duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
|
||||
|
||||
storage.SetRateLimits(context.Int("limit-rate"), 0)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
|
||||
failed := backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns, persist)
|
||||
if failed > 0 {
|
||||
duplicacy.LOG_ERROR("RESTORE_FAIL", "%d file(s) were not restored correctly", failed)
|
||||
return
|
||||
}
|
||||
|
||||
runScript(context, preference.Name, "post")
|
||||
}
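The pattern loop above normalizes restore filters and validates any i:/e: regular-expression filters before use. A small self-contained sketch of patterns that would pass that validation; the concrete patterns are hypothetical examples, not ones taken from this repository.

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical restore patterns in the syntax handled above: '+' includes,
	// '-' excludes, and 'i:'/'e:' mark regular-expression filters that the
	// command validates before use.
	patterns := []string{"+src/", "-*.tmp", "i:\\.go$", "e:(^|/)\\.git/"}

	for _, p := range patterns {
		if len(p) > 2 && (p[:2] == "i:" || p[:2] == "e:") {
			// Mirror of the validation step: the regular expression must compile.
			if _, err := regexp.Compile(p[2:]); err != nil {
				fmt.Println("invalid filter:", p, err)
				continue
			}
		}
		fmt.Println("accepted filter:", p)
	}
}
```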
@@ -844,7 +893,7 @@ func listSnapshots(context *cli.Context) {
|
||||
tag := context.String("t")
|
||||
revisions := getRevisions(context)
|
||||
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", preference.ExcludeByAttribute)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
id := preference.SnapshotID
|
||||
@@ -857,6 +906,9 @@ func listSnapshots(context *cli.Context) {
|
||||
showFiles := context.Bool("files")
|
||||
showChunks := context.Bool("chunks")
|
||||
|
||||
// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
|
||||
loadRSAPrivateKey(context.String("key"), "", preference, backupManager, resetPassword)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
|
||||
|
||||
@@ -879,7 +931,12 @@ func checkSnapshots(context *cli.Context) {
|
||||
|
||||
runScript(context, preference.Name, "pre")
|
||||
|
||||
storage := duplicacy.CreateStorage(*preference, false, 1)
|
||||
threads := context.Int("threads")
|
||||
if threads < 1 {
|
||||
threads = 1
|
||||
}
|
||||
|
||||
storage := duplicacy.CreateStorage(*preference, false, threads)
|
||||
if storage == nil {
|
||||
return
|
||||
}
|
||||
@@ -892,9 +949,11 @@ func checkSnapshots(context *cli.Context) {
|
||||
tag := context.String("t")
|
||||
revisions := getRevisions(context)
|
||||
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
|
||||
|
||||
id := preference.SnapshotID
|
||||
if context.Bool("all") {
|
||||
id = ""
|
||||
@@ -905,11 +964,13 @@ func checkSnapshots(context *cli.Context) {
|
||||
showStatistics := context.Bool("stats")
|
||||
showTabular := context.Bool("tabular")
|
||||
checkFiles := context.Bool("files")
|
||||
checkChunks := context.Bool("chunks")
|
||||
searchFossils := context.Bool("fossils")
|
||||
resurrect := context.Bool("resurrect")
|
||||
persist := context.Bool("persist")
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
|
||||
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads, persist)
|
||||
|
||||
runScript(context, preference.Name, "post")
|
||||
}
|
||||
@@ -947,9 +1008,12 @@ func printFile(context *cli.Context) {
|
||||
snapshotID = context.String("id")
|
||||
}
|
||||
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
|
||||
file := ""
|
||||
@@ -1003,11 +1067,13 @@ func diff(context *cli.Context) {
|
||||
}
|
||||
|
||||
compareByHash := context.Bool("hash")
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile)
|
||||
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
|
||||
|
||||
runScript(context, preference.Name, "post")
|
||||
}
|
||||
@@ -1046,7 +1112,7 @@ func showHistory(context *cli.Context) {
|
||||
|
||||
revisions := getRevisions(context)
|
||||
showLocalHash := context.Bool("hash")
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
@@ -1109,7 +1175,7 @@ func pruneSnapshots(context *cli.Context) {
|
||||
os.Exit(ArgumentExitCode)
|
||||
}
|
||||
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
|
||||
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
|
||||
duplicacy.SavePassword(*preference, "password", password)
|
||||
|
||||
backupManager.SetupSnapshotCache(preference.Name)
|
||||
@@ -1129,9 +1195,14 @@ func copySnapshots(context *cli.Context) {
|
||||
os.Exit(ArgumentExitCode)
|
||||
}
|
||||
|
||||
threads := context.Int("threads")
|
||||
if threads < 1 {
|
||||
threads = 1
|
||||
uploadingThreads := context.Int("threads")
|
||||
if uploadingThreads < 1 {
|
||||
uploadingThreads = 1
|
||||
}
|
||||
|
||||
downloadingThreads := context.Int("download-threads")
|
||||
if downloadingThreads < 1 {
|
||||
downloadingThreads = 1
|
||||
}
|
||||
|
||||
repository, source := getRepositoryPreference(context, context.String("from"))
|
||||
@@ -1139,7 +1210,7 @@ func copySnapshots(context *cli.Context) {
|
||||
runScript(context, source.Name, "pre")
|
||||
|
||||
duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
|
||||
sourceStorage := duplicacy.CreateStorage(*source, false, threads)
|
||||
sourceStorage := duplicacy.CreateStorage(*source, false, downloadingThreads)
|
||||
if sourceStorage == nil {
|
||||
return
|
||||
}
|
||||
@@ -1149,10 +1220,12 @@ func copySnapshots(context *cli.Context) {
|
||||
sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
|
||||
}
|
||||
|
||||
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile)
|
||||
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "", false)
|
||||
sourceManager.SetupSnapshotCache(source.Name)
|
||||
duplicacy.SavePassword(*source, "password", sourcePassword)
|
||||
|
||||
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), source, sourceManager, false)
|
||||
|
||||
_, destination := getRepositoryPreference(context, context.String("to"))
|
||||
|
||||
if destination.Name == source.Name {
|
||||
@@ -1167,7 +1240,7 @@ func copySnapshots(context *cli.Context) {
|
||||
}
|
||||
|
||||
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
|
||||
destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
|
||||
destinationStorage := duplicacy.CreateStorage(*destination, false, uploadingThreads)
|
||||
if destinationStorage == nil {
|
||||
return
|
||||
}
|
||||
@@ -1182,7 +1255,7 @@ func copySnapshots(context *cli.Context) {
|
||||
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
|
||||
|
||||
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
|
||||
destinationPassword, destination.NobackupFile)
|
||||
destinationPassword, "", "", false)
|
||||
duplicacy.SavePassword(*destination, "password", destinationPassword)
|
||||
destinationManager.SetupSnapshotCache(destination.Name)
|
||||
|
||||
@@ -1192,7 +1265,7 @@ func copySnapshots(context *cli.Context) {
|
||||
snapshotID = context.String("id")
|
||||
}
|
||||
|
||||
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
|
||||
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, uploadingThreads, downloadingThreads)
|
||||
runScript(context, source.Name, "post")
|
||||
}
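With this change the copy command sizes the two storages independently: the new -download-threads option controls how many threads read chunks from the source storage, while -threads keeps its earlier meaning and now applies only to uploading to the destination, and CopySnapshots receives both counts.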
@@ -1360,6 +1433,16 @@ func main() {
|
||||
Usage: "initialize a new repository at the specified path rather than the current working directory",
|
||||
Argument: "<path>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA public key to encrypt file chunks",
|
||||
Argument: "<public key>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "erasure-coding",
|
||||
Usage: "enable erasure coding to protect against storage corruption",
|
||||
Argument: "<data shards>:<parity shards>",
|
||||
},
|
||||
},
|
||||
Usage: "Initialize the storage if necessary and the current directory as the repository",
|
||||
ArgsUsage: "<snapshot id> <storage url>",
|
||||
@@ -1467,6 +1550,20 @@ func main() {
|
||||
Usage: "restore from the specified storage instead of the default one",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA private key to decrypt file chunks",
|
||||
Argument: "<private key>",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "persist",
|
||||
Usage: "continue processing despite chunk errors or existing files (without -overwrite), reporting any affected files",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key-passphrase",
|
||||
Usage: "the passphrase to decrypt the RSA private key",
|
||||
Argument: "<private key passphrase>",
|
||||
},
|
||||
},
|
||||
Usage: "Restore the repository to a previously saved snapshot",
|
||||
ArgsUsage: "[--] [pattern] ...",
|
||||
@@ -1512,6 +1609,11 @@ func main() {
|
||||
Usage: "retrieve snapshots from the specified storage",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA private key to decrypt file chunks",
|
||||
Argument: "<private key>",
|
||||
},
|
||||
},
|
||||
Usage: "List snapshots",
|
||||
ArgsUsage: " ",
|
||||
@@ -1551,6 +1653,10 @@ func main() {
|
||||
Name: "files",
|
||||
Usage: "verify the integrity of every file",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "chunks",
|
||||
Usage: "verify the integrity of every chunk",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "stats",
|
||||
Usage: "show deduplication statistics (imply -all and all revisions)",
|
||||
@@ -1564,6 +1670,26 @@ func main() {
|
||||
Usage: "retrieve snapshots from the specified storage",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA private key to decrypt file chunks",
|
||||
Argument: "<private key>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key-passphrase",
|
||||
Usage: "the passphrase to decrypt the RSA private key",
|
||||
Argument: "<private key passphrase>",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "threads",
|
||||
Value: 1,
|
||||
Usage: "number of threads used to verify chunks",
|
||||
Argument: "<n>",
|
||||
},
|
||||
cli.BoolFlag{
|
||||
Name: "persist",
|
||||
Usage: "continue processing despite chunk errors, reporting any affected (corrupted) files",
|
||||
},
|
||||
},
|
||||
Usage: "Check the integrity of snapshots",
|
||||
ArgsUsage: " ",
|
||||
@@ -1587,6 +1713,16 @@ func main() {
|
||||
Usage: "retrieve the file from the specified storage",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA private key to decrypt file chunks",
|
||||
Argument: "<private key>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key-passphrase",
|
||||
Usage: "the passphrase to decrypt the RSA private key",
|
||||
Argument: "<private key passphrase>",
|
||||
},
|
||||
},
|
||||
Usage: "Print to stdout the specified file, or the snapshot content if no file is specified",
|
||||
ArgsUsage: "[<file>]",
|
||||
@@ -1615,6 +1751,16 @@ func main() {
|
||||
Usage: "retrieve files from the specified storage",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA private key to decrypt file chunks",
|
||||
Argument: "<private key>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key-passphrase",
|
||||
Usage: "the passphrase to decrypt the RSA private key",
|
||||
Argument: "<private key passphrase>",
|
||||
},
|
||||
},
|
||||
Usage: "Compare two snapshots or two revisions of a file",
|
||||
ArgsUsage: "[<file>]",
|
||||
@@ -1779,6 +1925,16 @@ func main() {
|
||||
Usage: "specify the path of the repository (instead of the current working directory)",
|
||||
Argument: "<path>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA public key to encrypt file chunks",
|
||||
Argument: "<public key>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "erasure-coding",
|
||||
Usage: "enable erasure coding to protect against storage corruption",
|
||||
Argument: "<data shards>:<parity shards>",
|
||||
},
|
||||
},
|
||||
Usage: "Add an additional storage to be used for the existing repository",
|
||||
ArgsUsage: "<storage name> <snapshot id> <storage url>",
|
||||
@@ -1818,6 +1974,12 @@ func main() {
|
||||
Argument: "<file name>",
|
||||
Value: "",
|
||||
},
|
||||
cli.GenericFlag{
|
||||
Name: "exclude-by-attribute",
|
||||
Usage: "Exclude files based on file attributes. (macOS only, com_apple_backup_excludeItem)",
|
||||
Value: &TriBool{},
|
||||
Arg: "true",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "add a key/password whose value is supplied by the -value option",
|
||||
@@ -1831,6 +1993,11 @@ func main() {
|
||||
Usage: "use the specified storage instead of the default one",
|
||||
Argument: "<storage name>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "filters",
|
||||
Usage: "specify the path of the filters file containing include/exclude patterns",
|
||||
Argument: "<file path>",
|
||||
},
|
||||
},
|
||||
Usage: "Change the options for the default or specified storage",
|
||||
ArgsUsage: " ",
|
||||
@@ -1877,6 +2044,22 @@ func main() {
|
||||
Usage: "number of uploading threads",
|
||||
Argument: "<n>",
|
||||
},
|
||||
cli.IntFlag{
|
||||
Name: "download-threads",
|
||||
Value: 1,
|
||||
Usage: "number of downloading threads",
|
||||
Argument: "<n>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key",
|
||||
Usage: "the RSA private key to decrypt file chunks from the source storage",
|
||||
Argument: "<private key>",
|
||||
},
|
||||
cli.StringFlag{
|
||||
Name: "key-passphrase",
|
||||
Usage: "the passphrase to decrypt the RSA private key",
|
||||
Argument: "<private key passphrase>",
|
||||
},
|
||||
},
|
||||
Usage: "Copy snapshots between compatible storages",
|
||||
ArgsUsage: " ",
|
||||
@@ -1985,13 +2168,18 @@ func main() {
|
||||
Name: "comment",
|
||||
Usage: "add a comment to identify the process",
|
||||
},
|
||||
cli.StringSliceFlag{
|
||||
Name: "suppress, s",
|
||||
Usage: "suppress logs with the specified id",
|
||||
Argument: "<id>",
|
||||
},
|
||||
}
|
||||
|
||||
app.HideVersion = true
|
||||
app.Name = "duplicacy"
|
||||
app.HelpName = "duplicacy"
|
||||
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
|
||||
app.Version = "2.1.2" + " (" + GitCommit + ")"
|
||||
app.Version = "2.7.1" + " (" + GitCommit + ")"
|
||||
|
||||
// If the program is interrupted, call the RunAtError function.
|
||||
c := make(chan os.Signal, 1)
|
||||
|
||||
@@ -166,9 +166,21 @@ func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chun
|
||||
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
|
||||
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||
return blob.CreateBlockBlobFromReader(reader, nil)
|
||||
|
||||
tries := 0
|
||||
|
||||
for {
|
||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
|
||||
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||
err = blob.CreateBlockBlobFromReader(reader, nil)
|
||||
|
||||
if err == nil || !strings.Contains(err.Error(), "write: broken pipe") || tries >= 3 {
|
||||
return err
|
||||
}
|
||||
|
||||
LOG_INFO("AZURE_RETRY", "Connection unexpectedly terminated: %v; retrying", err)
|
||||
tries++
|
||||
}
|
||||
|
||||
}
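With this retry loop, an Azure upload that fails with a transient "write: broken pipe" error is logged and retried up to three more times with a freshly created rate-limited reader; any other error, or a further failure after those retries, is returned to the caller as before.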
@@ -5,19 +5,22 @@
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
"fmt"
|
||||
"bytes"
|
||||
"time"
|
||||
"sync"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"net/url"
|
||||
"net/http"
|
||||
"math/rand"
|
||||
"io/ioutil"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/base64"
|
||||
)
|
||||
|
||||
type B2Error struct {
|
||||
@@ -39,67 +42,115 @@ var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_acco
|
||||
|
||||
type B2Client struct {
|
||||
HTTPClient *http.Client
|
||||
|
||||
AccountID string
|
||||
ApplicationKeyID string
|
||||
ApplicationKey string
|
||||
BucketName string
|
||||
BucketID string
|
||||
StorageDir string
|
||||
|
||||
Lock sync.Mutex
|
||||
AuthorizationToken string
|
||||
APIURL string
|
||||
DownloadURL string
|
||||
BucketName string
|
||||
BucketID string
|
||||
IsAuthorized bool
|
||||
|
||||
UploadURL string
|
||||
UploadToken string
|
||||
UploadURLs []string
|
||||
UploadTokens []string
|
||||
|
||||
TestMode bool
|
||||
Threads int
|
||||
MaximumRetries int
|
||||
TestMode bool
|
||||
|
||||
LastAuthorizationTime int64
|
||||
}
|
||||
|
||||
func NewB2Client(applicationKeyID string, applicationKey string) *B2Client {
|
||||
// URL encode the given path but keep the slashes intact
|
||||
func B2Escape(path string) string {
|
||||
var components []string
|
||||
for _, c := range strings.Split(path, "/") {
|
||||
components = append(components, url.QueryEscape(c))
|
||||
}
|
||||
return strings.Join(components, "/")
|
||||
}
|
||||
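A minimal usage sketch of the B2Escape helper added above (illustrative only; the path is a made-up example). Each path component is escaped with url.QueryEscape while the '/' separators are left intact, so a name containing a space becomes a single escaped component:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// Standalone copy of the per-component escaping used by B2Escape above.
func b2Escape(path string) string {
	var components []string
	for _, c := range strings.Split(path, "/") {
		components = append(components, url.QueryEscape(c))
	}
	return strings.Join(components, "/")
}

func main() {
	// Spaces and other reserved characters are escaped; slashes survive intact.
	fmt.Println(b2Escape("snapshots/my laptop/1")) // prints: snapshots/my+laptop/1
}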
|
||||
func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
|
||||
|
||||
for storageDir != "" && storageDir[0] == '/' {
|
||||
storageDir = storageDir[1:]
|
||||
}
|
||||
|
||||
if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
|
||||
storageDir += "/"
|
||||
}
|
||||
|
||||
maximumRetries := 15
|
||||
if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
|
||||
maximumRetries, _ = strconv.Atoi(value)
|
||||
LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
|
||||
}
|
||||
|
||||
client := &B2Client{
|
||||
HTTPClient: http.DefaultClient,
|
||||
ApplicationKeyID: applicationKeyID,
|
||||
ApplicationKey: applicationKey,
|
||||
DownloadURL: downloadURL,
|
||||
StorageDir: storageDir,
|
||||
UploadURLs: make([]string, threads),
|
||||
UploadTokens: make([]string, threads),
|
||||
Threads: threads,
|
||||
MaximumRetries: maximumRetries,
|
||||
}
|
||||
return client
|
||||
}
|
||||
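For orientation, a hedged usage sketch of the new NewB2Client signature (the argument values and surrounding variables are placeholders, not taken from the diff). An empty downloadURL later falls back to the URL returned by b2_authorize_account, storageDir is normalized to drop a leading slash and gain a trailing one, and one upload URL/token slot is allocated per thread; DUPLICACY_B2_RETRIES (default 15) can override the retry budget:

// Fragment assuming the duplicacy package context; identifiers other than NewB2Client are placeholders.
os.Setenv("DUPLICACY_B2_RETRIES", "25") // optional override read by NewB2Client
client := NewB2Client(
	"0012345abcdef",   // application key id (placeholder)
	"K001secret",      // application key (placeholder)
	"",                // downloadURL: empty means use the one returned by b2_authorize_account
	"/backups/laptop", // storageDir: normalized to "backups/laptop/"
	4,                 // threads: sizes the per-thread UploadURLs and UploadTokens slices
)
_ = client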
|
||||
func (client *B2Client) retry(backoff int, response *http.Response) int {
|
||||
func (client *B2Client) getAPIURL() string {
|
||||
client.Lock.Lock()
|
||||
defer client.Lock.Unlock()
|
||||
return client.APIURL
|
||||
}
|
||||
|
||||
func (client *B2Client) getDownloadURL() string {
|
||||
client.Lock.Lock()
|
||||
defer client.Lock.Unlock()
|
||||
return client.DownloadURL
|
||||
}
|
||||
|
||||
func (client *B2Client) retry(retries int, response *http.Response) int {
|
||||
if response != nil {
|
||||
if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
|
||||
retryAfter, _ := strconv.Atoi(backoffList[0])
|
||||
if retryAfter >= 1 {
|
||||
time.Sleep(time.Duration(retryAfter) * time.Second)
|
||||
return 0
|
||||
return 1
|
||||
}
|
||||
}
|
||||
}
|
||||
if backoff == 0 {
|
||||
backoff = 1
|
||||
} else {
|
||||
backoff *= 2
|
||||
|
||||
if retries >= client.MaximumRetries + 1 {
|
||||
return 0
|
||||
}
|
||||
time.Sleep(time.Duration(backoff) * time.Second)
|
||||
return backoff
|
||||
retries++
|
||||
delay := 1 << uint(retries)
|
||||
if delay > 64 {
|
||||
delay = 64
|
||||
}
|
||||
delayInSeconds := (rand.Float32() + 1.0) * float32(delay) / 2.0
|
||||
|
||||
time.Sleep(time.Duration(delayInSeconds) * time.Second)
|
||||
return retries
|
||||
}
|
||||
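The rewritten retry above returns 0 once client.MaximumRetries is exhausted, honors a server-supplied Retry-After header directly, and otherwise sleeps for a jittered exponential delay capped at 64 seconds. A small standalone sketch of that delay math (illustrative, mirroring the hunk rather than calling it):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	// delay = 2^retries seconds, capped at 64; the actual sleep is drawn from [delay/2, delay).
	for retries := 1; retries <= 7; retries++ {
		delay := 1 << uint(retries)
		if delay > 64 {
			delay = 64
		}
		sleep := (rand.Float32() + 1.0) * float32(delay) / 2.0
		fmt.Printf("retry %d: sleep %.1fs (within [%d, %d) seconds)\n", retries, sleep, delay/2, delay)
	}
}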
|
||||
func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
|
||||
|
||||
switch method {
|
||||
case http.MethodGet:
|
||||
break
|
||||
case http.MethodHead:
|
||||
break
|
||||
case http.MethodPost:
|
||||
break
|
||||
default:
|
||||
return nil, nil, 0, fmt.Errorf("unhandled http request method: " + method)
|
||||
}
|
||||
func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
|
||||
io.ReadCloser, http.Header, int64, error) {
|
||||
|
||||
var response *http.Response
|
||||
|
||||
backoff := 0
|
||||
for i := 0; i < 8; i++ {
|
||||
var inputReader *bytes.Reader
|
||||
retries := 0
|
||||
for {
|
||||
var inputReader io.Reader
|
||||
isUpload := false
|
||||
|
||||
switch input.(type) {
|
||||
default:
|
||||
@@ -108,21 +159,43 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
inputReader = bytes.NewReader(jsonInput)
|
||||
case []byte:
|
||||
inputReader = bytes.NewReader(input.([]byte))
|
||||
case int:
|
||||
inputReader = bytes.NewReader([]byte(""))
|
||||
case []byte:
|
||||
isUpload = true
|
||||
inputReader = bytes.NewReader(input.([]byte))
|
||||
case *RateLimitedReader:
|
||||
isUpload = true
|
||||
rateLimitedReader := input.(*RateLimitedReader)
|
||||
rateLimitedReader.Reset()
|
||||
inputReader = rateLimitedReader
|
||||
}
|
||||
|
||||
request, err := http.NewRequest(method, url, inputReader)
|
||||
|
||||
if isUpload {
|
||||
if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
|
||||
err := client.getUploadURL(threadIndex)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
}
|
||||
requestURL = client.UploadURLs[threadIndex]
|
||||
}
|
||||
|
||||
request, err := http.NewRequest(method, requestURL, inputReader)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
if url == B2AuthorizationURL {
|
||||
if requestURL == B2AuthorizationURL {
|
||||
request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.ApplicationKeyID+":"+client.ApplicationKey)))
|
||||
} else if isUpload {
|
||||
request.ContentLength, _ = strconv.ParseInt(requestHeaders["Content-Length"], 10, 64)
|
||||
request.Header.Set("Authorization", client.UploadTokens[threadIndex])
|
||||
} else {
|
||||
client.Lock.Lock()
|
||||
request.Header.Set("Authorization", client.AuthorizationToken)
|
||||
client.Lock.Unlock()
|
||||
}
|
||||
|
||||
if requestHeaders != nil {
|
||||
@@ -133,7 +206,9 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
|
||||
|
||||
if client.TestMode {
|
||||
r := rand.Float32()
|
||||
if r < 0.5 {
|
||||
if r < 0.5 && isUpload {
|
||||
request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
|
||||
} else if r < 0.75 {
|
||||
request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
|
||||
} else {
|
||||
request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
|
||||
@@ -142,28 +217,51 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
|
||||
|
||||
response, err = client.HTTPClient.Do(request)
|
||||
if err != nil {
|
||||
if url != B2AuthorizationURL {
|
||||
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned an error: %v", url, err)
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
|
||||
// Don't retry when the first authorization request fails
|
||||
if requestURL == B2AuthorizationURL && !client.IsAuthorized {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
return nil, nil, 0, err
|
||||
|
||||
LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s' returned an error: %v", threadIndex, requestURL, err)
|
||||
|
||||
retries = client.retry(retries, response)
|
||||
if retries <= 0 {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
// Clear the upload URL to request a new one on retry
|
||||
if isUpload {
|
||||
client.UploadURLs[threadIndex] = ""
|
||||
client.UploadTokens[threadIndex] = ""
|
||||
}
|
||||
continue
|
||||
|
||||
}
|
||||
|
||||
if response.StatusCode < 300 {
|
||||
return response.Body, response.Header, response.ContentLength, nil
|
||||
}
|
||||
|
||||
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s %s' returned status code %d", method, url, response.StatusCode)
|
||||
e := &B2Error{}
|
||||
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
|
||||
LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned status code %d", threadIndex, method, requestURL, response.StatusCode)
|
||||
} else {
|
||||
LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned %d %s", threadIndex, method, requestURL, response.StatusCode, e.Message)
|
||||
}
|
||||
|
||||
io.Copy(ioutil.Discard, response.Body)
|
||||
response.Body.Close()
|
||||
|
||||
if response.StatusCode == 401 {
|
||||
if url == B2AuthorizationURL {
|
||||
if requestURL == B2AuthorizationURL {
|
||||
return nil, nil, 0, fmt.Errorf("Authorization failure")
|
||||
}
|
||||
client.AuthorizeAccount()
|
||||
continue
|
||||
|
||||
// Attempt authorization again. If it was skipped because a recent authorization already succeeded, fall through to the random backoff below
|
||||
_, allowed := client.AuthorizeAccount(threadIndex)
|
||||
if allowed {
|
||||
continue
|
||||
}
|
||||
} else if response.StatusCode == 403 {
|
||||
if !client.TestMode {
|
||||
return nil, nil, 0, fmt.Errorf("B2 cap exceeded")
|
||||
@@ -176,32 +274,21 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
|
||||
} else if response.StatusCode == 416 {
|
||||
if http.MethodHead == method {
|
||||
// 416 Requested Range Not Satisfiable
|
||||
return nil, nil, 0, fmt.Errorf("URL request '%s' returned status code %d", url, response.StatusCode)
|
||||
return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
|
||||
}
|
||||
} else if response.StatusCode == 429 || response.StatusCode == 408 {
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
} else {
|
||||
LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
|
||||
backoff = client.retry(backoff, response)
|
||||
continue
|
||||
}
|
||||
|
||||
defer response.Body.Close()
|
||||
|
||||
e := &B2Error{}
|
||||
|
||||
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
|
||||
return nil, nil, 0, err
|
||||
retries = client.retry(retries, response)
|
||||
if retries <= 0 {
|
||||
return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
|
||||
}
|
||||
|
||||
return nil, nil, 0, e
|
||||
if isUpload {
|
||||
client.UploadURLs[threadIndex] = ""
|
||||
client.UploadTokens[threadIndex] = ""
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil, 0, fmt.Errorf("Maximum backoff reached")
|
||||
}
|
||||
|
||||
type B2AuthorizeAccountOutput struct {
|
||||
@@ -211,11 +298,18 @@ type B2AuthorizeAccountOutput struct {
|
||||
DownloadURL string
|
||||
}
|
||||
|
||||
func (client *B2Client) AuthorizeAccount() (err error) {
|
||||
func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bool) {
|
||||
client.Lock.Lock()
|
||||
defer client.Lock.Unlock()
|
||||
|
||||
readCloser, _, _, err := client.call(B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
|
||||
// Don't authorize if the previous one was done less than 30 seconds ago
|
||||
if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix() - 30 {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
readCloser, _, _, err := client.call(threadIndex, B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
|
||||
if err != nil {
|
||||
return err
|
||||
return err, true
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
@@ -223,7 +317,7 @@ func (client *B2Client) AuthorizeAccount() (err error) {
|
||||
output := &B2AuthorizeAccountOutput{}
|
||||
|
||||
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||
return err
|
||||
return err, true
|
||||
}
|
||||
|
||||
// The account id may be different from the application key id so we're getting the account id from the returned
|
||||
@@ -232,9 +326,15 @@ func (client *B2Client) AuthorizeAccount() (err error) {
|
||||
|
||||
client.AuthorizationToken = output.AuthorizationToken
|
||||
client.APIURL = output.APIURL
|
||||
client.DownloadURL = output.DownloadURL
|
||||
if client.DownloadURL == "" {
|
||||
client.DownloadURL = output.DownloadURL
|
||||
}
|
||||
LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
|
||||
client.IsAuthorized = true
|
||||
|
||||
return nil
|
||||
client.LastAuthorizationTime = time.Now().Unix()
|
||||
|
||||
return nil, true
|
||||
}
|
||||
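The (err, allowed) pair returned by AuthorizeAccount above lets a caller tell a real authorization attempt apart from one skipped by the 30-second throttle. A hedged standalone illustration of that guard (it mirrors the check at the top of the function and does not call the real client):

package main

import (
	"fmt"
	"time"
)

// Mirrors the guard in AuthorizeAccount: only authorize if there was no prior
// authorization or the last one happened at least 30 seconds ago.
func shouldAuthorize(lastAuthorizationTime int64) bool {
	return lastAuthorizationTime == 0 || lastAuthorizationTime <= time.Now().Unix()-30
}

func main() {
	fmt.Println(shouldAuthorize(0))                       // true: never authorized yet
	fmt.Println(shouldAuthorize(time.Now().Unix() - 5))   // false: authorized 5s ago, throttled
	fmt.Println(shouldAuthorize(time.Now().Unix() - 120)) // true: last authorization is stale
}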
|
||||
type ListBucketOutput struct {
|
||||
@@ -248,10 +348,11 @@ func (client *B2Client) FindBucket(bucketName string) (err error) {
|
||||
|
||||
input := make(map[string]string)
|
||||
input["accountId"] = client.AccountID
|
||||
input["bucketName"] = bucketName
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_list_buckets"
|
||||
url := client.getAPIURL() + "/b2api/v1/b2_list_buckets"
|
||||
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, nil, input)
|
||||
readCloser, _, _, err := client.call(0, url, http.MethodPost, nil, input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -293,7 +394,7 @@ type B2ListFileNamesOutput struct {
|
||||
NextFileId string
|
||||
}
|
||||
|
||||
func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
|
||||
func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
|
||||
|
||||
maxFileCount := 1000
|
||||
if singleFile {
|
||||
@@ -311,20 +412,21 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
|
||||
input := make(map[string]interface{})
|
||||
input["bucketId"] = client.BucketID
|
||||
input["startFileName"] = startFileName
|
||||
input["startFileName"] = client.StorageDir + startFileName
|
||||
input["maxFileCount"] = maxFileCount
|
||||
input["prefix"] = client.StorageDir
|
||||
|
||||
for {
|
||||
url := client.APIURL + "/b2api/v1/b2_list_file_names"
|
||||
apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
|
||||
requestHeaders := map[string]string{}
|
||||
requestMethod := http.MethodPost
|
||||
var requestInput interface{}
|
||||
requestInput = input
|
||||
if includeVersions {
|
||||
url = client.APIURL + "/b2api/v1/b2_list_file_versions"
|
||||
apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
|
||||
} else if singleFile {
|
||||
// handle a single file with no versions as a special case to download the last byte of the file
|
||||
url = client.DownloadURL + "/file/" + client.BucketName + "/" + startFileName
|
||||
apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
|
||||
// requesting byte -1 works for empty files where 0-0 fails with a 416 error
|
||||
requestHeaders["Range"] = "bytes=-1"
|
||||
// HEAD request
|
||||
@@ -334,7 +436,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
var readCloser io.ReadCloser
|
||||
var responseHeader http.Header
|
||||
var err error
|
||||
readCloser, responseHeader, _, err = client.call(url, requestMethod, requestHeaders, requestInput)
|
||||
readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -347,7 +449,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
|
||||
if singleFile && !includeVersions {
|
||||
if responseHeader == nil {
|
||||
LOG_DEBUG("BACKBLAZE_LIST", "b2_download_file_by_name did not return headers")
|
||||
LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
|
||||
return []*B2Entry{}, nil
|
||||
}
|
||||
requiredHeaders := []string{
|
||||
@@ -361,11 +463,17 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
}
|
||||
}
|
||||
if len(missingKeys) > 0 {
|
||||
return nil, fmt.Errorf("b2_download_file_by_name missing headers: %s", missingKeys)
|
||||
return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
|
||||
}
|
||||
// construct the B2Entry from the response headers of the download request
|
||||
fileID := responseHeader.Get("x-bz-file-id")
|
||||
fileName := responseHeader.Get("x-bz-file-name")
|
||||
unescapedFileName, err := url.QueryUnescape(fileName)
|
||||
if err == nil {
|
||||
fileName = unescapedFileName
|
||||
} else {
|
||||
LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
|
||||
}
|
||||
fileAction := "upload"
|
||||
// byte range that is returned: "bytes #-#/#"
|
||||
rangeString := responseHeader.Get("Content-Range")
|
||||
@@ -378,14 +486,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
|
||||
fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
|
||||
if fileSize != 0 {
|
||||
return nil, fmt.Errorf("b2_download_file_by_name returned non-zero file length")
|
||||
return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf("could not parse b2_download_file_by_name headers")
|
||||
return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
|
||||
}
|
||||
fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
|
||||
|
||||
return []*B2Entry{{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
|
||||
return []*B2Entry{{fileID, fileName[len(client.StorageDir):], fileAction, fileSize, fileUploadTimestamp}}, nil
|
||||
}
|
||||
|
||||
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||
@@ -394,31 +502,27 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
|
||||
ioutil.ReadAll(readCloser)
|
||||
|
||||
if startFileName == "" {
|
||||
files = append(files, output.Files...)
|
||||
} else {
|
||||
for _, file := range output.Files {
|
||||
if singleFile {
|
||||
if file.FileName == startFileName {
|
||||
files = append(files, file)
|
||||
if !includeVersions {
|
||||
output.NextFileName = ""
|
||||
break
|
||||
}
|
||||
} else {
|
||||
for _, file := range output.Files {
|
||||
file.FileName = file.FileName[len(client.StorageDir):]
|
||||
if singleFile {
|
||||
if file.FileName == startFileName {
|
||||
files = append(files, file)
|
||||
if !includeVersions {
|
||||
output.NextFileName = ""
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if strings.HasPrefix(file.FileName, startFileName) {
|
||||
files = append(files, file)
|
||||
} else {
|
||||
output.NextFileName = ""
|
||||
break
|
||||
}
|
||||
output.NextFileName = ""
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if strings.HasPrefix(file.FileName, startFileName) {
|
||||
files = append(files, file)
|
||||
} else {
|
||||
output.NextFileName = ""
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if len(output.NextFileName) == 0 {
|
||||
@@ -434,14 +538,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
||||
return files, nil
|
||||
}
|
||||
|
||||
func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {
|
||||
func (client *B2Client) DeleteFile(threadIndex int, fileName string, fileID string) (err error) {
|
||||
|
||||
input := make(map[string]string)
|
||||
input["fileName"] = fileName
|
||||
input["fileName"] = client.StorageDir + fileName
|
||||
input["fileId"] = fileID
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_delete_file_version"
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
|
||||
url := client.getAPIURL() + "/b2api/v1/b2_delete_file_version"
|
||||
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -454,14 +558,14 @@ type B2HideFileOutput struct {
|
||||
FileID string
|
||||
}
|
||||
|
||||
func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
|
||||
func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID string, err error) {
|
||||
|
||||
input := make(map[string]string)
|
||||
input["bucketId"] = client.BucketID
|
||||
input["fileName"] = fileName
|
||||
input["fileName"] = client.StorageDir + fileName
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_hide_file"
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
|
||||
url := client.getAPIURL() + "/b2api/v1/b2_hide_file"
|
||||
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -478,11 +582,11 @@ func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
|
||||
return output.FileID, nil
|
||||
}
|
||||
|
||||
func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, error) {
|
||||
func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
|
||||
|
||||
url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath
|
||||
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
|
||||
|
||||
readCloser, _, len, err := client.call(url, http.MethodGet, make(map[string]string), 0)
|
||||
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
|
||||
return readCloser, len, err
|
||||
}
|
||||
|
||||
@@ -492,12 +596,12 @@ type B2GetUploadArgumentOutput struct {
|
||||
AuthorizationToken string
|
||||
}
|
||||
|
||||
func (client *B2Client) getUploadURL() error {
|
||||
func (client *B2Client) getUploadURL(threadIndex int) error {
|
||||
input := make(map[string]string)
|
||||
input["bucketId"] = client.BucketID
|
||||
|
||||
url := client.APIURL + "/b2api/v1/b2_get_upload_url"
|
||||
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
|
||||
url := client.getAPIURL() + "/b2api/v1/b2_get_upload_url"
|
||||
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -510,96 +614,29 @@ func (client *B2Client) getUploadURL() error {
|
||||
return err
|
||||
}
|
||||
|
||||
client.UploadURL = output.UploadURL
|
||||
client.UploadToken = output.AuthorizationToken
|
||||
client.UploadURLs[threadIndex] = output.UploadURL
|
||||
client.UploadTokens[threadIndex] = output.AuthorizationToken
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
|
||||
func (client *B2Client) UploadFile(threadIndex int, filePath string, content []byte, rateLimit int) (err error) {
|
||||
|
||||
hasher := sha1.New()
|
||||
hasher.Write(content)
|
||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||
|
||||
headers := make(map[string]string)
|
||||
headers["X-Bz-File-Name"] = filePath
|
||||
headers["X-Bz-File-Name"] = B2Escape(client.StorageDir + filePath)
|
||||
headers["Content-Length"] = fmt.Sprintf("%d", len(content))
|
||||
headers["Content-Type"] = "application/octet-stream"
|
||||
headers["X-Bz-Content-Sha1"] = hash
|
||||
|
||||
var response *http.Response
|
||||
|
||||
backoff := 0
|
||||
for i := 0; i < 8; i++ {
|
||||
|
||||
if client.UploadURL == "" || client.UploadToken == "" {
|
||||
err = client.getUploadURL()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
request, err := http.NewRequest("POST", client.UploadURL, CreateRateLimitedReader(content, rateLimit))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
request.ContentLength = int64(len(content))
|
||||
|
||||
request.Header.Set("Authorization", client.UploadToken)
|
||||
request.Header.Set("X-Bz-File-Name", filePath)
|
||||
request.Header.Set("Content-Type", "application/octet-stream")
|
||||
request.Header.Set("X-Bz-Content-Sha1", hash)
|
||||
|
||||
for key, value := range headers {
|
||||
request.Header.Set(key, value)
|
||||
}
|
||||
|
||||
if client.TestMode {
|
||||
r := rand.Float32()
|
||||
if r < 0.8 {
|
||||
request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
|
||||
} else if r < 0.9 {
|
||||
request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
|
||||
} else {
|
||||
request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
|
||||
}
|
||||
}
|
||||
|
||||
response, err = client.HTTPClient.Do(request)
|
||||
if err != nil {
|
||||
LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned an error: %v", client.UploadURL, err)
|
||||
backoff = client.retry(backoff, response)
|
||||
client.UploadURL = ""
|
||||
client.UploadToken = ""
|
||||
continue
|
||||
}
|
||||
|
||||
io.Copy(ioutil.Discard, response.Body)
|
||||
response.Body.Close()
|
||||
|
||||
if response.StatusCode < 300 {
|
||||
return nil
|
||||
}
|
||||
|
||||
LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
|
||||
|
||||
if response.StatusCode == 401 {
|
||||
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
|
||||
client.UploadURL = ""
|
||||
client.UploadToken = ""
|
||||
continue
|
||||
} else if response.StatusCode == 403 {
|
||||
if !client.TestMode {
|
||||
return fmt.Errorf("B2 cap exceeded")
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
LOG_INFO("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
|
||||
backoff = client.retry(backoff, response)
|
||||
client.UploadURL = ""
|
||||
client.UploadToken = ""
|
||||
}
|
||||
readCloser, _, _, err := client.call(threadIndex, "", http.MethodPost, headers, CreateRateLimitedReader(content, rateLimit))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fmt.Errorf("Maximum backoff reached")
|
||||
readCloser.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ func createB2ClientForTest(t *testing.T) (*B2Client, string) {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
return NewB2Client(b2["account"], b2["key"]), b2["bucket"]
|
||||
return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
|
||||
|
||||
}
|
||||
|
||||
@@ -50,7 +50,7 @@ func TestB2Client(t *testing.T) {
|
||||
|
||||
b2Client.TestMode = true
|
||||
|
||||
err := b2Client.AuthorizeAccount()
|
||||
err, _ := b2Client.AuthorizeAccount(0)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to authorize the b2 account: %v", err)
|
||||
return
|
||||
@@ -64,14 +64,14 @@ func TestB2Client(t *testing.T) {
|
||||
|
||||
testDirectory := "b2client_test/"
|
||||
|
||||
files, err := b2Client.ListFileNames(testDirectory, false, false)
|
||||
files, err := b2Client.ListFileNames(0, testDirectory, false, false)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to list files: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
err = b2Client.DeleteFile(file.FileName, file.FileID)
|
||||
err = b2Client.DeleteFile(0, file.FileName, file.FileID)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
|
||||
}
|
||||
@@ -90,14 +90,14 @@ func TestB2Client(t *testing.T) {
|
||||
hash := sha256.Sum256(content)
|
||||
name := hex.EncodeToString(hash[:])
|
||||
|
||||
err = b2Client.UploadFile(testDirectory+name, content, 100)
|
||||
err = b2Client.UploadFile(0, testDirectory+name, content, 100)
|
||||
if err != nil {
|
||||
t.Errorf("Error uploading file '%s': %v", name, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
files, err = b2Client.ListFileNames(testDirectory, false, false)
|
||||
files, err = b2Client.ListFileNames(0, testDirectory, false, false)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to list files: %v", err)
|
||||
return
|
||||
@@ -105,7 +105,7 @@ func TestB2Client(t *testing.T) {
|
||||
|
||||
for _, file := range files {
|
||||
|
||||
readCloser, _, err := b2Client.DownloadFile(file.FileName)
|
||||
readCloser, _, err := b2Client.DownloadFile(0, file.FileName)
|
||||
if err != nil {
|
||||
t.Errorf("Error downloading file '%s': %v", file.FileName, err)
|
||||
return
|
||||
@@ -125,7 +125,7 @@ func TestB2Client(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
err = b2Client.DeleteFile(file.FileName, file.FileID)
|
||||
err = b2Client.DeleteFile(0, file.FileName, file.FileID)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
|
||||
}
|
||||
|
||||
@@ -11,32 +11,26 @@ import (
|
||||
type B2Storage struct {
|
||||
StorageBase
|
||||
|
||||
clients []*B2Client
|
||||
client *B2Client
|
||||
}
|
||||
|
||||
// CreateB2Storage creates a B2 storage object.
|
||||
func CreateB2Storage(accountID string, applicationKey string, bucket string, threads int) (storage *B2Storage, err error) {
|
||||
func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
|
||||
|
||||
var clients []*B2Client
|
||||
client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
|
||||
|
||||
for i := 0; i < threads; i++ {
|
||||
client := NewB2Client(accountID, applicationKey)
|
||||
err, _ = client.AuthorizeAccount(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = client.AuthorizeAccount()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = client.FindBucket(bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clients = append(clients, client)
|
||||
err = client.FindBucket(bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storage = &B2Storage{
|
||||
clients: clients,
|
||||
client: client,
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
@@ -56,7 +50,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
|
||||
includeVersions = true
|
||||
}
|
||||
|
||||
entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)
|
||||
entries, err := storage.client.ListFileNames(threadIndex, dir, false, includeVersions)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -102,7 +96,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
|
||||
|
||||
if strings.HasSuffix(filePath, ".fsl") {
|
||||
filePath = filePath[:len(filePath)-len(".fsl")]
|
||||
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
|
||||
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -116,7 +110,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
|
||||
|
||||
toBeDeleted = true
|
||||
|
||||
err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
|
||||
err = storage.client.DeleteFile(threadIndex, filePath, entry.FileID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -125,7 +119,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
|
||||
return nil
|
||||
|
||||
} else {
|
||||
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
|
||||
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -133,7 +127,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
|
||||
if len(entries) == 0 {
|
||||
return nil
|
||||
}
|
||||
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
|
||||
return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -160,10 +154,10 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
|
||||
}
|
||||
|
||||
if filePath == from {
|
||||
_, err = storage.clients[threadIndex].HideFile(from)
|
||||
_, err = storage.client.HideFile(threadIndex, from)
|
||||
return err
|
||||
} else {
|
||||
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
|
||||
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -171,7 +165,7 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
|
||||
return nil
|
||||
}
|
||||
|
||||
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
|
||||
return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -188,7 +182,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
|
||||
filePath = filePath[:len(filePath)-len(".fsl")]
|
||||
}
|
||||
|
||||
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
|
||||
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, isFossil)
|
||||
if err != nil {
|
||||
return false, false, 0, err
|
||||
}
|
||||
@@ -210,22 +204,20 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
|
||||
filePath = strings.Replace(filePath, " ", "%20", -1)
|
||||
readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
|
||||
readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
|
||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
|
||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.client.Threads)
|
||||
return err
|
||||
}
|
||||
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
filePath = strings.Replace(filePath, " ", "%20", -1)
|
||||
return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
|
||||
return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
|
||||
}
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
@@ -243,7 +235,5 @@ func (storage *B2Storage) IsFastListing() bool { return true }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *B2Storage) EnableTestMode() {
|
||||
for _, client := range storage.clients {
|
||||
client.TestMode = true
|
||||
}
|
||||
storage.client.TestMode = true
|
||||
}
|
||||
|
||||
@@ -35,6 +35,11 @@ type BackupManager struct {
|
||||
config *Config // contains a number of options
|
||||
|
||||
nobackupFile string // don't backup directory when this file name is found
|
||||
|
||||
filtersFile string // the path to the filters file
|
||||
|
||||
excludeByAttribute bool // don't back up files that carry the no-backup attribute
|
||||
|
||||
}
|
||||
|
||||
func (manager *BackupManager) SetDryRun(dryRun bool) {
|
||||
@@ -44,7 +49,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
|
||||
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
|
||||
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
|
||||
// master key which can be nil if encryption is not enabled.
|
||||
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string) *BackupManager {
|
||||
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, filtersFile string, excludeByAttribute bool) *BackupManager {
|
||||
|
||||
config, _, err := DownloadConfig(storage, password)
|
||||
if err != nil {
|
||||
@@ -67,6 +72,10 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
|
||||
config: config,
|
||||
|
||||
nobackupFile: nobackupFile,
|
||||
|
||||
filtersFile: filtersFile,
|
||||
|
||||
excludeByAttribute: excludeByAttribute,
|
||||
}
|
||||
|
||||
if IsDebugging() {
|
||||
@@ -76,6 +85,11 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
|
||||
return backupManager
|
||||
}
|
||||
|
||||
// LoadRSAPrivateKey loads the specified private key file for decrypting file chunks
|
||||
func (manager *BackupManager) LoadRSAPrivateKey(keyFile string, passphrase string) {
|
||||
manager.config.loadRSAPrivateKey(keyFile, passphrase)
|
||||
}
|
||||
|
||||
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
|
||||
// directory
|
||||
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
||||
@@ -103,6 +117,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
|
||||
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
|
||||
// to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
|
||||
// original unchanged entry list.
|
||||
@@ -176,6 +191,19 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
|
||||
LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
|
||||
|
||||
if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
|
||||
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
|
||||
manager.config.DataShards, manager.config.ParityShards)
|
||||
}
|
||||
|
||||
if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
|
||||
LOG_INFO("BACKUP_KEY", "RSA encryption is enabled")
|
||||
}
|
||||
|
||||
if manager.excludeByAttribute {
|
||||
LOG_INFO("BACKUP_EXCLUDE", "Exclude files with no-backup attributes")
|
||||
}
|
||||
|
||||
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
|
||||
if remoteSnapshot == nil {
|
||||
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
|
||||
@@ -188,7 +216,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
defer DeleteShadowCopy()
|
||||
|
||||
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
|
||||
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile)
|
||||
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
|
||||
manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute)
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
|
||||
return false
|
||||
@@ -198,6 +227,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
return true
|
||||
}
|
||||
|
||||
if len(localSnapshot.Files) == 0 {
|
||||
LOG_ERROR("SNAPSHOT_EMPTY", "No files under the repository to be backed up")
|
||||
return false
|
||||
}
|
||||
|
||||
// This cache contains all chunks referenced by the last snapshot. Any other chunks will lead to a call to
|
||||
// UploadChunk.
|
||||
chunkCache := make(map[string]bool)
|
||||
@@ -502,6 +536,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
chunkID := chunk.GetID()
|
||||
chunkSize := chunk.GetLength()
|
||||
|
||||
if chunkSize == 0 {
|
||||
LOG_DEBUG("CHUNK_EMPTY", "Ignored chunk %s of size 0", chunkID)
|
||||
return
|
||||
}
|
||||
|
||||
chunkIndex++
|
||||
|
||||
_, found := chunkCache[chunkID]
|
||||
@@ -723,7 +762,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
||||
// the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
|
||||
// remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
|
||||
func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
|
||||
deleteMode bool, setOwner bool, showStatistics bool, patterns []string) bool {
|
||||
deleteMode bool, setOwner bool, showStatistics bool, patterns []string, allowFailures bool) int {
|
||||
|
||||
startTime := time.Now().Unix()
|
||||
|
||||
@@ -746,7 +785,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
err = os.Mkdir(top, 0744)
|
||||
if err != nil {
|
||||
LOG_ERROR("RESTORE_MKDIR", "Can't create the directory to be restored: %v", err)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -754,16 +793,17 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
|
||||
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
|
||||
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
|
||||
|
||||
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile)
|
||||
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
|
||||
manager.filtersFile, manager.excludeByAttribute)
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
|
||||
LOG_INFO("RESTORE_START", "Restoring %s to revision %d", top, revision)
|
||||
@@ -790,6 +830,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
|
||||
var totalFileSize int64
|
||||
var downloadedFileSize int64
|
||||
var failedFiles int
|
||||
var skippedFileSize int64
|
||||
var skippedFiles int64
|
||||
|
||||
var downloadedFiles []*Entry
|
||||
|
||||
i := 0
|
||||
for _, entry := range remoteSnapshot.Files {
|
||||
@@ -807,6 +852,9 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
if compare == 0 {
|
||||
i++
|
||||
if quickMode && local.IsSameAs(entry) {
|
||||
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", local.Path)
|
||||
skippedFileSize += entry.Size
|
||||
skippedFiles++
|
||||
skipped = true
|
||||
}
|
||||
}
|
||||
@@ -836,7 +884,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
err = os.Symlink(entry.Link, fullPath)
|
||||
if err != nil {
|
||||
LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", entry.Path, err)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
entry.RestoreMetadata(fullPath, nil, setOwner)
|
||||
LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", entry.Path)
|
||||
@@ -845,7 +893,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
|
||||
if err == nil && !stat.IsDir() {
|
||||
LOG_ERROR("RESTORE_NOTDIR", "The path %s is not a directory", fullPath)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
@@ -854,7 +902,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
err = os.MkdirAll(fullPath, 0700)
|
||||
if err != nil && !os.IsExist(err) {
|
||||
LOG_ERROR("RESTORE_MKDIR", "%v", err)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -872,14 +920,13 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
// Sort entries by their starting chunks in order to linearize the access to the chunk chain.
|
||||
sort.Sort(ByChunk(fileEntries))
|
||||
|
||||
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads)
|
||||
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads, allowFailures)
|
||||
chunkDownloader.AddFiles(remoteSnapshot, fileEntries)
|
||||
|
||||
chunkMaker := CreateChunkMaker(manager.config, true)
|
||||
|
||||
startDownloadingTime := time.Now().Unix()
|
||||
|
||||
var downloadedFiles []*Entry
|
||||
// Now download files one by one
|
||||
for _, file := range fileEntries {
|
||||
|
||||
@@ -889,16 +936,21 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
if quickMode {
|
||||
if file.IsSameAsFileInfo(stat) {
|
||||
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
|
||||
skippedFileSize += file.Size
|
||||
skippedFiles++
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if file.Size == 0 && file.IsSameAsFileInfo(stat) {
|
||||
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (size 0)", file.Path)
|
||||
skippedFileSize += file.Size
|
||||
skippedFiles++
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
err = os.MkdirAll(path.Dir(fullPath), 0744)
|
||||
parent, _ := SplitDir(fullPath)
|
||||
err = os.MkdirAll(parent, 0744)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_MKDIR", "Failed to create directory: %v", err)
|
||||
}
|
||||
@@ -909,22 +961,39 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
newFile, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.GetPermissions())
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_OPEN", "Failed to create empty file: %v", err)
|
||||
return false
|
||||
return 0
|
||||
}
|
||||
newFile.Close()
|
||||
|
||||
file.RestoreMetadata(fullPath, nil, setOwner)
|
||||
if !showStatistics {
|
||||
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (0)", file.Path)
|
||||
downloadedFileSize += file.Size
|
||||
downloadedFiles = append(downloadedFiles, file)
|
||||
}
|
||||
|
||||
continue
|
||||
}
|
||||
|
||||
if manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
|
||||
totalFileSize, downloadedFileSize, startDownloadingTime) {
|
||||
downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
|
||||
totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures)
|
||||
if err != nil {
|
||||
// RestoreFile returned an error; if allowFailures is false RestoreFile would have errored out and not returned, so here
|
||||
// we just need to show a warning
|
||||
failedFiles++
|
||||
LOG_WARN("DOWNLOAD_FAIL", "Failed to restore %s: %v", file.Path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// No error
|
||||
if downloaded {
|
||||
// No error, file was restored
|
||||
downloadedFileSize += file.Size
|
||||
downloadedFiles = append(downloadedFiles, file)
|
||||
} else {
|
||||
// No error, file was skipped
|
||||
skippedFileSize += file.Size
|
||||
skippedFiles++
|
||||
}
|
||||
file.RestoreMetadata(fullPath, nil, setOwner)
|
||||
}
|
||||
@@ -952,11 +1021,16 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
}
|
||||
}
|
||||
|
||||
if failedFiles > 0 {
|
||||
return failedFiles
|
||||
}
|
||||
|
||||
LOG_INFO("RESTORE_END", "Restored %s to revision %d", top, revision)
|
||||
if showStatistics {
|
||||
LOG_INFO("RESTORE_STATS", "Files: %d total, %s bytes", len(fileEntries), PrettySize(totalFileSize))
|
||||
LOG_INFO("RESTORE_STATS", "Downloaded %d file, %s bytes, %d chunks",
|
||||
len(downloadedFiles), PrettySize(downloadedFileSize), chunkDownloader.numberOfDownloadedChunks)
|
||||
LOG_INFO("RESTORE_STATS", "Skipped %d file, %s bytes", skippedFiles, PrettySize(skippedFileSize))
|
||||
}
|
||||
|
||||
runningTime := time.Now().Unix() - startTime
|
||||
@@ -968,7 +1042,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
||||
|
||||
chunkDownloader.Stop()
|
||||
|
||||
return true
|
||||
return 0
|
||||
}
|
||||
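Because Restore now returns an int instead of a bool, callers can distinguish a clean run from a partial one: a return of 0 indicates no per-file failures were recorded, while a positive value is the number of files that failed when allowFailures is enabled. A hedged caller-side fragment (not from the diff; the variables and the log id are placeholders, and the surrounding duplicacy package is assumed):

failedFiles := manager.Restore(top, revision, inPlace, quickMode, threads, overwrite,
	deleteMode, setOwner, showStatistics, patterns, allowFailures)
if failedFiles > 0 {
	// "RESTORE_PARTIAL" is a placeholder id, not one emitted by the code above.
	LOG_WARN("RESTORE_PARTIAL", "%d file(s) could not be restored", failedFiles)
}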
|
||||
// fileEncoder encodes one file at a time to avoid loading the full json description of the entire file tree
|
||||
@@ -1128,8 +1202,11 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
|
||||
// Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
|
||||
// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
|
||||
// overwritten directly.
|
||||
// Return: true, nil: Restored file;
|
||||
// false, nil: Skipped file;
|
||||
// false, error: Failure to restore file (only if allowFailures == true)
|
||||
func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
|
||||
showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64) bool {
|
||||
showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {
|
||||
|
||||
LOG_TRACE("DOWNLOAD_START", "Downloading %s", entry.Path)
|
||||
|
||||
@@ -1174,7 +1251,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
n := int64(1)
|
||||
@@ -1186,30 +1263,24 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
_, err = existingFile.Seek(entry.Size-n, 0)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing: %v", fullPath, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
_, err = existingFile.Write([]byte("\x00\x00")[:n])
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CREATE", "Failed to initialize the sparse file %s for in-place writing: %v", fullPath, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
existingFile.Close()
|
||||
existingFile, err = os.Open(fullPath)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
isNewFile = true
|
||||
}
|
||||
} else {
|
||||
LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
|
||||
}
|
||||
} else {
|
||||
if !overwrite {
|
||||
LOG_ERROR("DOWNLOAD_OVERWRITE",
|
||||
"File %s already exists. Please specify the -overwrite option to continue", entry.Path)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// The key in this map is the number of zeroes. The value is the corresponding hash.
|
||||
@@ -1278,7 +1349,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
}
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_SPLIT", "Failed to read existing file: %v", err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
if count > 0 {
|
||||
@@ -1299,6 +1370,19 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
}
|
||||
|
||||
fileHash = hex.EncodeToString(fileHasher.Sum(nil))
|
||||
|
||||
if fileHash == entry.Hash && fileHash != "" {
|
||||
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// fileHash != entry.Hash, warn/error depending on -overwrite option
|
||||
if !overwrite {
|
||||
LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
|
||||
"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
|
||||
return false, fmt.Errorf("file exists")
|
||||
}
|
||||
|
||||
} else {
|
||||
// If it is not in-place, we want to reuse any chunks in the existing file regardless of their offsets, so
|
||||
// we run the chunk maker to split the original file.
|
||||
@@ -1318,9 +1402,11 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
return nil, false
|
||||
})
|
||||
}
|
||||
|
||||
// This is an additional check on top of the fileHash/entry.Hash comparison above, so it should no longer trigger
|
||||
if fileHash == entry.Hash && fileHash != "" {
|
||||
LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1348,7 +1434,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1380,7 +1466,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
_, err = existingFile.Seek(offset, 0)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_SEEK", "Failed to set the offset to %d for file %s: %v", offset, fullPath, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Check if the chunk is available in the existing file
|
||||
@@ -1390,17 +1476,20 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
_, err := io.CopyN(hasher, existingFile, int64(existingLengths[j]))
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_READ", "Failed to read the existing chunk %s: %v", hash, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
if IsDebugging() {
|
||||
LOG_DEBUG("DOWNLOAD_UNCHANGED", "Chunk %s is unchanged", manager.config.GetChunkIDFromHash(hash))
|
||||
}
|
||||
} else {
|
||||
chunk := chunkDownloader.WaitForChunk(i)
|
||||
if chunk.isBroken {
|
||||
return false, fmt.Errorf("chunk %s is corrupted", manager.config.GetChunkIDFromHash(hash))
|
||||
}
|
||||
_, err = existingFile.Write(chunk.GetBytes()[start:end])
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
hasher.Write(chunk.GetBytes()[start:end])
|
||||
}
|
||||
@@ -1411,15 +1500,15 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
// Must truncate the file if the new size is smaller
|
||||
if err = existingFile.Truncate(offset); err != nil {
|
||||
LOG_ERROR("DOWNLOAD_TRUNCATE", "Failed to truncate the file at %d: %v", offset, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Verify the download by hash
|
||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||
if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
|
||||
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
|
||||
LOG_WERROR(allowFailures, "DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
|
||||
fullPath, "", entry.Hash)
|
||||
return false
|
||||
return false, fmt.Errorf("file corrupt (hash mismatch)")
|
||||
}
|
||||
|
||||
} else {
|
||||
@@ -1428,7 +1517,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
newFile, err = os.OpenFile(temporaryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
hasher := manager.config.NewFileHasher()
|
||||
@@ -1466,6 +1555,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
|
||||
if !hasLocalCopy {
|
||||
chunk := chunkDownloader.WaitForChunk(i)
|
||||
if chunk.isBroken {
|
||||
return false, fmt.Errorf("chunk %s is corrupted", manager.config.GetChunkIDFromHash(hash))
|
||||
}
|
||||
// If the chunk was downloaded from the storage, we may still need a portion of it.
|
||||
start := 0
|
||||
if i == entry.StartChunk {
|
||||
@@ -1481,7 +1573,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
_, err = newFile.Write(data)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_WRITE", "Failed to write file: %v", err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
hasher.Write(data)
|
||||
@@ -1490,9 +1582,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
|
||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||
if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
|
||||
LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
|
||||
LOG_WERROR(allowFailures, "DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
|
||||
entry.Path, hash, entry.Hash)
|
||||
return false
|
||||
return false, fmt.Errorf("file corrupt (hash mismatch)")
|
||||
}
|
||||
|
||||
if existingFile != nil {
|
||||
@@ -1506,31 +1598,40 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
||||
err = os.Remove(fullPath)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
LOG_ERROR("DOWNLOAD_REMOVE", "Failed to remove the old file: %v", err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
|
||||
err = os.Rename(temporaryPath, fullPath)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_RENAME", "Failed to rename the file %s to %s: %v", temporaryPath, fullPath, err)
|
||||
return false
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
if !showStatistics {
|
||||
LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", entry.Path, entry.Size)
|
||||
}
|
||||
return true
|
||||
return true, nil
|
||||
}
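With this change a failed file no longer aborts the whole restore: the error is returned to the caller, which can log it as a warning and keep going when failures are allowed. The sketch below shows one way such a caller loop could be structured; restoreOne and the file list are illustrative stand-ins, not the actual Duplicacy API.

package main

import (
	"errors"
	"fmt"
	"log"
)

// restoreAll drives per-file restores. When allowFailures is true it counts
// failures and keeps going; otherwise it stops at the first error.
func restoreAll(files []string, restoreOne func(string) (bool, error), allowFailures bool) (int, error) {
	failed := 0
	for _, f := range files {
		restored, err := restoreOne(f)
		if err != nil {
			if !allowFailures {
				return failed + 1, fmt.Errorf("restore aborted at %s: %w", f, err)
			}
			log.Printf("WARN: %s not restored: %v", f, err)
			failed++
			continue
		}
		if restored {
			log.Printf("restored %s", f)
		}
	}
	return failed, nil
}

func main() {
	// A fake restorer that fails for one file, standing in for a per-file restore.
	restoreOne := func(f string) (bool, error) {
		if f == "dir1/file3" {
			return false, errors.New("file corrupt (hash mismatch)")
		}
		return true, nil
	}
	failed, err := restoreAll([]string{"file1", "file2", "dir1/file3"}, restoreOne, true)
	fmt.Println("failed files:", failed, "err:", err)
}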
// CopySnapshots copies the specified snapshots from one storage to the other.
|
||||
func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
|
||||
revisionsToBeCopied []int, threads int) bool {
|
||||
revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {
|
||||
|
||||
if !manager.config.IsCompatiableWith(otherManager.config) {
|
||||
LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
|
||||
return false
|
||||
}
|
||||
|
||||
if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 {
|
||||
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards",
|
||||
otherManager.config.DataShards, otherManager.config.ParityShards)
|
||||
}
|
||||
|
||||
if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 {
|
||||
LOG_INFO("BACKUP_KEY", "RSA encryption is enabled for the destination")
|
||||
}
|
||||
|
||||
if snapshotID == "" && len(revisionsToBeCopied) > 0 {
|
||||
LOG_ERROR("SNAPSHOT_ERROR", "You must specify the snapshot id when one or more revisions are specified.")
|
||||
return false
|
||||
@@ -1610,6 +1711,9 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
return true
|
||||
}
|
||||
|
||||
// These two maps store hashes of chunks in the source and destination storages, respectively. Note that
|
||||
// the value of 'chunks' is used to indicate if the chunk is a snapshot chunk, while the value of 'otherChunks'
|
||||
// is not used.
|
||||
chunks := make(map[string]bool)
|
||||
otherChunks := make(map[string]bool)
|
||||
|
||||
@@ -1622,21 +1726,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
|
||||
|
||||
for _, chunkHash := range snapshot.FileSequence {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
chunks[chunkHash] = true // The chunk is a snapshot chunk
|
||||
}
|
||||
|
||||
for _, chunkHash := range snapshot.ChunkSequence {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
chunks[chunkHash] = true // The chunk is a snapshot chunk
|
||||
}
|
||||
|
||||
for _, chunkHash := range snapshot.LengthSequence {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
}
|
||||
chunks[chunkHash] = true // The chunk is a snapshot chunk
|
||||
}
|
||||
|
||||
description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
|
||||
@@ -1649,9 +1747,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
|
||||
for _, chunkHash := range snapshot.ChunkHashes {
|
||||
if _, found := chunks[chunkHash]; !found {
|
||||
chunks[chunkHash] = true
|
||||
chunks[chunkHash] = false // The chunk is a file chunk
|
||||
}
|
||||
}
|
||||
|
||||
snapshot.ChunkHashes = nil
|
||||
}
|
||||
|
||||
otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
|
||||
@@ -1670,62 +1770,64 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
|
||||
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Found %d chunks on destination storage", len(otherChunks))
|
||||
|
||||
chunksToCopy := 0
|
||||
chunksToSkip := 0
|
||||
var chunksToCopy []string
|
||||
|
||||
for chunkHash := range chunks {
|
||||
otherChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
|
||||
if _, found := otherChunks[otherChunkID]; found {
|
||||
chunksToSkip++
|
||||
} else {
|
||||
chunksToCopy++
|
||||
if _, found := otherChunks[otherChunkID]; !found {
|
||||
chunksToCopy = append(chunksToCopy, chunkHash)
|
||||
}
|
||||
}
|
||||
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Chunks to copy = %d, to skip = %d, total = %d", chunksToCopy, chunksToSkip, chunksToCopy+chunksToSkip)
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Total chunks in source snapshot revisions = %d\n", len(chunks))
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))
|
||||
|
||||
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
|
||||
chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, downloadingThreads, false)
|
||||
|
||||
chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, threads,
|
||||
var uploadedBytes int64
|
||||
startTime := time.Now()
|
||||
|
||||
copiedChunks := 0
|
||||
chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, uploadingThreads,
|
||||
func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
|
||||
if skipped {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) exists at the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||
} else {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) copied to the destination", chunk.GetID(), chunkIndex, len(chunks))
|
||||
action := "Skipped"
|
||||
if !skipped {
|
||||
copiedChunks++
|
||||
action = "Copied"
|
||||
}
|
||||
|
||||
atomic.AddInt64(&uploadedBytes, int64(chunkSize))
|
||||
|
||||
elapsedTime := time.Now().Sub(startTime).Seconds()
|
||||
speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime)
|
||||
remainingTime := int64(float64(len(chunksToCopy) - chunkIndex - 1) / float64(chunkIndex + 1) * elapsedTime)
|
||||
percentage := float64(chunkIndex + 1) / float64(len(chunksToCopy)) * 100.0
|
||||
LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%",
|
||||
action, chunk.GetID(), chunkIndex + 1, len(chunksToCopy),
|
||||
PrettySize(speed), PrettyTime(remainingTime), percentage)
|
||||
otherManager.config.PutChunk(chunk)
|
||||
})
|
||||
|
||||
chunkUploader.Start()
|
||||
|
||||
totalCopied := 0
|
||||
totalSkipped := 0
|
||||
chunkIndex := 0
|
||||
|
||||
for chunkHash := range chunks {
|
||||
chunkIndex++
|
||||
for _, chunkHash := range chunksToCopy {
|
||||
chunkDownloader.AddChunk(chunkHash)
|
||||
}
|
||||
for i, chunkHash := range chunksToCopy {
|
||||
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
|
||||
newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
|
||||
if _, found := otherChunks[newChunkID]; !found {
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
|
||||
i := chunkDownloader.AddChunk(chunkHash)
|
||||
chunk := chunkDownloader.WaitForChunk(i)
|
||||
newChunk := otherManager.config.GetChunk()
|
||||
newChunk.Reset(true)
|
||||
newChunk.Write(chunk.GetBytes())
|
||||
chunkUploader.StartChunk(newChunk, chunkIndex)
|
||||
totalCopied++
|
||||
} else {
|
||||
LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) skipped at the destination", chunkID, chunkIndex, len(chunks))
|
||||
totalSkipped++
|
||||
}
|
||||
LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
|
||||
chunk := chunkDownloader.WaitForChunk(i)
|
||||
newChunk := otherManager.config.GetChunk()
|
||||
newChunk.Reset(true)
|
||||
newChunk.Write(chunk.GetBytes())
|
||||
newChunk.isSnapshot = chunks[chunkHash]
|
||||
chunkUploader.StartChunk(newChunk, i)
|
||||
}
|
||||
|
||||
chunkDownloader.Stop()
|
||||
chunkUploader.Stop()
|
||||
|
||||
LOG_INFO("SNAPSHOT_COPY", "Copy complete, %d total chunks, %d chunks copied, %d skipped", totalCopied+totalSkipped, totalCopied, totalSkipped)
|
||||
LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks) - copiedChunks)
|
||||
|
||||
for _, snapshot := range snapshots {
|
||||
if revisionMap[snapshot.ID][snapshot.Revision] == false {
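The COPY_PROGRESS message above estimates speed and remaining time from the bytes uploaded so far and the position of the current chunk in the copy list. A small self-contained sketch of the same arithmetic follows; the formatting helpers used by the real code (PrettySize, PrettyTime) are out of scope here.

package main

import (
	"fmt"
	"time"
)

// copyProgress mirrors the arithmetic of the upload callback: average speed
// over the elapsed time, remaining time extrapolated from the chunks still to
// copy, and the percentage completed.
func copyProgress(uploadedBytes int64, startTime time.Time, chunkIndex int, totalChunks int) (speed int64, remaining time.Duration, percent float64) {
	elapsed := time.Since(startTime).Seconds()
	if elapsed > 0 {
		speed = int64(float64(uploadedBytes) / elapsed)
	}
	remainingChunks := totalChunks - chunkIndex - 1
	remaining = time.Duration(float64(remainingChunks)/float64(chunkIndex+1)*elapsed) * time.Second
	percent = float64(chunkIndex+1) / float64(totalChunks) * 100.0
	return speed, remaining, percent
}

func main() {
	start := time.Now().Add(-10 * time.Second) // pretend the copy started 10s ago
	speed, remaining, percent := copyProgress(50<<20, start, 24, 100)
	fmt.Printf("%d B/s, %s left, %.1f%%\n", speed, remaining.Round(time.Second), percent)
}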
@@ -169,6 +169,12 @@ func getFileHash(path string) (hash string) {
|
||||
return hex.EncodeToString(hasher.Sum(nil))
|
||||
}
|
||||
|
||||
func assertRestoreFailures(t *testing.T, failedFiles int, expectedFailedFiles int) {
|
||||
if failedFiles != expectedFailedFiles {
|
||||
t.Errorf("Failed to restore %d instead of %d file(s)", failedFiles, expectedFailedFiles)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackupManager(t *testing.T) {
|
||||
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
@@ -226,12 +232,20 @@ func TestBackupManager(t *testing.T) {
|
||||
cleanStorage(storage)
|
||||
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
dataShards := 0
|
||||
parityShards := 0
|
||||
if testErasureCoding {
|
||||
dataShards = 5
|
||||
parityShards = 2
|
||||
}
|
||||
|
||||
if testFixedChunkSize {
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
|
||||
t.Errorf("Failed to initialize the storage")
|
||||
}
|
||||
} else {
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "", dataShards, parityShards) {
|
||||
t.Errorf("Failed to initialize the storage")
|
||||
}
|
||||
}
|
||||
@@ -239,15 +253,16 @@ func TestBackupManager(t *testing.T) {
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager := CreateBackupManager("host1", storage, testDir, password, "")
|
||||
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
|
||||
backupManager.SetupSnapshotCache("default")
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
|
||||
failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
|
||||
@@ -270,8 +285,9 @@ func TestBackupManager(t *testing.T) {
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
|
||||
failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
@@ -298,8 +314,9 @@ func TestBackupManager(t *testing.T) {
|
||||
createRandomFile(testDir+"/repository2/dir5/file5", 100)
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
|
||||
failedFiles = backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
@@ -325,8 +342,9 @@ func TestBackupManager(t *testing.T) {
|
||||
os.Remove(testDir + "/repository1/file2")
|
||||
os.Remove(testDir + "/repository1/dir1/file3")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
|
||||
failedFiles = backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"} /*allowFailures=*/, false)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
@@ -341,7 +359,7 @@ func TestBackupManager(t *testing.T) {
|
||||
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
|
||||
}
|
||||
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
|
||||
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
|
||||
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
|
||||
@@ -349,7 +367,7 @@ func TestBackupManager(t *testing.T) {
|
||||
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
|
||||
}
|
||||
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
|
||||
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
|
||||
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
|
||||
@@ -358,9 +376,348 @@ func TestBackupManager(t *testing.T) {
|
||||
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
|
||||
}
|
||||
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
|
||||
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||
|
||||
/*buf := make([]byte, 1<<16)
|
||||
runtime.Stack(buf, true)
|
||||
fmt.Printf("%s", buf)*/
|
||||
}
|
||||
|
||||
// Create a file with random content using the given seed
|
||||
func createRandomFileSeeded(path string, maxSize int, seed int64) {
|
||||
rand.Seed(seed)
|
||||
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
|
||||
return
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
size := maxSize/2 + rand.Int()%(maxSize/2)
|
||||
|
||||
buffer := make([]byte, 32*1024)
|
||||
for size > 0 {
|
||||
bytes := size
|
||||
if bytes > cap(buffer) {
|
||||
bytes = cap(buffer)
|
||||
}
|
||||
rand.Read(buffer[:bytes])
|
||||
bytes, err = file.Write(buffer[:bytes])
|
||||
if err != nil {
|
||||
LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
|
||||
return
|
||||
}
|
||||
size -= bytes
|
||||
}
|
||||
}
|
||||
|
||||
func corruptFile(path string, start int, length int, seed int64) {
|
||||
rand.Seed(seed)
|
||||
|
||||
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
|
||||
if err != nil {
|
||||
LOG_ERROR("CORRUPT_FILE", "Can't open %s for writing: %v", path, err)
|
||||
return
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if file != nil {
|
||||
file.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
_, err = file.Seek(int64(start), 0)
|
||||
if err != nil {
|
||||
LOG_ERROR("CORRUPT_FILE", "Can't seek to the offset %d: %v", start, err)
|
||||
return
|
||||
}
|
||||
|
||||
buffer := make([]byte, length)
|
||||
rand.Read(buffer)
|
||||
|
||||
_, err = file.Write(buffer)
|
||||
if err != nil {
|
||||
LOG_ERROR("CORRUPT_FILE", "Failed to write to %s: %v", path, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func TestPersistRestore(t *testing.T) {
|
||||
// We want deterministic output here so we can test that the expected files are corrupted by missing or corrupt chunks
|
||||
// Therefore use rand functions with a fixed seed, and known keys
|
||||
|
||||
setTestingT(t)
|
||||
SetLoggingLevel(INFO)
|
||||
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
switch e := r.(type) {
|
||||
case Exception:
|
||||
t.Errorf("%s %s", e.LogID, e.Message)
|
||||
debug.PrintStack()
|
||||
default:
|
||||
t.Errorf("%v", e)
|
||||
debug.PrintStack()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
testDir := path.Join(os.TempDir(), "duplicacy_test")
|
||||
os.RemoveAll(testDir)
|
||||
os.MkdirAll(testDir, 0700)
|
||||
os.Mkdir(testDir+"/repository1", 0700)
|
||||
os.Mkdir(testDir+"/repository1/dir1", 0700)
|
||||
os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
|
||||
os.Mkdir(testDir+"/repository2", 0700)
|
||||
os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
|
||||
os.Mkdir(testDir+"/repository3", 0700)
|
||||
os.Mkdir(testDir+"/repository3/.duplicacy", 0700)
|
||||
|
||||
maxFileSize := 1000000
|
||||
//maxFileSize := 200000
|
||||
|
||||
createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize,1)
|
||||
createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize,2)
|
||||
createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize,3)
|
||||
|
||||
threads := 1
|
||||
|
||||
password := "duplicacy"
|
||||
|
||||
// We want deterministic output, plus the ability to test encrypted storage
|
||||
// So create an unencrypted storage with default keys, and an encrypted storage that is a bit-identical copy of it but protected by a password
|
||||
unencStorage, err := loadStorage(testDir+"/unenc_storage", threads)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create storage: %v", err)
|
||||
return
|
||||
}
|
||||
delay := 0
|
||||
if _, ok := unencStorage.(*ACDStorage); ok {
|
||||
delay = 1
|
||||
}
|
||||
if _, ok := unencStorage.(*OneDriveStorage); ok {
|
||||
delay = 5
|
||||
}
|
||||
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
cleanStorage(unencStorage)
|
||||
|
||||
if !ConfigStorage(unencStorage, 16384, 100, 64*1024, 256*1024, 16*1024, "", nil, false, "", 0, 0) {
|
||||
t.Errorf("Failed to initialize the unencrypted storage")
|
||||
}
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
unencConfig, _, err := DownloadConfig(unencStorage, "")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to download storage config: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Make encrypted storage
|
||||
storage, err := loadStorage(testDir+"/enc_storage", threads)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create encrypted storage: %v", err)
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
cleanStorage(storage)
|
||||
|
||||
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, unencConfig, true, "", 0, 0) {
|
||||
t.Errorf("Failed to initialize the encrypted storage")
|
||||
}
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
// do unencrypted backup
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
unencBackupManager := CreateBackupManager("host1", unencStorage, testDir, "", "", "", false)
|
||||
unencBackupManager.SetupSnapshotCache("default")
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
|
||||
// do encrypted backup
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
encBackupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
|
||||
encBackupManager.SetupSnapshotCache("default")
|
||||
|
||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||
encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
|
||||
time.Sleep(time.Duration(delay) * time.Second)
|
||||
|
||||
|
||||
// check snapshots
|
||||
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||
|
||||
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
|
||||
|
||||
// check functions
|
||||
checkAllUncorrupted := func(cmpRepository string) {
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
|
||||
t.Errorf("File %s does not exist", f)
|
||||
continue
|
||||
}
|
||||
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||
}
|
||||
}
|
||||
}
|
||||
checkMissingFile := func(cmpRepository string, expectMissing string) {
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
_, err := os.Stat(testDir + cmpRepository + "/" + f)
|
||||
if err==nil {
|
||||
if f==expectMissing {
|
||||
t.Errorf("File %s exists, expected to be missing", f)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if os.IsNotExist(err) {
|
||||
if f!=expectMissing {
|
||||
t.Errorf("File %s does not exist", f)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||
}
|
||||
}
|
||||
}
|
||||
checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
|
||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||
if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
|
||||
t.Errorf("File %s does not exist", f)
|
||||
continue
|
||||
}
|
||||
|
||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
|
||||
if (f==expectCorrupted) {
|
||||
if hash1 == hash2 {
|
||||
t.Errorf("File %s has same hashes, expected to be corrupted: %s vs %s", f, hash1, hash2)
|
||||
}
|
||||
|
||||
} else {
|
||||
if hash1 != hash2 {
|
||||
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// test restore all uncorrupted to repository3
|
||||
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
|
||||
failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
checkAllUncorrupted("/repository3")
|
||||
|
||||
// test for corrupt files and -persist
|
||||
// corrupt a chunk
|
||||
chunkToCorrupt1 := "/4d/538e5dfd2b08e782bfeb56d1360fb5d7eb9d8c4b2531cc2fca79efbaec910c"
|
||||
// this should affect file1
|
||||
chunkToCorrupt2 := "/2b/f953a766d0196ce026ae259e76e3c186a0e4bcd3ce10f1571d17f86f0a5497"
|
||||
// this should affect dir1/file3
|
||||
|
||||
for i := 0; i < 2; i++ {
|
||||
if i==0 {
|
||||
// test corrupt chunks
|
||||
corruptFile(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1, 128, 128, 4)
|
||||
corruptFile(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2, 128, 128, 4)
|
||||
} else {
|
||||
// test missing chunks
|
||||
os.Remove(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1)
|
||||
os.Remove(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2)
|
||||
}
|
||||
|
||||
// check snapshots with --persist (allowFailures == true)
|
||||
// this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
|
||||
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
|
||||
|
||||
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
|
||||
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
|
||||
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
|
||||
|
||||
|
||||
// test restore corrupted, inPlace = true, corrupted files will have hash failures
|
||||
os.RemoveAll(testDir+"/repository2")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 1)
|
||||
|
||||
// check restore, expect file1 to be corrupted
|
||||
checkCorruptedFile("/repository2", "file1")
|
||||
|
||||
|
||||
os.RemoveAll(testDir+"/repository2")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 1)
|
||||
|
||||
// check restore, expect file3 to be corrupted
|
||||
checkCorruptedFile("/repository2", "dir1/file3")
|
||||
|
||||
//SetLoggingLevel(DEBUG)
|
||||
// test restore corrupted, inPlace = false, corrupted files will be missing
|
||||
os.RemoveAll(testDir+"/repository2")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 1)
|
||||
|
||||
// check restore, expect file1 to be missing
|
||||
checkMissingFile("/repository2", "file1")
|
||||
|
||||
|
||||
os.RemoveAll(testDir+"/repository2")
|
||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 1)
|
||||
|
||||
// check restore, expect dir1/file3 to be missing
|
||||
checkMissingFile("/repository2", "dir1/file3")
|
||||
|
||||
// test restore corrupted files from different backups, inPlace = true
|
||||
// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
|
||||
// the latter will not touch the existing dir1/file3, which already has the correct hash
|
||||
os.RemoveAll(testDir+"/repository2")
|
||||
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 1)
|
||||
|
||||
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
checkAllUncorrupted("/repository2")
|
||||
|
||||
// restore to repository3, with overwrite and allowFailures (true/false), quickMode = false (use hashes)
|
||||
// should always succeed as uncorrupted files already exist with correct hash, so these will be ignored
|
||||
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
|
||||
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
checkAllUncorrupted("/repository3")
|
||||
|
||||
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||
assertRestoreFailures(t, failedFiles, 0)
|
||||
checkAllUncorrupted("/repository3")
|
||||
}
|
||||
|
||||
}
|
||||
@@ -41,7 +41,7 @@ func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compres
|
||||
if encryption {
|
||||
key = "0123456789abcdef0123456789abcdef"
|
||||
}
|
||||
err := chunk.Encrypt([]byte(key), "")
|
||||
err := chunk.Encrypt([]byte(key), "", false)
|
||||
if err != nil {
|
||||
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
|
||||
}
|
||||
|
||||
@@ -8,11 +8,13 @@ import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"crypto/aes"
|
||||
"crypto/rsa"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
@@ -20,6 +22,8 @@ import (
|
||||
"runtime"
|
||||
|
||||
"github.com/bkaradzic/go-lz4"
|
||||
"github.com/minio/highwayhash"
|
||||
"github.com/klauspost/reedsolomon"
|
||||
)
|
||||
|
||||
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
|
||||
@@ -60,10 +64,20 @@ type Chunk struct {
|
||||
|
||||
config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
|
||||
// by the config
|
||||
|
||||
isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
|
||||
// encryption, where a snapshot chunk is not encrypted by RSA
|
||||
|
||||
isBroken bool // Indicates the chunk did not download correctly. This is only used for -persist (allowFailures) mode
|
||||
}
|
||||
|
||||
// Magic word to identify a duplicacy format encrypted file, plus a version number.
|
||||
var ENCRYPTION_HEADER = "duplicacy\000"
|
||||
var ENCRYPTION_BANNER = "duplicacy\000"
|
||||
|
||||
// RSA encrypted chunks start with "duplicacy\002"
|
||||
var ENCRYPTION_VERSION_RSA byte = 2
|
||||
|
||||
var ERASURE_CODING_BANNER = "duplicacy\003"
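The three banners above distinguish the on-disk chunk formats: a plain encrypted chunk ends the 10-byte magic with a zero byte, an RSA-encrypted chunk with version byte 2, and an erasure-coded chunk is wrapped under version byte 3. A small hedged sketch of sniffing the format from the first ten bytes (the function name is local to this example):

package main

import "fmt"

// chunkFormat reports which of the three layouts a chunk file uses, based on
// the 9-byte magic "duplicacy" plus one version byte, as defined above.
func chunkFormat(data []byte) string {
	if len(data) < 10 || string(data[:9]) != "duplicacy" {
		return "not a duplicacy encrypted/encoded chunk"
	}
	switch data[9] {
	case 0:
		return "encrypted chunk (AES-GCM, version 0)"
	case 2:
		return "RSA-encrypted chunk (version 2)"
	case 3:
		return "erasure-coded chunk"
	default:
		return fmt.Sprintf("unknown version %d", data[9])
	}
}

func main() {
	fmt.Println(chunkFormat([]byte("duplicacy\x00...")))
	fmt.Println(chunkFormat([]byte("duplicacy\x03...")))
}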
// CreateChunk creates a new chunk.
|
||||
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
|
||||
@@ -113,6 +127,8 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
|
||||
chunk.hash = nil
|
||||
chunk.id = ""
|
||||
chunk.size = 0
|
||||
chunk.isSnapshot = false
|
||||
chunk.isBroken = false
|
||||
}
|
||||
|
||||
// Write implements the Writer interface.
|
||||
@@ -170,7 +186,7 @@ func (chunk *Chunk) VerifyID() {
|
||||
|
||||
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not empty, the actual
|
||||
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
|
||||
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
|
||||
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {
|
||||
|
||||
var aesBlock cipher.Block
|
||||
var gcm cipher.AEAD
|
||||
@@ -186,8 +202,17 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
if len(encryptionKey) > 0 {
|
||||
|
||||
key := encryptionKey
|
||||
|
||||
if len(derivationKey) > 0 {
|
||||
usingRSA := false
|
||||
// Enable RSA encryption only when the chunk is not a snapshot chunk
|
||||
if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
|
||||
randomKey := make([]byte, 32)
|
||||
_, err := rand.Read(randomKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key = randomKey
|
||||
usingRSA = true
|
||||
} else if len(derivationKey) > 0 {
|
||||
hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
|
||||
hasher.Write(encryptionKey)
|
||||
key = hasher.Sum(nil)
|
||||
@@ -204,7 +229,21 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
}
|
||||
|
||||
// Start with the magic number and the version number.
|
||||
encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
|
||||
if usingRSA {
|
||||
// RSA encryption starts "duplicacy\002"
|
||||
encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
|
||||
encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
|
||||
|
||||
// Then the encrypted key
|
||||
encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPublicKey, key, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
|
||||
encryptedBuffer.Write(encryptedKey)
|
||||
} else {
|
||||
encryptedBuffer.Write([]byte(ENCRYPTION_BANNER))
|
||||
}
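When RSA is enabled for file chunks, the chunk key is a fresh random 32-byte key that is itself encrypted with RSA-OAEP and stored right after the banner, so only the holder of the private key can recover it; snapshot chunks skip this path. The following is a minimal, standalone sketch of that key-wrapping step under the layout shown above; it is an illustration, not the Duplicacy implementation itself.

package main

import (
	"bytes"
	"crypto/rand"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// wrapChunkKey generates a random 32-byte chunk key, encrypts it with
// RSA-OAEP, and writes the banner, version byte 2, the 2-byte key length and
// the encrypted key, mirroring the layout used above.
func wrapChunkKey(buffer *bytes.Buffer, publicKey *rsa.PublicKey) ([]byte, error) {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		return nil, err
	}
	encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, publicKey, key, nil)
	if err != nil {
		return nil, err
	}
	buffer.WriteString("duplicacy")
	buffer.WriteByte(2) // the RSA encryption version
	binary.Write(buffer, binary.LittleEndian, uint16(len(encryptedKey)))
	buffer.Write(encryptedKey)
	return key, nil // the caller uses the plain key for AES-GCM
}

func main() {
	privateKey, _ := rsa.GenerateKey(rand.Reader, 2048)
	var buffer bytes.Buffer
	key, err := wrapChunkKey(&buffer, &privateKey.PublicKey)
	fmt.Println(len(key), buffer.Len(), err) // 32, 10+2+256, <nil>
}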
// Followed by the nonce
|
||||
nonce = make([]byte, gcm.NonceSize())
|
||||
@@ -214,10 +253,9 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
}
|
||||
encryptedBuffer.Write(nonce)
|
||||
offset = encryptedBuffer.Len()
|
||||
|
||||
}
|
||||
|
||||
// offset is either 0 or the length of header + nonce
|
||||
// offset is either 0 or the length of banner + nonce
|
||||
|
||||
if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
|
||||
deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
|
||||
@@ -242,26 +280,79 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
|
||||
}
|
||||
|
||||
if len(encryptionKey) == 0 {
|
||||
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||
return nil
|
||||
if len(encryptionKey) > 0 {
|
||||
|
||||
// PKCS7 is used. The sizes of compressed chunks leak information about the original chunks so we want the padding sizes
|
||||
// to be the maximum allowed by PKCS7
|
||||
dataLength := encryptedBuffer.Len() - offset
|
||||
paddingLength := 256 - dataLength%256
|
||||
|
||||
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
|
||||
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
|
||||
|
||||
// The encrypted data will be appended to the duplicacy banner and the nonce.
|
||||
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
|
||||
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
|
||||
|
||||
encryptedBuffer.Truncate(len(encryptedBytes))
|
||||
}
|
||||
|
||||
// PKCS7 is used. Compressed chunk sizes leak information about the original chunks so we want the padding sizes
|
||||
// to be the maximum allowed by PKCS7
|
||||
dataLength := encryptedBuffer.Len() - offset
|
||||
paddingLength := 256 - dataLength%256
|
||||
if chunk.config.DataShards == 0 || chunk.config.ParityShards == 0 {
|
||||
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||
return
|
||||
}
|
||||
|
||||
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
|
||||
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
|
||||
// Start erasure coding
|
||||
encoder, err := reedsolomon.New(chunk.config.DataShards, chunk.config.ParityShards)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
chunkSize := len(encryptedBuffer.Bytes())
|
||||
shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
|
||||
// Append zeros so that the last shard has the same size as the others
|
||||
encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
|
||||
// Grow the buffer for parity shards
|
||||
encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
|
||||
// Now create one slice for each shard, reusing the data in the buffer
|
||||
data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
|
||||
for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
|
||||
data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
|
||||
}
|
||||
// This populates the parity shards
|
||||
encoder.Encode(data)
|
||||
|
||||
// The encrypted data will be appended to the duplicacy header and the nonce.
|
||||
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
|
||||
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
|
||||
// Prepare the chunk to be uploaded
|
||||
chunk.buffer.Reset()
|
||||
// First the banner
|
||||
chunk.buffer.Write([]byte(ERASURE_CODING_BANNER))
|
||||
// Then the header, which includes the chunk size, the data/parity shard counts, and a 2-byte checksum
|
||||
header := make([]byte, 14)
|
||||
binary.LittleEndian.PutUint64(header[0:], uint64(chunkSize))
|
||||
binary.LittleEndian.PutUint16(header[8:], uint16(chunk.config.DataShards))
|
||||
binary.LittleEndian.PutUint16(header[10:], uint16(chunk.config.ParityShards))
|
||||
header[12] = header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10]
|
||||
header[13] = header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11]
|
||||
chunk.buffer.Write(header)
|
||||
// Calculate the highway hash for each shard
|
||||
hashKey := make([]byte, 32)
|
||||
for _, part := range data {
|
||||
hasher, err := highwayhash.New(hashKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = hasher.Write(part)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
chunk.buffer.Write(hasher.Sum(nil))
|
||||
}
|
||||
|
||||
encryptedBuffer.Truncate(len(encryptedBytes))
|
||||
|
||||
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||
// Copy the data
|
||||
for _, part := range data {
|
||||
chunk.buffer.Write(part)
|
||||
}
|
||||
// Append the header again for redundancy
|
||||
chunk.buffer.Write(header)
|
||||
|
||||
return nil
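Putting the pieces above together, an erasure-coded chunk file is laid out as the banner, a 14-byte header, one 32-byte HighwayHash per shard, the padded shards themselves, and the header repeated at the end. The expected file size therefore follows directly from the chunk size and the shard counts; a small worked sketch (standalone, using the layout described above):

package main

import "fmt"

// erasureCodedFileSize computes the on-disk size of an erasure-coded chunk:
// 10-byte banner + 14-byte header + a 32-byte hash per shard + the padded
// shards + the 14-byte trailing copy of the header.
func erasureCodedFileSize(chunkSize, dataShards, parityShards int) int {
	shardSize := (chunkSize + dataShards - 1) / dataShards // last shard is zero-padded
	totalShards := dataShards + parityShards
	return 10 + 14 + totalShards*32 + totalShards*shardSize + 14
}

func main() {
	// With the 5+2 configuration used in the tests, a 1,000,000-byte encrypted
	// chunk becomes 5 data shards of 200,000 bytes plus 2 parity shards:
	// 10 + 14 + 7*32 + 7*200000 + 14 = 1,400,262 bytes, i.e. 40% overhead for
	// the parity data plus a fixed 262-byte envelope.
	fmt.Println(erasureCodedFileSize(1000000, 5, 2))
}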
@@ -291,6 +382,122 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
}()
|
||||
|
||||
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||
bannerLength := len(ENCRYPTION_BANNER)
|
||||
|
||||
if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {
|
||||
|
||||
// The chunk was encoded with erasure coding
|
||||
if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
|
||||
return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
|
||||
}
|
||||
// Check the header checksum
|
||||
header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
|
||||
if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
|
||||
header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
|
||||
return fmt.Errorf("Erasure coding header corrupted (%x)", header)
|
||||
}
|
||||
|
||||
// Read the parameters
|
||||
chunkSize := int(binary.LittleEndian.Uint64(header[0:8]))
|
||||
dataShards := int(binary.LittleEndian.Uint16(header[8:10]))
|
||||
parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
|
||||
shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
|
||||
// This is the length the chunk file should have
|
||||
expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
|
||||
// The minimum length from which the chunk can still be recovered
|
||||
minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
|
||||
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
|
||||
if len(encryptedBuffer.Bytes()) > expectedLength {
|
||||
LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
|
||||
} else if len(encryptedBuffer.Bytes()) == expectedLength {
|
||||
// Correct size; fall through
|
||||
} else if len(encryptedBuffer.Bytes()) > minimumLength {
|
||||
LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
|
||||
} else {
|
||||
return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
|
||||
}
|
||||
|
||||
// Where the hashes start
|
||||
hashOffset := bannerLength + len(header)
|
||||
// Where the data starts
|
||||
dataOffset := hashOffset + (dataShards + parityShards) * 32
|
||||
|
||||
data := make([][]byte, dataShards + parityShards)
|
||||
recoveryNeeded := false
|
||||
hashKey := make([]byte, 32)
|
||||
availableShards := 0
|
||||
for i := 0; i < dataShards + parityShards; i++ {
|
||||
start := dataOffset + i * shardSize
|
||||
if start + shardSize > len(encryptedBuffer.Bytes()) {
|
||||
// the current shard is incomplete
|
||||
break
|
||||
}
|
||||
// Now verify the hash
|
||||
hasher, err := highwayhash.New(hashKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
|
||||
if i < dataShards {
|
||||
recoveryNeeded = true
|
||||
}
|
||||
} else {
|
||||
// The shard is good
|
||||
data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
|
||||
availableShards++
|
||||
if availableShards >= dataShards {
|
||||
// We have enough shards to recover; skip the remaining shards
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !recoveryNeeded {
|
||||
// Remove the padding zeros from the last shard
|
||||
encryptedBuffer.Truncate(dataOffset + chunkSize)
|
||||
// Skip the header and hashes
|
||||
encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
|
||||
} else {
|
||||
if availableShards < dataShards {
|
||||
return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
|
||||
}
|
||||
|
||||
// Show the validity of shards using a string of * and -
|
||||
slots := ""
|
||||
for _, part := range data {
|
||||
if len(part) != 0 {
|
||||
slots += "*"
|
||||
} else {
|
||||
slots += "-"
|
||||
}
|
||||
}
|
||||
|
||||
LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
|
||||
encoder, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = encoder.Reconstruct(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
|
||||
buffer := AllocateChunkBuffer()
|
||||
buffer.Reset()
|
||||
for i := 0; i < dataShards; i++ {
|
||||
buffer.Write(data[i])
|
||||
}
|
||||
buffer.Truncate(chunkSize)
|
||||
|
||||
ReleaseChunkBuffer(encryptedBuffer)
|
||||
encryptedBuffer = buffer
|
||||
}
|
||||
|
||||
}
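The recovery path above discards any shard whose HighwayHash does not match and asks the Reed-Solomon codec to rebuild the missing pieces. A compact, self-contained illustration of that mechanism with the klauspost/reedsolomon package follows; the shard sizes and the simulated loss are arbitrary example values.

package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/reedsolomon"
)

func main() {
	const dataShards, parityShards, shardSize = 5, 2, 8

	encoder, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		panic(err)
	}

	// Build 5 data shards with content and 2 empty parity shards.
	shards := make([][]byte, dataShards+parityShards)
	for i := 0; i < dataShards; i++ {
		shards[i] = bytes.Repeat([]byte{byte(i + 1)}, shardSize)
	}
	for i := dataShards; i < dataShards+parityShards; i++ {
		shards[i] = make([]byte, shardSize)
	}
	if err := encoder.Encode(shards); err != nil { // fills the parity shards
		panic(err)
	}

	// Simulate two damaged shards by marking them as missing (nil), which is
	// how a failed hash check is represented above.
	shards[1] = nil
	shards[6] = nil

	if err := encoder.Reconstruct(shards); err != nil {
		panic(err)
	}
	ok, _ := encoder.Verify(shards)
	fmt.Println("recovered:", ok, "shard 1:", shards[1])
}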
if len(encryptionKey) > 0 {
|
||||
|
||||
@@ -308,6 +515,41 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
key = hasher.Sum(nil)
|
||||
}
|
||||
|
||||
if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
|
||||
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
||||
}
|
||||
|
||||
if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
|
||||
return fmt.Errorf("The storage doesn't seem to be encrypted")
|
||||
}
|
||||
|
||||
encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
|
||||
if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
|
||||
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
|
||||
}
|
||||
|
||||
if encryptionVersion == ENCRYPTION_VERSION_RSA {
|
||||
if chunk.config.rsaPrivateKey == nil {
|
||||
LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
|
||||
return fmt.Errorf("An RSA private key is required to decrypt the chunk")
|
||||
}
|
||||
|
||||
encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
|
||||
|
||||
if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
|
||||
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
||||
}
|
||||
|
||||
encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
|
||||
bannerLength += 2 + int(encryptedKeyLength)
|
||||
|
||||
decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
key = decryptedKey
|
||||
}
|
||||
|
||||
aesBlock, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -318,22 +560,8 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
||||
return err
|
||||
}
|
||||
|
||||
headerLength := len(ENCRYPTION_HEADER)
|
||||
offset = headerLength + gcm.NonceSize()
|
||||
|
||||
if len(encryptedBuffer.Bytes()) < offset {
|
||||
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
||||
}
|
||||
|
||||
if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
|
||||
return fmt.Errorf("The storage doesn't seem to be encrypted")
|
||||
}
|
||||
|
||||
if encryptedBuffer.Bytes()[headerLength-1] != 0 {
|
||||
return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
|
||||
}
|
||||
|
||||
nonce := encryptedBuffer.Bytes()[headerLength:offset]
|
||||
offset = bannerLength + gcm.NonceSize()
|
||||
nonce := encryptedBuffer.Bytes()[bannerLength:offset]
|
||||
|
||||
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
|
||||
encryptedBuffer.Bytes()[offset:], nil)
|
||||
|
||||
@@ -7,11 +7,51 @@ package duplicacy
|
||||
import (
|
||||
"bytes"
|
||||
crypto_rand "crypto/rand"
|
||||
"crypto/rsa"
|
||||
"math/rand"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestChunk(t *testing.T) {
|
||||
func TestErasureCoding(t *testing.T) {
|
||||
key := []byte("duplicacydefault")
|
||||
|
||||
config := CreateConfig()
|
||||
config.HashKey = key
|
||||
config.IDKey = key
|
||||
config.MinimumChunkSize = 100
|
||||
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
|
||||
config.DataShards = 5
|
||||
config.ParityShards = 2
|
||||
|
||||
chunk := CreateChunk(config, true)
|
||||
chunk.Reset(true)
|
||||
data := make([]byte, 100)
|
||||
for i := 0; i < len(data); i++ {
|
||||
data[i] = byte(i)
|
||||
}
|
||||
chunk.Write(data)
|
||||
err := chunk.Encrypt([]byte(""), "", false)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to encrypt the test data: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
encryptedData := make([]byte, chunk.GetLength())
|
||||
copy(encryptedData, chunk.GetBytes())
|
||||
|
||||
crypto_rand.Read(encryptedData[280:300])
|
||||
|
||||
chunk.Reset(false)
|
||||
chunk.Write(encryptedData)
|
||||
err = chunk.Decrypt([]byte(""), "")
|
||||
if err != nil {
|
||||
t.Errorf("Failed to decrypt the data: %v", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func TestChunkBasic(t *testing.T) {
|
||||
|
||||
key := []byte("duplicacydefault")
|
||||
|
||||
@@ -22,7 +62,19 @@ func TestChunk(t *testing.T) {
|
||||
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
|
||||
maxSize := 1000000
|
||||
|
||||
remainderLength := -1
|
||||
if testRSAEncryption {
|
||||
privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to generate a random private key: %v", err)
|
||||
}
|
||||
config.rsaPrivateKey = privateKey
|
||||
config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
|
||||
}
|
||||
|
||||
if testErasureCoding {
|
||||
config.DataShards = 5
|
||||
config.ParityShards = 2
|
||||
}
|
||||
|
||||
for i := 0; i < 500; i++ {
|
||||
|
||||
@@ -37,7 +89,7 @@ func TestChunk(t *testing.T) {
|
||||
hash := chunk.GetHash()
|
||||
id := chunk.GetID()
|
||||
|
||||
err := chunk.Encrypt(key, "")
|
||||
err := chunk.Encrypt(key, "", false)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to encrypt the data: %v", err)
|
||||
continue
|
||||
@@ -46,10 +98,14 @@ func TestChunk(t *testing.T) {
|
||||
encryptedData := make([]byte, chunk.GetLength())
|
||||
copy(encryptedData, chunk.GetBytes())
|
||||
|
||||
if remainderLength == -1 {
|
||||
remainderLength = len(encryptedData) % 256
|
||||
} else if len(encryptedData)%256 != remainderLength {
|
||||
t.Errorf("Incorrect padding size")
|
||||
if testErasureCoding {
|
||||
offset := 24 + 32 * 7
|
||||
start := rand.Int() % (len(encryptedData) - offset) + offset
|
||||
length := (len(encryptedData) - offset) / 7
|
||||
if start + length > len(encryptedData) {
|
||||
length = len(encryptedData) - start
|
||||
}
|
||||
crypto_rand.Read(encryptedData[start: start+length])
|
||||
}
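The offsets in this branch follow the erasure-coded layout sketched earlier: the banner plus header take 24 bytes and the 7 shard hashes another 224, so shard data begins at byte 248, and the corruption length works out to one shard plus two bytes. For almost every starting position that run damages at most two adjacent shards, which the two parity shards can reconstruct. A short worked check under assumed example sizes:

package main

import "fmt"

func main() {
	// Assumed sizes for illustration: a 350,000-byte encrypted chunk with the
	// 5+2 erasure coding configuration used in this test.
	const chunkSize, dataShards, parityShards = 350000, 5, 2
	shardSize := (chunkSize + dataShards - 1) / dataShards
	fileSize := 10 + 14 + (dataShards+parityShards)*(32+shardSize) + 14

	offset := 24 + 32*7               // banner + header + 7 shard hashes = 248
	length := (fileSize - offset) / 7 // works out to shardSize + 2

	// A corruption run of shardSize+2 bytes overlaps two adjacent shards for
	// almost every starting position (three only when it starts within the
	// last couple of bytes of a shard), and two damaged shards are exactly
	// what two parity shards can rebuild.
	fmt.Println(shardSize, length, length == shardSize+2)
}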
chunk.Reset(false)
|
||||
|
||||
@@ -31,11 +31,15 @@ type ChunkDownloadCompletion struct {
|
||||
// corresponding ChunkDownloadTask is sent to the downloading goroutine. Once a chunk is downloaded, it will be
|
||||
// inserted in the completed task list.
|
||||
type ChunkDownloader struct {
|
||||
totalChunkSize int64 // Total chunk size
|
||||
downloadedChunkSize int64 // Downloaded chunk size
|
||||
|
||||
config *Config // Associated config
|
||||
storage Storage // Download from this storage
|
||||
snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
|
||||
showStatistics bool // Show a stats log for each chunk if true
|
||||
threads int // Number of threads
|
||||
allowFailures bool // Whether to fail fast on a download error, or to continue with the remaining chunks
|
||||
|
||||
taskList []ChunkDownloadTask // The list of chunks to be downloaded
|
||||
completedTasks map[int]bool // Store downloaded chunks
|
||||
@@ -46,20 +50,21 @@ type ChunkDownloader struct {
|
||||
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
|
||||
|
||||
startTime int64 // The time it starts downloading
|
||||
totalChunkSize int64 // Total chunk size
|
||||
downloadedChunkSize int64 // Downloaded chunk size
|
||||
numberOfDownloadedChunks int // The number of chunks that have been downloaded
|
||||
numberOfDownloadingChunks int // The number of chunks still being downloaded
|
||||
numberOfActiveChunks int // The number of chunks that are being downloaded or have been downloaded but not reclaimed
|
||||
|
||||
NumberOfFailedChunks int // The number of chunks that can't be downloaded
|
||||
}
|
||||
|
||||
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
|
||||
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkDownloader {
|
||||
downloader := &ChunkDownloader{
|
||||
config: config,
|
||||
storage: storage,
|
||||
snapshotCache: snapshotCache,
|
||||
showStatistics: showStatistics,
|
||||
threads: threads,
|
||||
allowFailures: allowFailures,
|
||||
|
||||
taskList: nil,
|
||||
completedTasks: make(map[int]bool),
|
||||
@@ -126,6 +131,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
|
||||
|
||||
// AddChunk adds a single chunk to the download list.
|
||||
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
|
||||
|
||||
task := ChunkDownloadTask{
|
||||
chunkIndex: len(downloader.taskList),
|
||||
chunkHash: chunkHash,
|
||||
@@ -197,6 +203,16 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
|
||||
downloader.lastChunkIndex = chunkIndex
|
||||
}
|
||||
|
||||
// GetLastDownloadedChunk returns the most recently downloaded chunk and its hash
|
||||
func (downloader *ChunkDownloader) GetLastDownloadedChunk() (chunk *Chunk, chunkHash string) {
|
||||
if downloader.lastChunkIndex >= len(downloader.taskList) {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
task := downloader.taskList[downloader.lastChunkIndex]
|
||||
return task.chunk, task.chunkHash
|
||||
}
|
||||
|
||||
// WaitForChunk waits until the specified chunk is ready
|
||||
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
|
||||
|
||||
@@ -239,10 +255,57 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
|
||||
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||
downloader.numberOfDownloadedChunks++
|
||||
downloader.numberOfDownloadingChunks--
|
||||
if completion.chunk.isBroken {
|
||||
downloader.NumberOfFailedChunks++
|
||||
}
|
||||
}
|
||||
return downloader.taskList[chunkIndex].chunk
|
||||
}
|
||||
|
||||
// WaitForCompletion waits until all chunks have been downloaded
|
||||
func (downloader *ChunkDownloader) WaitForCompletion() {
|
||||
|
||||
// Tasks in completedTasks are still counted in numberOfActiveChunks, so remove them from the count first
|
||||
downloader.numberOfActiveChunks -= len(downloader.completedTasks)
|
||||
|
||||
// find the completed task with the largest index; we'll start from the next index
|
||||
for index := range downloader.completedTasks {
|
||||
if downloader.lastChunkIndex < index {
|
||||
downloader.lastChunkIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
// Loop until no download tasks remain in progress
|
||||
for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex + 1 < len(downloader.taskList) {
|
||||
|
||||
// Wait for a completion event first
|
||||
if downloader.numberOfActiveChunks > 0 {
|
||||
completion := <-downloader.completionChannel
|
||||
downloader.config.PutChunk(completion.chunk)
|
||||
downloader.numberOfActiveChunks--
|
||||
downloader.numberOfDownloadedChunks++
|
||||
downloader.numberOfDownloadingChunks--
|
||||
if completion.chunk.isBroken {
|
||||
downloader.NumberOfFailedChunks++
|
||||
}
|
||||
}
|
||||
|
||||
// Pass the tasks one by one to the download queue
|
||||
if downloader.lastChunkIndex + 1 < len(downloader.taskList) {
|
||||
task := &downloader.taskList[downloader.lastChunkIndex + 1]
|
||||
if task.isDownloading {
|
||||
downloader.lastChunkIndex++
|
||||
continue
|
||||
}
|
||||
downloader.taskQueue <- *task
|
||||
task.isDownloading = true
|
||||
downloader.numberOfDownloadingChunks++
|
||||
downloader.numberOfActiveChunks++
|
||||
downloader.lastChunkIndex++
|
||||
}
|
||||
}
|
||||
}
|
||||
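The allowFailures path added above turns per-chunk download errors into broken chunks that are counted rather than aborting the whole run. A rough usage sketch, written as if inside the same package and using only the functions visible in this diff (CreateChunkDownloader, AddChunk, WaitForChunk, Stop, NumberOfFailedChunks); config, storage and the chunk hashes are assumed to come from elsewhere:

// Sketch only; not part of this diff.
func downloadAll(config *Config, storage Storage, hashes []string) int {
	// The final 'true' is the new allowFailures flag: broken chunks are
	// counted in NumberOfFailedChunks instead of terminating the run.
	downloader := CreateChunkDownloader(config, storage, nil, false, 4, true)

	indices := make([]int, 0, len(hashes))
	for _, hash := range hashes {
		indices = append(indices, downloader.AddChunk(hash))
	}

	for _, index := range indices {
		chunk := downloader.WaitForChunk(index)
		if chunk.isBroken {
			continue // failure already counted; skip this chunk's data
		}
		_ = chunk.GetBytes() // consume the chunk here
	}

	downloader.Stop()
	return downloader.NumberOfFailedChunks
}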
|
||||
// Stop terminates all downloading goroutines
|
||||
func (downloader *ChunkDownloader) Stop() {
|
||||
for downloader.numberOfDownloadingChunks > 0 {
|
||||
@@ -251,7 +314,10 @@ func (downloader *ChunkDownloader) Stop() {
|
||||
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||
downloader.numberOfDownloadedChunks++
|
||||
downloader.numberOfDownloadingChunks--
|
||||
}
|
||||
if completion.chunk.isBroken {
|
||||
downloader.NumberOfFailedChunks++
|
||||
}
|
||||
}
|
||||
|
||||
for i := range downloader.completedTasks {
|
||||
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
||||
@@ -305,13 +371,22 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
// will be set up before the encryption
|
||||
chunk.Reset(false)
|
||||
|
||||
// If failures are allowed, complete the task properly
|
||||
completeFailedChunk := func(chunk *Chunk) {
|
||||
if downloader.allowFailures {
|
||||
chunk.isBroken = true
|
||||
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||
}
|
||||
}
|
||||
|
||||
const MaxDownloadAttempts = 3
|
||||
for downloadAttempt := 0; ; downloadAttempt++ {
|
||||
|
||||
// Find the chunk by ID first.
|
||||
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||
completeFailedChunk(chunk)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -319,7 +394,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
// No chunk is found. Have to find it in the fossil pool again.
|
||||
fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
|
||||
if err != nil {
|
||||
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||
completeFailedChunk(chunk)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -341,11 +417,12 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
continue
|
||||
}
|
||||
|
||||
completeFailedChunk(chunk)
|
||||
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
||||
if err != nil {
|
||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
||||
} else {
|
||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -354,7 +431,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
// downloading again.
|
||||
err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
|
||||
if err != nil {
|
||||
LOG_FATAL("DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
|
||||
completeFailedChunk(chunk)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -371,7 +449,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
chunk.Reset(false)
|
||||
continue
|
||||
} else {
|
||||
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
|
||||
completeFailedChunk(chunk)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -383,7 +462,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
chunk.Reset(false)
|
||||
continue
|
||||
} else {
|
||||
LOG_ERROR("DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||
completeFailedChunk(chunk)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
}
|
||||
}
|
||||
@@ -395,7 +475,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
||||
chunk.Reset(false)
|
||||
continue
|
||||
} else {
|
||||
LOG_FATAL("DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
||||
completeFailedChunk(chunk)
|
||||
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -128,7 +128,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
|
||||
}
|
||||
|
||||
// Encrypt the chunk only after we know that it must be uploaded.
|
||||
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
|
||||
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
|
||||
if err != nil {
|
||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
|
||||
return false
|
||||
|
||||
@@ -101,7 +101,7 @@ func TestUploaderAndDownloader(t *testing.T) {
|
||||
|
||||
chunkUploader.Stop()
|
||||
|
||||
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
||||
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads, false)
|
||||
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
||||
|
||||
for _, chunk := range chunks {
|
||||
|
||||
@@ -9,15 +9,21 @@ import (
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"hash"
|
||||
"os"
|
||||
"strings"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"sync/atomic"
|
||||
"io/ioutil"
|
||||
"reflect"
|
||||
|
||||
blake2 "github.com/minio/blake2b-simd"
|
||||
)
|
||||
@@ -29,8 +35,8 @@ var DEFAULT_KEY = []byte("duplicacy")
|
||||
// standard zlib levels of -1 to 9.
|
||||
var DEFAULT_COMPRESSION_LEVEL = 100
|
||||
|
||||
// The new header of the config file (to differentiate from the old format where the salt and iterations are fixed)
|
||||
var CONFIG_HEADER = "duplicacy\001"
|
||||
// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
|
||||
var CONFIG_BANNER = "duplicacy\001"
|
||||
|
||||
// The length of the salt used in the new format
|
||||
var CONFIG_SALT_LENGTH = 32
|
||||
@@ -65,6 +71,14 @@ type Config struct {
|
||||
// for encrypting a non-chunk file
|
||||
FileKey []byte `json:"-"`
|
||||
|
||||
// for erasure coding
|
||||
DataShards int `json:"data-shards"`
ParityShards int `json:"parity-shards"`
|
||||
|
||||
// for RSA encryption
|
||||
rsaPrivateKey *rsa.PrivateKey
|
||||
rsaPublicKey *rsa.PublicKey
|
||||
|
||||
chunkPool chan *Chunk
|
||||
numberOfChunks int32
|
||||
dryRun bool
|
||||
@@ -80,10 +94,15 @@ type jsonableConfig struct {
|
||||
IDKey string `json:"id-key"`
|
||||
ChunkKey string `json:"chunk-key"`
|
||||
FileKey string `json:"file-key"`
|
||||
RSAPublicKey string `json:"rsa-public-key"`
|
||||
}
|
||||
|
||||
func (config *Config) MarshalJSON() ([]byte, error) {
|
||||
|
||||
publicKey := []byte {}
|
||||
if config.rsaPublicKey != nil {
|
||||
publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
|
||||
}
|
||||
return json.Marshal(&jsonableConfig{
|
||||
aliasedConfig: (*aliasedConfig)(config),
|
||||
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
|
||||
@@ -91,6 +110,7 @@ func (config *Config) MarshalJSON() ([]byte, error) {
|
||||
IDKey: hex.EncodeToString(config.IDKey),
|
||||
ChunkKey: hex.EncodeToString(config.ChunkKey),
|
||||
FileKey: hex.EncodeToString(config.FileKey),
|
||||
RSAPublicKey: hex.EncodeToString(publicKey),
|
||||
})
|
||||
}
|
||||
|
||||
@@ -120,6 +140,19 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
|
||||
return fmt.Errorf("Invalid representation of the file key in the config")
|
||||
}
|
||||
|
||||
if publicKey, err := hex.DecodeString(aliased.RSAPublicKey); err != nil {
|
||||
return fmt.Errorf("Invalid hex encoding of the RSA public key in the config")
|
||||
} else if len(publicKey) > 0 {
|
||||
parsedKey, err := x509.ParsePKIXPublicKey(publicKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Invalid RSA public key in the config: %v", err)
|
||||
}
|
||||
config.rsaPublicKey = parsedKey.(*rsa.PublicKey)
|
||||
if config.rsaPublicKey == nil {
|
||||
return fmt.Errorf("Unsupported public key type %s in the config", reflect.TypeOf(parsedKey))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -140,6 +173,33 @@ func (config *Config) Print() {
|
||||
LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
|
||||
LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
|
||||
LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
|
||||
|
||||
LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
|
||||
LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)
|
||||
|
||||
if len(config.ChunkKey) > 0 {
|
||||
LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
|
||||
}
|
||||
|
||||
if len(config.FileKey) > 0 {
|
||||
LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
|
||||
}
|
||||
|
||||
if config.DataShards != 0 && config.ParityShards != 0 {
|
||||
LOG_TRACE("CONFIG_INFO", "Data shards: %d, parity shards: %d", config.DataShards, config.ParityShards)
|
||||
}
|
||||
|
||||
if config.rsaPublicKey != nil {
|
||||
pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
|
||||
|
||||
publicKey := pem.EncodeToMemory(&pem.Block{
|
||||
Type: "PUBLIC KEY",
|
||||
Bytes: pkisPublicKey,
|
||||
})
|
||||
|
||||
LOG_TRACE("CONFIG_INFO", "RSA public key: %s", publicKey)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
|
||||
@@ -335,11 +395,11 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
if len(configFile.GetBytes()) < len(ENCRYPTION_HEADER) {
|
||||
if len(configFile.GetBytes()) < len(ENCRYPTION_BANNER) {
|
||||
return nil, false, fmt.Errorf("The storage has an invalid config file")
|
||||
}
|
||||
|
||||
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)-1]) == ENCRYPTION_HEADER[:len(ENCRYPTION_HEADER)-1] && len(password) == 0 {
|
||||
if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)-1]) == ENCRYPTION_BANNER[:len(ENCRYPTION_BANNER)-1] && len(password) == 0 {
|
||||
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
|
||||
}
|
||||
|
||||
@@ -347,23 +407,23 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
||||
|
||||
if len(password) > 0 {
|
||||
|
||||
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)]) == ENCRYPTION_HEADER {
|
||||
if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)]) == ENCRYPTION_BANNER {
|
||||
// This is the old config format with a static salt and a fixed number of iterations
|
||||
masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
|
||||
LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
|
||||
} else if string(configFile.GetBytes()[:len(CONFIG_HEADER)]) == CONFIG_HEADER {
|
||||
} else if string(configFile.GetBytes()[:len(CONFIG_BANNER)]) == CONFIG_BANNER {
|
||||
// This is the new config format with a random salt and a configurable number of iterations
|
||||
encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4
|
||||
|
||||
// Extract the salt and the number of iterations
|
||||
saltStart := configFile.GetBytes()[len(CONFIG_HEADER):]
|
||||
saltStart := configFile.GetBytes()[len(CONFIG_BANNER):]
|
||||
iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
|
||||
LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
|
||||
masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))
|
||||
|
||||
// Copy to a temporary buffer to replace the header and remove the salt and the number of riterations
|
||||
// Copy to a temporary buffer to replace the banner and remove the salt and the number of iterations
|
||||
var encrypted bytes.Buffer
|
||||
encrypted.Write([]byte(ENCRYPTION_HEADER))
|
||||
encrypted.Write([]byte(ENCRYPTION_BANNER))
|
||||
encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])
|
||||
|
||||
configFile.Reset(false)
|
||||
@@ -372,7 +432,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
||||
LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
|
||||
}
|
||||
} else {
|
||||
return nil, true, fmt.Errorf("The config file has an invalid header")
|
||||
return nil, true, fmt.Errorf("The config file has an invalid banner")
|
||||
}
|
||||
|
||||
// Decrypt the config file. masterKey == nil means no encryption.
|
||||
@@ -430,21 +490,21 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
|
||||
|
||||
if len(password) > 0 {
|
||||
// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
|
||||
err = chunk.Encrypt(masterKey, "")
|
||||
err = chunk.Encrypt(masterKey, "", true)
|
||||
if err != nil {
|
||||
LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
|
||||
return false
|
||||
}
|
||||
|
||||
// The new encrypted format for config is CONFIG_HEADER + salt + #iterations + encrypted content
|
||||
// The new encrypted format for config is CONFIG_BANNER + salt + #iterations + encrypted content
|
||||
encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4
|
||||
|
||||
// Copy to a temporary buffer to replace the header and add the salt and the number of iterations
|
||||
// Copy to a temporary buffer to replace the banner and add the salt and the number of iterations
|
||||
var encrypted bytes.Buffer
|
||||
encrypted.Write([]byte(CONFIG_HEADER))
|
||||
encrypted.Write([]byte(CONFIG_BANNER))
|
||||
encrypted.Write(salt)
|
||||
binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
|
||||
encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_HEADER):])
|
||||
encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_BANNER):])
|
||||
|
||||
chunk.Reset(false)
|
||||
chunk.Write(encrypted.Bytes())
|
||||
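For reference, the on-storage layout produced by this change is CONFIG_BANNER, followed by a 32-byte random salt, a little-endian uint32 iteration count, and then the encrypted payload (with its ENCRYPTION_BANNER stripped). A small standalone sketch of assembling and splitting that layout; the constant names below are local to the sketch, with values taken from this diff:

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	configBanner = "duplicacy\001" // value of CONFIG_BANNER in this diff
	saltLength   = 32              // value of CONFIG_SALT_LENGTH in this diff
)

// packConfig assembles banner || salt || iterations || ciphertext.
func packConfig(salt []byte, iterations uint32, ciphertext []byte) []byte {
	var buffer bytes.Buffer
	buffer.WriteString(configBanner)
	buffer.Write(salt)
	binary.Write(&buffer, binary.LittleEndian, iterations)
	buffer.Write(ciphertext)
	return buffer.Bytes()
}

// unpackConfig reverses packConfig.
func unpackConfig(data []byte) (salt []byte, iterations uint32, ciphertext []byte, err error) {
	if len(data) < len(configBanner)+saltLength+4 || string(data[:len(configBanner)]) != configBanner {
		return nil, 0, nil, fmt.Errorf("not a new-format config file")
	}
	rest := data[len(configBanner):]
	salt = rest[:saltLength]
	iterations = binary.LittleEndian.Uint32(rest[saltLength : saltLength+4])
	ciphertext = rest[saltLength+4:]
	return salt, iterations, ciphertext, nil
}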
@@ -477,7 +537,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
|
||||
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
|
||||
// is enabled.
|
||||
func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
|
||||
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool) bool {
|
||||
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string, dataShards int, parityShards int) bool {
|
||||
|
||||
exist, _, _, err := storage.GetFileInfo(0, "config")
|
||||
if err != nil {
|
||||
@@ -496,5 +556,129 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
|
||||
return false
|
||||
}
|
||||
|
||||
if keyFile != "" {
|
||||
config.loadRSAPublicKey(keyFile)
|
||||
}
|
||||
|
||||
config.DataShards = dataShards
|
||||
config.ParityShards = parityShards
|
||||
|
||||
return UploadConfig(storage, config, password, iterations)
|
||||
}
|
||||
|
||||
func (config *Config) loadRSAPublicKey(keyFile string) {
|
||||
encodedKey := []byte(keyFile)
|
||||
var err error
|
||||
|
||||
// keyFile may actually be the key itself, in which case we don't need to read from a file
|
||||
if !strings.Contains(keyFile, "-----BEGIN") {
|
||||
encodedKey, err = ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
decodedKey, _ := pem.Decode(encodedKey)
|
||||
if decodedKey == nil {
|
||||
LOG_ERROR("RSA_PUBLIC", "unrecognized public key in %s", keyFile)
|
||||
return
|
||||
}
|
||||
if decodedKey.Type != "PUBLIC KEY" {
|
||||
LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", decodedKey.Type, keyFile)
|
||||
return
|
||||
}
|
||||
|
||||
parsedKey, err := x509.ParsePKIXPublicKey(decodedKey.Bytes)
|
||||
if err != nil {
|
||||
LOG_ERROR("RSA_PUBLIC", "Failed to parse the public key in %s: %v", keyFile, err)
|
||||
return
|
||||
}
|
||||
|
||||
key, ok := parsedKey.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
|
||||
return
|
||||
}
|
||||
|
||||
config.rsaPublicKey = key
|
||||
}
|
||||
|
||||
// loadRSAPrivateKey loads the specified private key file for decrypting file chunks
|
||||
func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {
|
||||
|
||||
if config.rsaPublicKey == nil {
|
||||
LOG_ERROR("RSA_PUBLIC", "The storage was not encrypted by an RSA key")
|
||||
return
|
||||
}
|
||||
|
||||
encodedKey := []byte(keyFile)
|
||||
var err error
|
||||
|
||||
// keyFile may actually be the key itself, in which case we don't need to read from a file
|
||||
if !strings.Contains(keyFile, "-----BEGIN") {
|
||||
encodedKey, err = ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
decodedKey, _ := pem.Decode(encodedKey)
|
||||
if decodedKey == nil {
|
||||
LOG_ERROR("RSA_PRIVATE", "unrecognized private key in %s", keyFile)
|
||||
return
|
||||
}
|
||||
if decodedKey.Type != "RSA PRIVATE KEY" {
|
||||
LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", decodedKey.Type, keyFile)
|
||||
return
|
||||
}
|
||||
|
||||
var decodedKeyBytes []byte
|
||||
if passphrase != "" {
|
||||
decodedKeyBytes, err = x509.DecryptPEMBlock(decodedKey, []byte(passphrase))
|
||||
} else {
|
||||
decodedKeyBytes = decodedKey.Bytes
|
||||
}
|
||||
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKCS1PrivateKey(decodedKeyBytes); err != nil {
|
||||
if parsedKey, err = x509.ParsePKCS8PrivateKey(decodedKeyBytes); err != nil {
|
||||
LOG_ERROR("RSA_PRIVATE", "Failed to parse the private key in %s: %v", keyFile, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
key, ok := parsedKey.(*rsa.PrivateKey)
|
||||
if !ok {
|
||||
LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
|
||||
return
|
||||
}
|
||||
|
||||
data := make([]byte, 32)
|
||||
_, err = rand.Read(data)
|
||||
if err != nil {
|
||||
LOG_ERROR("RSA_PRIVATE", "Failed to generate random data for testing the private key: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Now test if the private key matches the public key
|
||||
encryptedData, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, config.rsaPublicKey, data, nil)
|
||||
if err != nil {
|
||||
LOG_ERROR("RSA_PRIVATE", "Failed to encrypt random data with the public key: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
decryptedData, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, key, encryptedData, nil)
|
||||
if err != nil {
|
||||
LOG_ERROR("RSA_PRIVATE", "Incorrect private key: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if !bytes.Equal(data, decryptedData) {
|
||||
LOG_ERROR("RSA_PRIVATE", "Decrypted data do not match the original data")
|
||||
return
|
||||
}
|
||||
|
||||
config.rsaPrivateKey = key
|
||||
}
|
||||
|
||||
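loadRSAPublicKey expects a PEM block of type "PUBLIC KEY" holding a PKIX-encoded key, and loadRSAPrivateKey accepts an "RSA PRIVATE KEY" block in PKCS#1 or PKCS#8 form. A standalone sketch of generating a compatible key pair with only the Go standard library; the output file names are arbitrary and not part of this diff:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
	"log"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	// "RSA PRIVATE KEY" + PKCS#1 matches the first format loadRSAPrivateKey tries.
	privatePEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// "PUBLIC KEY" + PKIX matches what loadRSAPublicKey expects.
	publicBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	publicPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: publicBytes})

	if err := ioutil.WriteFile("private.pem", privatePEM, 0600); err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("public.pem", publicPEM, 0644); err != nil {
		log.Fatal(err)
	}
}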
@@ -6,6 +6,7 @@ package duplicacy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/gilbertchen/go-dropbox"
|
||||
@@ -199,6 +200,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
|
||||
}
|
||||
|
||||
defer output.Body.Close()
|
||||
defer ioutil.ReadAll(output.Body)
|
||||
|
||||
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
|
||||
return err
|
||||
|
||||
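The added defer ioutil.ReadAll(output.Body) drains the HTTP response body before it is closed, so the underlying keep-alive connection can be returned to the pool and reused. The same idiom in isolation, using io.Copy into ioutil.Discard (equivalent, without buffering the whole body); this helper and its http.Get call are an illustration, not code from this diff:

import (
	"io"
	"io/ioutil"
	"net/http"
)

// fetch copies an HTTP response body into w, draining any remainder so the
// keep-alive connection can be reused. Sketch only.
func fetch(url string, w io.Writer) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	defer io.Copy(ioutil.Discard, resp.Body) // runs before Close, draining leftovers

	_, err = io.Copy(w, resp.Body)
	return err
}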
@@ -443,7 +443,7 @@ func (files FileInfoCompare) Less(i, j int) bool {
|
||||
|
||||
// ListEntries returns a list of entries representing files and subdirectories under the directory 'path'. Entry paths
|
||||
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
|
||||
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, nobackupFile string, discardAttributes bool) (directoryList []*Entry,
|
||||
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, nobackupFile string, discardAttributes bool, excludeByAttribute bool) (directoryList []*Entry,
|
||||
skippedFiles []string, err error) {
|
||||
|
||||
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
|
||||
@@ -490,7 +490,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
|
||||
}
|
||||
if entry.IsLink() {
|
||||
isRegular := false
|
||||
isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
|
||||
isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
|
||||
if err != nil {
|
||||
LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
|
||||
skippedFiles = append(skippedFiles, entry.Path)
|
||||
@@ -500,7 +500,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
|
||||
if isRegular {
|
||||
entry.Mode ^= uint32(os.ModeSymlink)
|
||||
} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
|
||||
stat, err := os.Stat(filepath.Join(top, entry.Path))
|
||||
stat, err := os.Stat(joinPath(top, entry.Path))
|
||||
if err != nil {
|
||||
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
|
||||
skippedFiles = append(skippedFiles, entry.Path)
|
||||
@@ -513,6 +513,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
|
||||
// path from f.Name(); note that a "/" is appended, assuming a symbolic link is always a directory
|
||||
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
|
||||
}
|
||||
if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
|
||||
continue
|
||||
}
|
||||
entry = newEntry
|
||||
}
|
||||
}
|
||||
@@ -521,6 +524,11 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
|
||||
entry.ReadAttributes(top)
|
||||
}
|
||||
|
||||
if excludeByAttribute && excludedByAttribute(entry.Attributes) {
|
||||
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute", entry.Path)
|
||||
continue
|
||||
}
|
||||
|
||||
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
|
||||
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
|
||||
skippedFiles = append(skippedFiles, entry.Path)
|
||||
|
||||
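The excludedByAttribute helper called above is not part of the hunks shown here; the new test further down sets the macOS Time Machine exclusion attribute (com.apple.metadata:com_apple_backup_excludeItem), which suggests the check keys off that attribute. The following is only a guess at its shape; the signature and map type are assumptions:

// Hypothetical sketch, not the committed implementation: treat an entry as
// excluded when the macOS backup-exclusion extended attribute is present.
func excludedByAttribute(attributes map[string][]byte) bool {
	_, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
	return ok
}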
@@ -9,8 +9,12 @@ import (
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gilbertchen/xattr"
|
||||
)
|
||||
|
||||
func TestEntrySort(t *testing.T) {
|
||||
@@ -173,7 +177,7 @@ func TestEntryList(t *testing.T) {
|
||||
directory := directories[len(directories)-1]
|
||||
directories = directories[:len(directories)-1]
|
||||
entries = append(entries, directory)
|
||||
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false)
|
||||
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false, false)
|
||||
if err != nil {
|
||||
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
|
||||
}
|
||||
@@ -216,3 +220,110 @@ func TestEntryList(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// TestEntryExcludeByAttribute tests the excludeByAttribute parameter to the ListEntries function
|
||||
func TestEntryExcludeByAttribute(t *testing.T) {
|
||||
|
||||
if !(runtime.GOOS == "darwin" || runtime.GOOS == "linux") {
|
||||
t.Skip("skipping test not darwin or linux")
|
||||
}
|
||||
|
||||
testDir := filepath.Join(os.TempDir(), "duplicacy_test")
|
||||
|
||||
os.RemoveAll(testDir)
|
||||
os.MkdirAll(testDir, 0700)
|
||||
|
||||
// Files or folders named with "exclude" below will have the exclusion attribute set on them
|
||||
// When ListEntries is called with excludeByAttribute true, they should be excluded.
|
||||
DATA := [...]string{
|
||||
"excludefile",
|
||||
"includefile",
|
||||
"excludedir/",
|
||||
"excludedir/file",
|
||||
"includedir/",
|
||||
"includedir/includefile",
|
||||
"includedir/excludefile",
|
||||
}
|
||||
|
||||
for _, file := range DATA {
|
||||
fullPath := filepath.Join(testDir, file)
|
||||
if file[len(file)-1] == '/' {
|
||||
err := os.Mkdir(fullPath, 0700)
|
||||
if err != nil {
|
||||
t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := ioutil.WriteFile(fullPath, []byte(file), 0700)
|
||||
if err != nil {
|
||||
t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range DATA {
|
||||
fullPath := filepath.Join(testDir, file)
|
||||
if strings.Contains(file, "exclude") {
|
||||
xattr.Setxattr(fullPath, "com.apple.metadata:com_apple_backup_excludeItem", []byte("com.apple.backupd"))
|
||||
}
|
||||
}
|
||||
|
||||
for _, excludeByAttribute := range [2]bool{true, false} {
|
||||
t.Logf("testing excludeByAttribute: %t", excludeByAttribute)
|
||||
directories := make([]*Entry, 0, 4)
|
||||
directories = append(directories, CreateEntry("", 0, 0, 0))
|
||||
|
||||
entries := make([]*Entry, 0, 4)
|
||||
|
||||
for len(directories) > 0 {
|
||||
directory := directories[len(directories)-1]
|
||||
directories = directories[:len(directories)-1]
|
||||
entries = append(entries, directory)
|
||||
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false, excludeByAttribute)
|
||||
if err != nil {
|
||||
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
|
||||
}
|
||||
directories = append(directories, subdirectories...)
|
||||
}
|
||||
|
||||
entries = entries[1:]
|
||||
|
||||
for _, entry := range entries {
|
||||
t.Logf("entry: %s", entry.Path)
|
||||
}
|
||||
|
||||
i := 0
|
||||
for _, file := range DATA {
|
||||
entryFound := false
|
||||
var entry *Entry
|
||||
for _, entry = range entries {
|
||||
if entry.Path == file {
|
||||
entryFound = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if excludeByAttribute && strings.Contains(file, "exclude") {
|
||||
if entryFound {
|
||||
t.Errorf("file: %s, expected to be excluded but wasn't. attributes: %v", file, entry.Attributes)
|
||||
i++
|
||||
} else {
|
||||
t.Logf("file: %s, excluded", file)
|
||||
}
|
||||
} else {
|
||||
if entryFound {
|
||||
t.Logf("file: %s, included. attributes: %v", file, entry.Attributes)
|
||||
i++
|
||||
} else {
|
||||
t.Errorf("file: %s, expected to be included but wasn't", file)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if !t.Failed() {
|
||||
os.RemoveAll(testDir)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
618 src/duplicacy_filefabricstorage.go Normal file
@@ -0,0 +1,618 @@
|
||||
// Copyright (c) Storage Made Easy. All rights reserved.
|
||||
//
|
||||
// This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
|
||||
// Duplicacy and its derivative works.
|
||||
//
|
||||
|
||||
package duplicacy
|
||||
|
||||
import (
|
||||
"io"
|
||||
"fmt"
|
||||
"time"
|
||||
"sync"
|
||||
"bytes"
|
||||
"errors"
|
||||
"strings"
|
||||
"net/url"
|
||||
"net/http"
|
||||
"math/rand"
|
||||
"io/ioutil"
|
||||
"encoding/xml"
|
||||
"path/filepath"
|
||||
"mime/multipart"
|
||||
)
|
||||
|
||||
// The XML element representing a file returned by the File Fabric server
|
||||
type FileFabricFile struct {
|
||||
XMLName xml.Name
|
||||
ID string `xml:"fi_id"`
|
||||
Path string `xml:"path"`
|
||||
Size int64 `xml:"fi_size"`
|
||||
Type int `xml:"fi_type"`
|
||||
}
|
||||
|
||||
// The XML element representing a file list returned by the server
|
||||
type FileFabricFileList struct {
|
||||
XMLName xml.Name `xml:"files"`
|
||||
Files []FileFabricFile `xml:",any"`
|
||||
}
|
||||
|
||||
type FileFabricStorage struct {
|
||||
StorageBase
|
||||
|
||||
endpoint string // the server
|
||||
authToken string // the authentication token
|
||||
accessToken string // the access token (as returned by getTokenByAuthToken)
|
||||
storageDir string // the path of the storage directory
|
||||
storageDirID string // the id of 'storageDir'
|
||||
|
||||
client *http.Client // the default http client
|
||||
threads int // number of threads
|
||||
maxRetries int // maximum number of tries
|
||||
directoryCache map[string]string // stores ids for directories known to this backend
|
||||
directoryCacheLock sync.Mutex // lock for accessing directoryCache
|
||||
|
||||
isAuthorized bool
|
||||
testMode bool
|
||||
}
|
||||
|
||||
var (
|
||||
errFileFabricAuthorizationFailure = errors.New("Authentication failure")
|
||||
errFileFabricDirectoryExists = errors.New("Directory exists")
|
||||
)
|
||||
|
||||
// The general server response
|
||||
type FileFabricResponse struct {
|
||||
Status string `xml:"status"`
|
||||
Message string `xml:"statusmessage"`
|
||||
}
|
||||
|
||||
// Check the server response and return an error representing the error message it contains
|
||||
func checkFileFabricResponse(response FileFabricResponse, actionFormat string, actionArguments ...interface{}) error {
|
||||
|
||||
action := fmt.Sprintf(actionFormat, actionArguments...)
|
||||
if response.Status == "ok" && response.Message == "Success" {
|
||||
return nil
|
||||
} else if response.Status == "error_data" {
|
||||
if response.Message == "Folder with same name already exists." {
|
||||
return errFileFabricDirectoryExists
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Failed to %s (status: %s, message: %s)", action, response.Status, response.Message)
|
||||
}
|
||||
|
||||
// Create a File Fabric storage backend
|
||||
func CreateFileFabricStorage(endpoint string, token string, storageDir string, threads int) (storage *FileFabricStorage, err error) {
|
||||
|
||||
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||
storageDir += "/"
|
||||
}
|
||||
|
||||
storage = &FileFabricStorage{
|
||||
|
||||
endpoint: endpoint,
|
||||
authToken: token,
|
||||
client: http.DefaultClient,
|
||||
threads: threads,
|
||||
directoryCache: make(map[string]string),
|
||||
maxRetries: 12,
|
||||
}
|
||||
|
||||
err = storage.getAccessToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
storageDirID, isDir, _, err := storage.getFileInfo(0, storageDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if storageDirID == "" {
|
||||
return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
|
||||
}
|
||||
if !isDir {
|
||||
return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
|
||||
}
|
||||
storage.storageDir = storageDir
|
||||
storage.storageDirID = storageDirID
|
||||
|
||||
for _, dir := range []string{"snapshots", "chunks"} {
|
||||
storage.CreateDirectory(0, dir)
|
||||
}
|
||||
|
||||
storage.DerivedStorage = storage
|
||||
storage.SetDefaultNestingLevels([]int{0}, 0)
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
// Retrieve the access token using an auth token
|
||||
func (storage *FileFabricStorage) getAccessToken() (error) {
|
||||
|
||||
formData := url.Values { "authtoken": {storage.authToken},}
|
||||
readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output struct {
|
||||
FileFabricResponse
|
||||
Token string `xml:"token"`
|
||||
}
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
storage.accessToken = output.Token
|
||||
return nil
|
||||
}
|
||||
|
||||
// Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff
|
||||
func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, messageArguments ...interface{}) bool {
|
||||
message := fmt.Sprintf(messageFormat, messageArguments...)
|
||||
|
||||
if retry >= storage.maxRetries {
|
||||
LOG_WARN("FILEFABRIC_REQUEST", "%s", message)
|
||||
return false
|
||||
}
|
||||
backoff := 1 << uint(retry)
|
||||
if backoff > 60 {
|
||||
backoff = 60
|
||||
}
|
||||
delay := rand.Intn(backoff*500) + backoff*500
|
||||
LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay) / 1000.0)
|
||||
time.Sleep(time.Duration(delay) * time.Millisecond)
|
||||
return true
|
||||
}
|
||||
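The retry delay above is drawn uniformly from [backoff*500, backoff*1000) milliseconds, where backoff doubles with each failed attempt and is capped at 60, so the base delay tops out at roughly a minute. A tiny standalone illustration of the same schedule, reusing the exact arithmetic from shouldRetry:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	for retry := 0; retry < 8; retry++ {
		backoff := 1 << uint(retry)
		if backoff > 60 {
			backoff = 60
		}
		delay := rand.Intn(backoff*500) + backoff*500 // milliseconds
		fmt.Printf("retry %d: sleep %.1f seconds\n", retry, float32(delay)/1000.0)
	}
}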
|
||||
// Send a request to the server
|
||||
func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) ( io.ReadCloser, http.Header, int64, error) {
|
||||
|
||||
var response *http.Response
|
||||
|
||||
for retries := 0; ; retries++ {
|
||||
var inputReader io.Reader
|
||||
|
||||
switch input.(type) {
|
||||
case url.Values:
|
||||
values := input.(url.Values)
|
||||
inputReader = strings.NewReader(values.Encode())
|
||||
if requestHeaders == nil {
|
||||
requestHeaders = make(map[string]string)
|
||||
}
|
||||
requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
|
||||
case *RateLimitedReader:
|
||||
rateLimitedReader := input.(*RateLimitedReader)
|
||||
rateLimitedReader.Reset()
|
||||
inputReader = rateLimitedReader
|
||||
default:
|
||||
LOG_FATAL("FILEFABRIC_REQUEST", "Input type is not supported")
|
||||
return nil, nil, 0, fmt.Errorf("Input type is not supported")
|
||||
}
|
||||
|
||||
request, err := http.NewRequest(method, requestURL, inputReader)
|
||||
if err != nil {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
|
||||
if requestHeaders != nil {
|
||||
for key, value := range requestHeaders {
|
||||
request.Header.Set(key, value)
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := input.(*RateLimitedReader); ok {
|
||||
request.ContentLength = input.(*RateLimitedReader).Length()
|
||||
}
|
||||
|
||||
response, err = storage.client.Do(request)
|
||||
if err != nil {
|
||||
if !storage.shouldRetry(retries, "[%d] %s %s returned an error: %v", threadIndex, method, requestURL, err) {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if response.StatusCode < 300 {
|
||||
return response.Body, response.Header, response.ContentLength, nil
|
||||
}
|
||||
|
||||
defer response.Body.Close()
|
||||
defer io.Copy(ioutil.Discard, response.Body)
|
||||
|
||||
var output struct {
|
||||
Status string `xml:"status"`
|
||||
Message string `xml:"statusmessage"`
|
||||
}
|
||||
|
||||
err = xml.NewDecoder(response.Body).Decode(&output)
|
||||
if err != nil {
|
||||
if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if !storage.shouldRetry(retries, "[%d] %s %s returned status: %s, message: %s", threadIndex, method, requestURL, output.Status, output.Message) {
|
||||
return nil, nil, 0, err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (storage *FileFabricStorage) getAPIURL(function string) string {
|
||||
if storage.accessToken == "" {
|
||||
return "https://" + storage.endpoint + "/api/*/" + function + "/"
|
||||
} else {
|
||||
return "https://" + storage.endpoint + "/api/" + storage.accessToken + "/" + function + "/"
|
||||
}
|
||||
}
|
||||
|
||||
// ListFiles returns the list of files and subdirectories under 'dir'. Subdirectories returned must have a trailing '/' and
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recursively or non-recursively.
|
||||
func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||
if dir != "" && dir[len(dir)-1] != '/' {
|
||||
dir += "/"
|
||||
}
|
||||
|
||||
dirID, _, _, err := storage.getFileInfo(threadIndex, dir)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if dirID == "" {
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
lastID := ""
|
||||
|
||||
for {
|
||||
formData := url.Values { "marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid" : {dirID}}
|
||||
if dir == "snapshots/" {
|
||||
formData["includefolders"] = []string{"y"}
|
||||
}
|
||||
if storage.testMode {
|
||||
formData["limit"] = []string{"5"}
|
||||
}
|
||||
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getListOfFiles"), nil, formData)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output struct {
|
||||
FileFabricResponse
|
||||
FileList FileFabricFileList `xml:"files"`
|
||||
Truncated int `xml:"truncated"`
|
||||
}
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output.FileFabricResponse, "list the storage directory '%s'", dir)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if dir == "snapshots/" {
|
||||
for _, file := range output.FileList.Files {
|
||||
if file.Type == 1 {
|
||||
files = append(files, file.Path + "/")
|
||||
}
|
||||
lastID = file.ID
|
||||
}
|
||||
} else {
|
||||
for _, file := range output.FileList.Files {
|
||||
if file.Type == 0 {
|
||||
files = append(files, file.Path)
|
||||
sizes = append(sizes, file.Size)
|
||||
}
|
||||
lastID = file.ID
|
||||
}
|
||||
}
|
||||
|
||||
if output.Truncated != 1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return files, sizes, nil
|
||||
}
|
||||
|
||||
// getFileInfo returns the information about the file or directory at 'filePath'.
|
||||
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) {
|
||||
|
||||
formData := url.Values { "path" : {storage.storageDir + filePath}}
|
||||
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output struct {
|
||||
FileFabricResponse
|
||||
File FileFabricFile `xml:"file"`
|
||||
Exists string `xml:"exists"`
|
||||
}
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output.FileFabricResponse, "get the info on '%s'", filePath)
|
||||
if err != nil {
|
||||
return "", false, 0, err
|
||||
}
|
||||
|
||||
if output.Exists != "y" {
|
||||
return "", false, 0, nil
|
||||
} else {
|
||||
if output.File.Type == 1 {
|
||||
for filePath != "" && filePath[len(filePath)-1] == '/' {
|
||||
filePath = filePath[:len(filePath)-1]
|
||||
}
|
||||
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[filePath] = output.File.ID
|
||||
storage.directoryCacheLock.Unlock()
|
||||
}
|
||||
return output.File.ID, output.File.Type == 1, output.File.Size, nil
|
||||
}
|
||||
}
|
||||
|
||||
// GetFileInfo returns the information about the file or directory at 'filePath'. This is a function required by the Storage interface.
|
||||
func (storage *FileFabricStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||
|
||||
fileID := ""
|
||||
fileID, isDir, size, err = storage.getFileInfo(threadIndex, filePath)
|
||||
return fileID != "", isDir, size, err
|
||||
}
|
||||
|
||||
// DeleteFile deletes the file or directory at 'filePath'.
|
||||
func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) (err error) {
|
||||
|
||||
fileID, _, _, _ := storage.getFileInfo(threadIndex, filePath)
|
||||
if fileID == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
formData := url.Values { "fi_id" : {fileID}}
|
||||
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output FileFabricResponse
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output, "delete file '%s'", filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MoveFile renames the file.
|
||||
func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||
fileID, _, _, _ := storage.getFileInfo(threadIndex, from)
|
||||
if fileID == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
formData := url.Values { "fi_id" : {fileID}, "fi_name": {filepath.Base(to)},}
|
||||
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output FileFabricResponse
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output, "rename file '%s' to '%s'", from, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createParentDirectory returns the id of the parent directory of 'dir', creating the parent if it isn't already known in the cache.
|
||||
func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir string) (parentID string, err error) {
|
||||
|
||||
found := strings.LastIndex(dir, "/")
|
||||
if found == -1 {
|
||||
return storage.storageDirID, nil
|
||||
}
|
||||
parent := dir[:found]
|
||||
|
||||
storage.directoryCacheLock.Lock()
|
||||
parentID = storage.directoryCache[parent]
|
||||
storage.directoryCacheLock.Unlock()
|
||||
|
||||
if parentID != "" {
|
||||
return parentID, nil
|
||||
}
|
||||
|
||||
parentID, err = storage.createDirectory(threadIndex, parent)
|
||||
if err != nil {
|
||||
if err == errFileFabricDirectoryExists {
|
||||
var isDir bool
|
||||
parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if isDir == false {
|
||||
return "", fmt.Errorf("'%s' in the storage is a file", parent)
|
||||
}
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[parent] = parentID
|
||||
storage.directoryCacheLock.Unlock()
|
||||
return parentID, nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
return parentID, nil
|
||||
}
|
||||
|
||||
// createDirectory creates a new directory.
|
||||
func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) (dirID string, err error) {
|
||||
for dir != "" && dir[len(dir)-1] == '/' {
|
||||
dir = dir[:len(dir)-1]
|
||||
}
|
||||
|
||||
parentID, err := storage.createParentDirectory(threadIndex, dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
formData := url.Values { "fi_name": {filepath.Base(dir)}, "fi_pid" : {parentID}}
|
||||
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output struct {
|
||||
FileFabricResponse
|
||||
File FileFabricFile `xml:"file"`
|
||||
}
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output.FileFabricResponse, "create directory '%s'", dir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[dir] = output.File.ID
|
||||
storage.directoryCacheLock.Unlock()
|
||||
|
||||
return output.File.ID, nil
|
||||
}
|
||||
|
||||
func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||
_, err = storage.createDirectory(threadIndex, dir)
|
||||
if err == errFileFabricDirectoryExists {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||
func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||
formData := url.Values { "fi_id" : {storage.storageDir + filePath}}
|
||||
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
|
||||
return err
|
||||
}
|
||||
|
||||
// UploadFile writes 'content' to the file at 'filePath'.
|
||||
func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||
|
||||
parentID, err := storage.createParentDirectory(threadIndex, filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fileName := filepath.Base(filePath)
|
||||
requestBody := &bytes.Buffer{}
|
||||
writer := multipart.NewWriter(requestBody)
|
||||
part, _ := writer.CreateFormFile("file_1", fileName)
|
||||
part.Write(content)
|
||||
|
||||
writer.WriteField("file_name1", fileName)
|
||||
writer.WriteField("fi_pid", parentID)
|
||||
writer.WriteField("fi_structtype", "g")
|
||||
writer.Close()
|
||||
|
||||
headers := make(map[string]string)
|
||||
headers["Content-Type"] = writer.FormDataContentType()
|
||||
|
||||
rateLimitedReader := CreateRateLimitedReader(requestBody.Bytes(), storage.UploadRateLimit/storage.threads)
|
||||
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader)
|
||||
|
||||
defer readCloser.Close()
|
||||
defer io.Copy(ioutil.Discard, readCloser)
|
||||
|
||||
var output FileFabricResponse
|
||||
|
||||
err = xml.NewDecoder(readCloser).Decode(&output)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = checkFileFabricResponse(output, "upload file '%s'", filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||
// managing snapshots.
|
||||
func (storage *FileFabricStorage) IsCacheNeeded() bool { return true }
|
||||
|
||||
// If the 'MoveFile' method is implemented.
|
||||
func (storage *FileFabricStorage) IsMoveFileImplemented() bool { return true }
|
||||
|
||||
// If the storage can guarantee strong consistency.
|
||||
func (storage *FileFabricStorage) IsStrongConsistent() bool { return false }
|
||||
|
||||
// If the storage supports fast listing of files names.
|
||||
func (storage *FileFabricStorage) IsFastListing() bool { return false }
|
||||
|
||||
// Enable the test mode.
|
||||
func (storage *FileFabricStorage) EnableTestMode() { storage.testMode = true }
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
@@ -78,7 +79,7 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri
|
||||
|
||||
for _, f := range list {
|
||||
name := f.Name()
|
||||
if f.IsDir() && name[len(name)-1] != '/' {
|
||||
if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
|
||||
name += "/"
|
||||
}
|
||||
files = append(files, name)
|
||||
@@ -164,8 +165,8 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if !stat.IsDir() {
|
||||
return fmt.Errorf("The path %s is not a directory", dir)
|
||||
if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 {
|
||||
return fmt.Errorf("The path %s is not a directory or symlink", dir)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -190,10 +191,13 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
|
||||
return err
|
||||
}
|
||||
|
||||
err = file.Sync()
|
||||
if err != nil {
|
||||
file.Close()
|
||||
return err
|
||||
if err = file.Sync(); err != nil {
|
||||
pathErr, ok := err.(*os.PathError)
|
||||
isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
|
||||
if !isNotSupported {
|
||||
_ = file.Close()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err = file.Close()
|
||||
|
||||
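The change above tolerates file.Sync() failing with ENOTSUP, which some filesystems (certain FUSE or network mounts, for example) return for fsync; the error surfaces as an *os.PathError whose Op is "sync". The detection pattern in isolation, as a Unix-only sketch mirroring the check in this diff:

import (
	"os"
	"syscall"
)

// syncIgnoringUnsupported calls Sync but treats "operation not supported"
// as success. Sketch only, not part of this diff.
func syncIgnoringUnsupported(file *os.File) error {
	err := file.Sync()
	if err == nil {
		return nil
	}
	if pathErr, ok := err.(*os.PathError); ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP {
		return nil
	}
	return err
}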
@@ -20,13 +20,16 @@ import (
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
var (
|
||||
GCDFileMimeType = "application/octet-stream"
|
||||
GCDDirectoryMimeType = "application/vnd.google-apps.folder"
|
||||
GCDUserDrive = "root"
|
||||
)
|
||||
|
||||
type GCDStorage struct {
|
||||
@@ -37,6 +40,7 @@ type GCDStorage struct {
|
||||
idCacheLock sync.Mutex
|
||||
backoffs []int // desired backoff time in seconds for each thread
|
||||
attempts []int // number of failed attempts since last success for each thread
|
||||
driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive
|
||||
|
||||
createDirectoryLock sync.Mutex
|
||||
isConnected bool
|
||||
@@ -78,6 +82,14 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
|
||||
// User Rate Limit Exceeded
|
||||
message = e.Message
|
||||
retry = true
|
||||
} else if e.Code == 408 {
|
||||
// Request timeout
|
||||
message = e.Message
|
||||
retry = true
|
||||
} else if e.Code == 400 && strings.Contains(e.Message, "failedPrecondition") {
|
||||
// Daily quota exceeded
|
||||
message = e.Message
|
||||
retry = true
|
||||
} else if e.Code == 401 {
|
||||
// Only retry on authorization error when storage has been connected before
|
||||
if storage.isConnected {
|
||||
@@ -187,7 +199,11 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
|
||||
var err error
|
||||
|
||||
for {
|
||||
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
|
||||
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
|
||||
if storage.driveID != GCDUserDrive {
|
||||
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
|
||||
}
|
||||
fileList, err = q.Do()
|
||||
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
@@ -215,7 +231,11 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
|
||||
|
||||
for {
|
||||
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
|
||||
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
|
||||
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
|
||||
	if storage.driveID != GCDUserDrive {
		q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
	}
	fileList, err = q.Do()

	if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
		break
@@ -244,7 +264,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
		return fileID, nil
	}

	fileID := "root"
	fileID := storage.driveID

	if rootID, ok := storage.findPathID(""); ok {
		fileID = rootID
@@ -299,37 +319,85 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
}

// CreateGCDStorage creates a GCD storage object.
func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storage *GCDStorage, err error) {
func CreateGCDStorage(tokenFile string, driveID string, storagePath string, threads int) (storage *GCDStorage, err error) {

	ctx := context.Background()

	description, err := ioutil.ReadFile(tokenFile)
	if err != nil {
		return nil, err
	}

	gcdConfig := &GCDConfig{}
	if err := json.Unmarshal(description, gcdConfig); err != nil {
		return nil, err
	}
	var object map[string]interface{}

	oauth2Config := oauth2.Config{
		ClientID:     gcdConfig.ClientID,
		ClientSecret: gcdConfig.ClientSecret,
		Endpoint:     gcdConfig.Endpoint,
	}

	authClient := oauth2Config.Client(context.Background(), &gcdConfig.Token)

	service, err := drive.New(authClient)
	err = json.Unmarshal(description, &object)
	if err != nil {
		return nil, err
	}

	isServiceAccount := false
	if value, ok := object["type"]; ok {
		if authType, ok := value.(string); ok && authType == "service_account" {
			isServiceAccount = true
		}
	}

	var tokenSource oauth2.TokenSource

	if isServiceAccount {
		config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
		if err != nil {
			return nil, err
		}
		tokenSource = config.TokenSource(ctx)
	} else {
		gcdConfig := &GCDConfig{}
		if err := json.Unmarshal(description, gcdConfig); err != nil {
			return nil, err
		}

		config := oauth2.Config{
			ClientID:     gcdConfig.ClientID,
			ClientSecret: gcdConfig.ClientSecret,
			Endpoint:     gcdConfig.Endpoint,
		}
		tokenSource = config.TokenSource(ctx, &gcdConfig.Token)
	}

	service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
	if err != nil {
		return nil, err
	}

	if len(driveID) == 0 {
		driveID = GCDUserDrive
	} else {
		driveList, err := drive.NewTeamdrivesService(service).List().Do()
		if err != nil {
			return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
		}

		found := false
		for _, teamDrive := range driveList.TeamDrives {
			if teamDrive.Id == driveID || teamDrive.Name == driveID {
				driveID = teamDrive.Id
				found = true
				break
			}
		}

		if !found {
			return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
		}
	}

	storage = &GCDStorage{
		service:         service,
		numberOfThreads: threads,
		idCache:         make(map[string]string),
		backoffs:        make([]int, threads),
		attempts:        make([]int, threads),
		driveID:         driveID,
	}

	for i := range storage.backoffs {
@@ -337,6 +405,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
		storage.attempts[i] = 0
	}

	storage.savePathID("", driveID)
	storagePathID, err := storage.getIDFromPath(0, storagePath, true)
	if err != nil {
		return nil, err
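
The hunk above resolves the new optional drive argument: an empty value selects the user's own drive, otherwise the argument is matched against either the ID or the name of a shared drive returned by the Teamdrives service. A minimal standalone sketch of that lookup, using the same google.golang.org/api/drive/v3 calls as the diff (error wrapping simplified, GCDUserDrive assumed to be the sentinel for "my drive"):

	// resolveDriveID returns the shared drive ID matching driveID by ID or by
	// name, or GCDUserDrive when no shared drive is requested. Illustrative only.
	func resolveDriveID(service *drive.Service, driveID string) (string, error) {
		if driveID == "" {
			return GCDUserDrive, nil
		}
		driveList, err := drive.NewTeamdrivesService(service).List().Do()
		if err != nil {
			return "", fmt.Errorf("failed to look up the drive id: %v", err)
		}
		for _, teamDrive := range driveList.TeamDrives {
			if teamDrive.Id == driveID || teamDrive.Name == driveID {
				return teamDrive.Id, nil
			}
		}
		return "", fmt.Errorf("%s is not the id or name of a shared drive", driveID)
	}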
@@ -411,39 +480,76 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
		}
		return files, nil, nil
	} else {
		files := []string{}
		sizes := []int64{}
		lock := sync.Mutex{}
		allFiles := []string{}
		allSizes := []int64{}

		errorChannel := make(chan error)
		directoryChannel := make(chan string)
		activeWorkers := 0

		parents := []string{"chunks", "fossils"}
		for i := 0; i < len(parents); i++ {
			parent := parents[i]
			pathID, ok := storage.findPathID(parent)
			if !ok {
				continue
			}
			entries, err := storage.listFiles(threadIndex, pathID, true, true)
			if err != nil {
				return nil, nil, err
			}
			for _, entry := range entries {
				if entry.MimeType != GCDDirectoryMimeType {
					name := entry.Name
					if strings.HasPrefix(parent, "fossils") {
						name = parent + "/" + name + ".fsl"
						name = name[len("fossils/"):]
					} else {
						name = parent + "/" + name
						name = name[len("chunks/"):]
		for len(parents) > 0 || activeWorkers > 0 {

			if len(parents) > 0 && activeWorkers < storage.numberOfThreads {
				parent := parents[0]
				parents = parents[1:]
				activeWorkers++
				go func(parent string) {
					pathID, ok := storage.findPathID(parent)
					if !ok {
						return
					}
					entries, err := storage.listFiles(threadIndex, pathID, true, true)
					if err != nil {
						errorChannel <- err
						return
					}

					LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries))

					files := []string{}
					sizes := []int64{}
					for _, entry := range entries {
						if entry.MimeType != GCDDirectoryMimeType {
							name := entry.Name
							if strings.HasPrefix(parent, "fossils") {
								name = parent + "/" + name + ".fsl"
								name = name[len("fossils/"):]
							} else {
								name = parent + "/" + name
								name = name[len("chunks/"):]
							}
							files = append(files, name)
							sizes = append(sizes, entry.Size)
						} else {
							directoryChannel <- parent + "/" + entry.Name
							storage.savePathID(parent+"/"+entry.Name, entry.Id)
						}
					}
					lock.Lock()
					allFiles = append(allFiles, files...)
					allSizes = append(allSizes, sizes...)
					lock.Unlock()
					directoryChannel <- ""
				}(parent)
			}

			if activeWorkers > 0 {
				select {
				case err := <-errorChannel:
					return nil, nil, err
				case directory := <-directoryChannel:
					if directory == "" {
						activeWorkers--
					} else {
						parents = append(parents, directory)
					}
					files = append(files, name)
					sizes = append(sizes, entry.Size)
				} else {
					parents = append(parents, parent+"/"+entry.Name)
					storage.savePathID(parent+"/"+entry.Name, entry.Id)
				}
			}
		}
		return files, sizes, nil

		return allFiles, allSizes, nil
	}

}
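
The rewritten ListFiles above replaces the sequential walk with a bounded set of goroutines: a queue of directories still to list, a counter of active workers, and two channels (one for newly discovered subdirectories, one for errors), with an empty string on the directory channel marking a worker as finished. A stripped-down sketch of the same scheduling loop, with the Drive-specific listing replaced by a hypothetical listDir callback and the same leak-on-error behavior as the original:

	// walkDirs lists every directory reachable from roots using at most
	// maxWorkers concurrent listings. listDir is a stand-in for the
	// storage-specific call and returns the subdirectory names of one directory.
	func walkDirs(roots []string, maxWorkers int, listDir func(string) ([]string, error)) error {
		parents := append([]string{}, roots...)
		directoryChannel := make(chan string)
		errorChannel := make(chan error)
		activeWorkers := 0
		for len(parents) > 0 || activeWorkers > 0 {
			if len(parents) > 0 && activeWorkers < maxWorkers {
				parent := parents[0]
				parents = parents[1:]
				activeWorkers++
				go func(parent string) {
					subdirs, err := listDir(parent)
					if err != nil {
						errorChannel <- err
						return
					}
					for _, subdir := range subdirs {
						directoryChannel <- parent + "/" + subdir
					}
					directoryChannel <- "" // empty string marks this worker as done
				}(parent)
			}
			if activeWorkers > 0 {
				select {
				case err := <-errorChannel:
					return err
				case directory := <-directoryChannel:
					if directory == "" {
						activeWorkers--
					} else {
						parents = append(parents, directory)
					}
				}
			}
		}
		return nil
	}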
@@ -458,7 +564,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
	}

	for {
		err = storage.service.Files.Delete(fileID).Fields("id").Do()
		err = storage.service.Files.Delete(fileID).SupportsAllDrives(true).Fields("id").Do()
		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
			storage.deletePathID(filePath)
			return nil
@@ -504,7 +610,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
	}

	for {
		_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
		_, err = storage.service.Files.Update(fileID, nil).SupportsAllDrives(true).AddParents(toParentID).RemoveParents(fromParentID).Do()
		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
			break
		} else if retry {
@@ -555,7 +661,7 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
		Parents: []string{parentID},
	}

	file, err = storage.service.Files.Create(file).Fields("id").Do()
	file, err = storage.service.Files.Create(file).SupportsAllDrives(true).Fields("id").Do()
	if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
		break
	} else {
@@ -626,7 +732,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
	for {
		// AcknowledgeAbuse(true) lets the download proceed even if GCD thinks that it contains malware.
		// TODO: Should this prompt the user or log a warning?
		req := storage.service.Files.Get(fileID)
		req := storage.service.Files.Get(fileID).SupportsAllDrives(true)
		if e, ok := err.(*googleapi.Error); ok {
			if strings.Contains(err.Error(), "cannotDownloadAbusiveFile") || len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
				LOG_WARN("GCD_STORAGE", "%s is marked as abusive, will download anyway.", filePath)
@@ -672,7 +778,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content

	for {
		reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
		_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
		_, err = storage.service.Files.Create(file).SupportsAllDrives(true).Media(reader).Fields("id").Do()
		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
			break
		} else if retry {
@@ -7,10 +7,12 @@ package duplicacy
import (
	"fmt"
	"os"
	"log"
	"runtime/debug"
	"sync"
	"testing"
	"time"
	"regexp"
)

const (
@@ -43,6 +45,13 @@ func setTestingT(t *testing.T) {
	testingT = t
}

// Contains the ids of logs that won't be displayed
var suppressedLogs map[string]bool = map[string]bool{}

func SuppressLog(id string) {
	suppressedLogs[id] = true
}

func getLevelName(level int) string {
	switch level {
	case DEBUG:
@@ -98,6 +107,15 @@ func LOG_ERROR(logID string, format string, v ...interface{}) {
	logf(ERROR, logID, format, v...)
}

func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) {
	if isWarning {
		logf(WARN, logID, format, v...)
	} else {
		logf(ERROR, logID, format, v...)
	}
}

func LOG_FATAL(logID string, format string, v ...interface{}) {
	logf(FATAL, logID, format, v...)
}
@@ -143,6 +161,12 @@ func logf(level int, logID string, format string, v ...interface{}) {
	defer logMutex.Unlock()

	if level >= loggingLevel {
		if level <= ERROR && len(suppressedLogs) > 0 {
			if _, found := suppressedLogs[logID]; found {
				return
			}
		}

		if printLogHeader {
			fmt.Printf("%s %s %s %s\n",
				now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
@@ -161,6 +185,32 @@ func logf(level int, logID string, format string, v ...interface{}) {
	}
}

// Set up logging for libraries that Duplicacy depends on. They can call 'log.Printf("[ID] message")'
// to produce logs in Duplicacy's format
type Logger struct {
	formatRegex *regexp.Regexp
}

func (logger *Logger) Write(line []byte) (n int, err error) {
	n = len(line)
	for len(line) > 0 && line[len(line)-1] == '\n' {
		line = line[:len(line)-1]
	}
	matched := logger.formatRegex.FindStringSubmatch(string(line))
	if matched != nil {
		LOG_INFO(matched[1], "%s", matched[2])
	} else {
		LOG_INFO("LOG_DEFAULT", "%s", line)
	}

	return
}

func init() {
	log.SetFlags(0)
	log.SetOutput(&Logger{formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`)})
}

const (
	duplicacyExitCode = 100
	otherExitCode     = 101
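
The Logger type above routes third-party library output into Duplicacy's own log stream: init() silences the standard log package's prefixes and redirects it to a Logger whose regex splits a leading [ID] tag from the rest of the line. A small illustrative example of what a dependency-side call would produce under this adapter (the "WEBDAV_RETRY" ID is hypothetical):

	import "log"

	// Illustrative only: with the init() above in effect, a dependency can emit
	// lines that Duplicacy reformats through its own logger.
	func exampleDependencyLogging() {
		// "[WEBDAV_RETRY]" is parsed out as the log ID; the rest becomes the message.
		log.Printf("[WEBDAV_RETRY] retrying after 2 seconds")

		// No [ID] prefix: the line is logged under the LOG_DEFAULT ID instead.
		log.Printf("connection reset by peer")
	}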
@@ -13,8 +13,10 @@ import (
	"math/rand"
	"net/http"
	"strings"
	"strconv"
	"sync"
	"time"
	"path/filepath"

	"golang.org/x/oauth2"
)
@@ -32,9 +34,6 @@ type OneDriveErrorResponse struct {
	Error OneDriveError `json:"error"`
}

var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
var OneDriveAPIURL = "https://api.onedrive.com/v1.0"

type OneDriveClient struct {
	HTTPClient *http.Client

@@ -44,9 +43,13 @@ type OneDriveClient struct {

	IsConnected bool
	TestMode    bool

	IsBusiness      bool
	RefreshTokenURL string
	APIURL          string
}

func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {

	description, err := ioutil.ReadFile(tokenFile)
	if err != nil {
@@ -63,6 +66,15 @@ func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
		TokenFile:  tokenFile,
		Token:      token,
		TokenLock:  &sync.Mutex{},
		IsBusiness: isBusiness,
	}

	if isBusiness {
		client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
		client.APIURL = "https://graph.microsoft.com/v1.0/me"
	} else {
		client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
		client.APIURL = "https://api.onedrive.com/v1.0"
	}

	client.RefreshToken(false)
@@ -75,7 +87,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
	var response *http.Response

	backoff := 1
	for i := 0; i < 8; i++ {
	for i := 0; i < 12; i++ {

		LOG_DEBUG("ONEDRIVE_CALL", "%s %s", method, url)

@@ -106,9 +118,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{},

		if reader, ok := inputReader.(*RateLimitedReader); ok {
			request.ContentLength = reader.Length()
			request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length()-1, reader.Length()))
		}

		if url != OneDriveRefreshTokenURL {
		if url != client.RefreshTokenURL {
			client.TokenLock.Lock()
			request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
			client.TokenLock.Unlock()
@@ -117,6 +130,8 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
			request.Header.Set("Content-Type", contentType)
		}

		request.Header.Set("User-Agent", "ISV|Acrosync|Duplicacy/2.0")

		response, err = client.HTTPClient.Do(request)
		if err != nil {
			if client.IsConnected {
@@ -133,6 +148,9 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
				time.Sleep(retryAfter * time.Millisecond)
			}
			backoff *= 2
			if backoff > 256 {
				backoff = 256
			}
			continue
		}
		return nil, 0, err
@@ -152,7 +170,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},

		if response.StatusCode == 401 {

			if url == OneDriveRefreshTokenURL {
			if url == client.RefreshTokenURL {
				return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
			}

@@ -161,11 +179,23 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
				return nil, 0, err
			}
			continue
		} else if response.StatusCode == 409 {
			return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
		} else if response.StatusCode > 401 && response.StatusCode != 404 {
			retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
			LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
			time.Sleep(retryAfter * time.Millisecond)
			delay := int((rand.Float32()*0.5 + 0.5) * 1000.0 * float32(backoff))
			if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
				retryAfter, _ := strconv.Atoi(backoffList[0])
				if retryAfter*1000 > delay {
					delay = retryAfter * 1000
				}
			}

			LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, delay)
			time.Sleep(time.Duration(delay) * time.Millisecond)
			backoff *= 2
			if backoff > 256 {
				backoff = 256
			}
			continue
		} else {
			if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
|
||||
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to refresh the access token: %v", err)
|
||||
}
|
||||
@@ -228,9 +258,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
|
||||
|
||||
entries := []OneDriveEntry{}
|
||||
|
||||
url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
|
||||
url := client.APIURL + "/drive/root:/" + path + ":/children"
|
||||
if path == "" {
|
||||
url = OneDriveAPIURL + "/drive/root/children"
|
||||
url = client.APIURL + "/drive/root/children"
|
||||
}
|
||||
if client.TestMode {
|
||||
url += "?top=8"
|
||||
@@ -266,7 +296,7 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
|
||||
|
||||
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
|
||||
|
||||
url := OneDriveAPIURL + "/drive/root:/" + path
|
||||
url := client.APIURL + "/drive/root:/" + path
|
||||
url += "?select=id,name,size,folder"
|
||||
|
||||
readCloser, _, err := client.call(url, "GET", 0, "")
|
||||
@@ -291,28 +321,95 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err
|
||||
|
||||
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
|
||||
|
||||
url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"
|
||||
url := client.APIURL + "/drive/items/root:/" + path + ":/content"
|
||||
|
||||
return client.call(url, "GET", 0, "")
|
||||
}
|
||||
|
||||
func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
|
||||
|
||||
url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"
|
||||
// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
|
||||
// is smaller than 4MB for OneDrive Business
|
||||
if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) {
|
||||
url := client.APIURL + "/drive/root:/" + path + ":/content"
|
||||
|
||||
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
|
||||
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
readCloser.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
// For large files, create an upload session first
|
||||
uploadURL, err := client.CreateUploadSession(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return client.UploadFileSession(uploadURL, content, rateLimit)
|
||||
}
|
||||
|
||||
func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string, err error) {
|
||||
|
||||
type CreateUploadSessionItem struct {
|
||||
ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
input := map[string]interface{} {
|
||||
"item": CreateUploadSessionItem {
|
||||
ConflictBehavior: "replace",
|
||||
Name: filepath.Base(path),
|
||||
},
|
||||
}
|
||||
|
||||
readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
type CreateUploadSessionOutput struct {
|
||||
UploadURL string `json:"uploadUrl"`
|
||||
}
|
||||
|
||||
output := &CreateUploadSessionOutput{}
|
||||
|
||||
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
readCloser.Close()
|
||||
return output.UploadURL, nil
|
||||
}
|
||||
|
||||
func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte, rateLimit int) (err error) {
|
||||
|
||||
readCloser, _, err := client.call(uploadURL, "PUT", CreateRateLimitedReader(content, rateLimit), "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
type UploadFileSessionOutput struct {
|
||||
Size int `json:"size"`
|
||||
}
|
||||
output := &UploadFileSessionOutput{}
|
||||
|
||||
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||
return fmt.Errorf("Failed to complete the file upload session: %v", err)
|
||||
}
|
||||
|
||||
if output.Size != len(content) {
|
||||
return fmt.Errorf("Uploaded %d bytes out of %d bytes", output.Size, len(content))
|
||||
}
|
||||
|
||||
readCloser.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
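
Taken together, the functions above give OneDrive for Business uploads a two-step path: create an upload session for the target path, then PUT the whole buffer (with the Content-Range header added in call()) against the returned uploadUrl. A hedged usage sketch built only from the methods defined above, assuming an existing client and that a rate limit of 0 means unthrottled:

	// Illustrative only: upload a blob either directly or through a session,
	// mirroring the branching in UploadFile above.
	func uploadExample(client *OneDriveClient, path string, content []byte) error {
		if !client.IsBusiness {
			// OneDrive Personal: a single PUT to :/content is enough.
			return client.UploadFile(path, content, 0)
		}
		uploadURL, err := client.CreateUploadSession(path)
		if err != nil {
			return err
		}
		return client.UploadFileSession(uploadURL, content, 0)
	}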
func (client *OneDriveClient) DeleteFile(path string) error {

	url := OneDriveAPIURL + "/drive/root:/" + path
	url := client.APIURL + "/drive/root:/" + path

	readCloser, _, err := client.call(url, "DELETE", 0, "")
	if err != nil {
@@ -325,7 +422,7 @@ func (client *OneDriveClient) DeleteFile(path string) error {

func (client *OneDriveClient) MoveFile(path string, parent string) error {

	url := OneDriveAPIURL + "/drive/root:/" + path
	url := client.APIURL + "/drive/root:/" + path

	parentReference := make(map[string]string)
	parentReference["path"] = "/drive/root:/" + parent
@@ -335,6 +432,20 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {

	readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
	if err != nil {
		if e, ok := err.(OneDriveError); ok && e.Status == 400 {
			// The destination directory doesn't exist; trying to create it...
			dir := filepath.Dir(parent)
			if dir == "." {
				dir = ""
			}
			client.CreateDirectory(dir, filepath.Base(parent))
			readCloser, _, err = client.call(url, "PATCH", parameters, "application/json")
			if err != nil {
				return nil
			}

		}

		return err
	}

@@ -344,24 +455,29 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {

func (client *OneDriveClient) CreateDirectory(path string, name string) error {

	url := OneDriveAPIURL + "/root/children"
	url := client.APIURL + "/root/children"

	if path != "" {

		parentID, isDir, _, err := client.GetFileInfo(path)
		pathID, isDir, _, err := client.GetFileInfo(path)
		if err != nil {
			return err
		}

		if parentID == "" {
			return fmt.Errorf("The path '%s' does not exist", path)
		if pathID == "" {
			dir := filepath.Dir(path)
			if dir != "." {
				// The parent directory doesn't exist; trying to create it...
				client.CreateDirectory(dir, filepath.Base(path))
				isDir = true
			}
		}

		if !isDir {
			return fmt.Errorf("The path '%s' is not a directory", path)
		}

		url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
		url = client.APIURL + "/drive/root:/" + path + ":/children"
	}

	parameters := make(map[string]interface{})
@@ -370,6 +486,11 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {

	readCloser, _, err := client.call(url, "POST", parameters, "application/json")
	if err != nil {
		if e, ok := err.(OneDriveError); ok && e.Status == 409 {
			// This error usually means the directory already exists
			LOG_TRACE("ONEDRIVE_MKDIR", "The directory '%s/%s' already exists", path, name)
			return nil
		}
		return err
	}

@@ -17,7 +17,7 @@ import (

func TestOneDriveClient(t *testing.T) {

	oneDriveClient, err := NewOneDriveClient("one-token.json")
	oneDriveClient, err := NewOneDriveClient("one-token.json", false)
	if err != nil {
		t.Errorf("Failed to create the OneDrive client: %v", err)
		return

@@ -19,13 +19,13 @@ type OneDriveStorage struct {
}

// CreateOneDriveStorage creates an OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {

	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
		storagePath = storagePath[:len(storagePath)-1]
	}

	client, err := NewOneDriveClient(tokenFile)
	client, err := NewOneDriveClient(tokenFile, isBusiness)
	if err != nil {
		return nil, err
	}
@@ -80,6 +80,7 @@ func (storage *OneDriveStorage) convertFilePath(filePath string) string {

// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {

	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

@@ -25,6 +25,8 @@ type Preference struct {
	DoNotSavePassword  bool              `json:"no_save_password"`
	NobackupFile       string            `json:"nobackup_file"`
	Keys               map[string]string `json:"keys"`
	FiltersFile        string            `json:"filters"`
	ExcludeByAttribute bool              `json:"exclude_by_attribute"`
}

var preferencePath string

@@ -210,7 +210,7 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *

	defer output.Body.Close()

	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.bucket))
	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/storage.numberOfThreads)
	return err

}
@@ -225,7 +225,7 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
		Bucket:      aws.String(storage.bucket),
		Key:         aws.String(storage.storageDir + filePath),
		ACL:         aws.String(s3.ObjectCannedACLPrivate),
		Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.bucket)),
		Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads),
		ContentType: aws.String("application/duplicacy"),
	}
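
The two S3 hunks above fix how the global rate limit is shared: it is now divided by the number of threads rather than by the length of the bucket name, so each thread gets an equal slice. As a worked example under assumed numbers, with UploadRateLimit = 4096 KB/s and 4 threads each reader is capped at 1024 KB/s; previously a 10-character bucket name would have capped each at about 409 KB/s regardless of the thread count. The corrected divisor amounts to nothing more than:

	// perThreadLimit splits a global rate limit evenly across threads,
	// mirroring the corrected divisor in the S3 hunks above. Illustrative only.
	func perThreadLimit(globalLimit int, threads int) int {
		if threads <= 0 {
			return globalLimit
		}
		return globalLimit / threads
	}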
@@ -43,10 +43,10 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
		return nil
	}

	return CreateSFTPStorage(server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
	return CreateSFTPStorage(false, server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
}

func CreateSFTPStorage(server string, port int, username string, storageDir string, minimumNesting int,
func CreateSFTPStorage(compatibilityMode bool, server string, port int, username string, storageDir string, minimumNesting int,
	authMethods []ssh.AuthMethod,
	hostKeyCallback func(hostname string, remote net.Addr,
		key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {
@@ -57,8 +57,21 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
		HostKeyCallback: hostKeyCallback,
	}

	if server == "sftp.hidrive.strato.com" {
		sftpConfig.Ciphers = []string{"aes128-ctr", "aes256-ctr"}
	if compatibilityMode {
		sftpConfig.Ciphers = []string{
			"aes128-ctr", "aes192-ctr", "aes256-ctr",
			"aes128-gcm@openssh.com",
			"chacha20-poly1305@openssh.com",
			"arcfour256", "arcfour128", "arcfour",
			"aes128-cbc",
			"3des-cbc",
		}
		sftpConfig.KeyExchanges = []string{
			"curve25519-sha256@libssh.org",
			"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
			"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
			"diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
		}
	}

	serverAddress := fmt.Sprintf("%s:%d", server, port)
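
The compatibilityMode branch above replaces the old host-specific special case: when the storage URL uses the sftpc:// scheme, the client advertises a wider (and partly legacy) set of ciphers and key exchanges instead of the Go defaults. A minimal illustrative ssh.ClientConfig showing the same idea, with user, auth methods, and host key callback left as placeholders supplied by the caller:

	// compatibilityConfig builds an ssh.ClientConfig with an extended cipher and
	// key-exchange list, as the sftpc:// compatibility mode does above. Sketch only.
	func compatibilityConfig(user string, auth []ssh.AuthMethod, hostKey ssh.HostKeyCallback) *ssh.ClientConfig {
		config := &ssh.ClientConfig{
			User:            user,
			Auth:            auth,
			HostKeyCallback: hostKey,
		}
		config.Ciphers = []string{
			"aes128-ctr", "aes192-ctr", "aes256-ctr",
			"aes128-gcm@openssh.com",
			"chacha20-poly1305@openssh.com",
			"arcfour256", "arcfour128", "arcfour",
			"aes128-cbc",
			"3des-cbc",
		}
		config.KeyExchanges = []string{
			"curve25519-sha256@libssh.org",
			"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
			"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
			"diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
		}
		return config
	}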
@@ -91,7 +104,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
		storageDir:      storageDir,
		minimumNesting:  minimumNesting,
		numberOfThreads: threads,
		numberOfTries:   6,
		numberOfTries:   8,
		serverAddress:   serverAddress,
		sftpConfig:      sftpConfig,
	}
@@ -129,22 +142,19 @@ func (storage *SFTPStorage) retry(f func () error) error {
		delay *= 2

		storage.clientLock.Lock()
		if storage.client != nil {
			storage.client.Close()
			storage.client = nil
		}

		connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
		if err != nil {
			LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
			storage.clientLock.Unlock()
			return err
			continue
		}

		client, err := sftp.NewClient(connection)
		if err != nil {
			LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
			connection.Close()
			storage.clientLock.Unlock()
			return err
			continue
		}
		storage.client = client
		storage.clientLock.Unlock()
@@ -275,36 +285,19 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
	fullPath := path.Join(storage.storageDir, filePath)

	dirs := strings.Split(filePath, "/")
	if len(dirs) > 1 {
		fullDir := path.Dir(fullPath)
		err = storage.retry(func() error {
			_, err := storage.getSFTPClient().Stat(fullDir)
			return err
		})
		if err != nil {
			// The error may be caused by a non-existent fullDir, or a broken connection. In either case,
			// we just assume it is the former because there isn't a way to tell which is the case.
			for i := range dirs[1 : len(dirs)-1] {
				subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
				// We don't check the error; just keep going blindly but always store the last err
				err = storage.getSFTPClient().Mkdir(subDir)
			}
	fullDir := path.Dir(fullPath)
	return storage.retry(func() error {

			// If there is an error creating the dirs, we check fullDir one more time, because another thread
			// may happen to create the same fullDir ahead of this thread
			if err != nil {
				err = storage.retry(func() error {
					_, err := storage.getSFTPClient().Stat(fullDir)
					return err
				})
				if err != nil {
					return err
		if len(dirs) > 1 {
			_, err := storage.getSFTPClient().Stat(fullDir)
			if os.IsNotExist(err) {
				for i := range dirs[1 : len(dirs)-1] {
					subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
					// We don't check the error; just keep going blindly
					storage.getSFTPClient().Mkdir(subDir)
				}
			}
		}
	}

	return storage.retry(func() error {

		letters := "abcdefghijklmnopqrstuvwxyz"
		suffix := make([]byte, 8)
@@ -325,7 +318,11 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
			file.Close()
			return err
		}
		file.Close()

		err = file.Close()
		if err != nil {
			return err
		}

		err = storage.getSFTPClient().Rename(temporaryFile, fullPath)
		if err != nil {
@@ -13,6 +13,7 @@ import (
	"io/ioutil"
	"os"
	"os/exec"
	"regexp"
	"strings"
	"syscall"
	"time"
@@ -123,11 +124,11 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
	}
	deviceIdRepository, err := GetPathDeviceId(top)
	if err != nil {
		LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
		LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
		return top
	}
	if deviceIdLocal != deviceIdRepository {
		LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
		LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
		return top
	}

@@ -145,22 +146,37 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
	// Use tmutil to create snapshot
	tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
		LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
		return top
	}

	colonPos := strings.IndexByte(tmutilOutput, ':')
	if colonPos < 0 {
		LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
		LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
		return top
	}
	snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])

	tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
		return top
	}
	snapshotName := "com.apple.TimeMachine." + snapshotDate

	r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
	snapshotNames := r.FindStringSubmatch(tmutilOutput)
	if len(snapshotNames) > 0 {
		snapshotName = snapshotNames[0]
	} else {
		LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
	}

	// Mount snapshot as readonly and hide from GUI i.e. Finder
	_, err = CommandWithTimeout(timeoutInSeconds,
		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
	if err != nil {
		LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
		LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
		return top
	}
@@ -11,6 +11,7 @@ import (
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"time"
@@ -57,7 +58,7 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {

// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (snapshot *Snapshot, skippedDirectories []string,
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string, excludeByAttribute bool) (snapshot *Snapshot, skippedDirectories []string,
	skippedFiles []string, err error) {

	snapshot = &Snapshot{
@@ -68,47 +69,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn

	var patterns []string

	patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
	if err == nil {
		for _, pattern := range strings.Split(string(patternFile), "\n") {
			pattern = strings.TrimSpace(pattern)
			if len(pattern) == 0 {
				continue
			}

			if pattern[0] == '#' {
				continue
			}

			if IsUnspecifiedFilter(pattern) {
				pattern = "+" + pattern
			}

			if IsEmptyFilter(pattern) {
				continue
			}

			if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
				valid, err := IsValidRegex(pattern[2:])
				if !valid || err != nil {
					LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
				}
			}

			patterns = append(patterns, pattern)
		}

		LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))

		LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))

		if IsTracing() {
			for _, pattern := range patterns {
				LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
			}
		}

	if filtersFile == "" {
		filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
	}
	patterns = ProcessFilters(filtersFile)

	directories := make([]*Entry, 0, 256)
	directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -125,8 +89,12 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
		directory := directories[len(directories)-1]
		directories = directories[:len(directories)-1]
		snapshot.Files = append(snapshot.Files, directory)
		subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes)
		subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes, excludeByAttribute)
		if err != nil {
			if directory.Path == "" {
				LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
				return nil, nil, nil, err
			}
			LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
			skippedDirectories = append(skippedDirectories, directory.Path)
			continue
@@ -150,6 +118,103 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
	return snapshot, skippedDirectories, skippedFiles, nil
}
func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
	for _, pattern := range patterns {
		if pattern == new_pattern {
			LOG_INFO("SNAPSHOT_FILTER", "Ignoring duplicate pattern: %s ...", new_pattern)
			return patterns
		}
	}
	new_patterns = append(patterns, new_pattern)
	return new_patterns
}

func ProcessFilters(filtersFile string) (patterns []string) {
	patterns = ProcessFilterFile(filtersFile, make([]string, 0))

	LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))

	LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))

	if IsTracing() {
		for _, pattern := range patterns {
			LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
		}

	}

	return patterns
}

func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []string) {
	for _, file := range includedFiles {
		if file == patternFile {
			// cycle in include mechanism discovered.
			LOG_ERROR("SNAPSHOT_FILTER", "The filter file %s has already been included", patternFile)
			return patterns
		}
	}
	includedFiles = append(includedFiles, patternFile)
	LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile)
	patternFileContent, err := ioutil.ReadFile(patternFile)
	if err == nil {
		patternFileLines := strings.Split(string(patternFileContent), "\n")
		patterns = ProcessFilterLines(patternFileLines, includedFiles)
	}
	return patterns
}

func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patterns []string) {
	for _, pattern := range patternFileLines {
		pattern = strings.TrimSpace(pattern)
		if len(pattern) == 0 {
			continue
		}

		if strings.HasPrefix(pattern, "@") {
			patternIncludeFile := strings.TrimSpace(pattern[1:])
			if patternIncludeFile == "" {
				continue
			}
			if !filepath.IsAbs(patternIncludeFile) {
				basePath := ""
				if len(includedFiles) == 0 {
					basePath, _ = os.Getwd()
				} else {
					basePath = filepath.Dir(includedFiles[len(includedFiles)-1])
				}
				patternIncludeFile = joinPath(basePath, patternIncludeFile)
			}
			for _, pattern := range ProcessFilterFile(patternIncludeFile, includedFiles) {
				patterns = AppendPattern(patterns, pattern)
			}
			continue
		}

		if pattern[0] == '#' {
			continue
		}

		if IsUnspecifiedFilter(pattern) {
			pattern = "+" + pattern
		}

		if IsEmptyFilter(pattern) {
			continue
		}

		if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
			valid, err := IsValidRegex(pattern[2:])
			if !valid || err != nil {
				LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
			}
		}

		patterns = AppendPattern(patterns, pattern)
	}

	return patterns
}
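
ProcessFilterFile and ProcessFilterLines above add an @-include mechanism to the filters file: a line starting with '@' pulls in another filter file (relative paths are resolved against the including file), duplicates are dropped by AppendPattern, and re-including a file that is already on the include stack is reported as a cycle. A hypothetical set of filter lines illustrating the syntax, expressed as the slice ProcessFilterLines would receive (file names are invented):

	// Illustrative only: lines mimicking a filters file that pulls in a shared
	// exclude list and then adds repository-specific patterns on top.
	var exampleFilterLines = []string{
		"# comments and blank lines are skipped",
		"@/etc/duplicacy/common-excludes", // absolute include
		"@extra-filters",                  // resolved relative to the including file
		"+src/",
		"e:\\.tmp$",
		"-*",
	}

	// patterns := ProcessFilterLines(exampleFilterLines, []string{"/path/to/filters"})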
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
	Files []*Entry

@@ -270,7 +270,7 @@ func (reader *sequenceReader) Read(data []byte) (n int, err error) {

func (manager *SnapshotManager) CreateChunkDownloader() {
	if manager.chunkDownloader == nil {
		manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, 1)
		manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, 1, false)
	}
}

@@ -381,6 +381,13 @@ func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, pat
	return true
}

// ClearSnapshotContents removes contents loaded by DownloadSnapshotContents
func (manager *SnapshotManager) ClearSnapshotContents(snapshot *Snapshot) {
	snapshot.ChunkHashes = nil
	snapshot.ChunkLengths = nil
	snapshot.Files = nil
}

// CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string][]*Snapshot) bool {

@@ -653,6 +660,51 @@ func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot, keepChunkH
	return chunks
}

// GetSnapshotChunkHashes has an option to retrieve chunk hashes in addition to chunk ids.
func (manager *SnapshotManager) GetSnapshotChunkHashes(snapshot *Snapshot, chunkHashes *map[string]bool, chunkIDs map[string]bool) {

	for _, chunkHash := range snapshot.FileSequence {
		if chunkHashes != nil {
			(*chunkHashes)[chunkHash] = true
		}
		chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
	}

	for _, chunkHash := range snapshot.ChunkSequence {
		if chunkHashes != nil {
			(*chunkHashes)[chunkHash] = true
		}
		chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
	}

	for _, chunkHash := range snapshot.LengthSequence {
		if chunkHashes != nil {
			(*chunkHashes)[chunkHash] = true
		}
		chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
	}

	if len(snapshot.ChunkHashes) == 0 {

		description := manager.DownloadSequence(snapshot.ChunkSequence)
		err := snapshot.LoadChunks(description)
		if err != nil {
			LOG_ERROR("SNAPSHOT_CHUNK", "Failed to load chunks for snapshot %s at revision %d: %v",
				snapshot.ID, snapshot.Revision, err)
			return
		}
	}

	for _, chunkHash := range snapshot.ChunkHashes {
		if chunkHashes != nil {
			(*chunkHashes)[chunkHash] = true
		}
		chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
	}

	snapshot.ClearChunks()
}
// ListSnapshots shows the information about a snapshot.
func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList []int, tag string,
	showFiles bool, showChunks bool) int {
@@ -757,10 +809,12 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList

// ListSnapshots shows the information about a snapshot.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
	checkFiles bool, searchFossils bool, resurrect bool) bool {
	checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int, allowFailures bool) bool {

	LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
		snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
	manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads, allowFailures)

	LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
		snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)

	snapshotMap := make(map[string][]*Snapshot)
	var err error
@@ -774,6 +828,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
	// Store the index of the snapshot that references each chunk; if the chunk is shared by multiple snapshots, the index is -1
	chunkSnapshotMap := make(map[string]int)

	emptyChunks := 0

	LOG_INFO("SNAPSHOT_CHECK", "Listing all chunks")
	allChunks, allSizes := manager.ListAllFiles(manager.storage, chunkDir)

@@ -788,9 +844,14 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

		chunk = strings.Replace(chunk, "/", "", -1)
		chunkSizeMap[chunk] = allSizes[i]

		if allSizes[i] == 0 && !strings.HasSuffix(chunk, ".tmp") {
			LOG_WARN("SNAPSHOT_CHECK", "Chunk %s has a size of 0", chunk)
			emptyChunks++
		}
	}

	if snapshotID == "" || showStatistics {
	if snapshotID == "" || showStatistics || showTabular {
		snapshotIDs, err := manager.ListSnapshotIDs()
		if err != nil {
			LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
@@ -810,7 +871,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
	for snapshotID = range snapshotMap {

		revisions := revisionsToCheck
		if len(revisions) == 0 || showStatistics {
		if len(revisions) == 0 || showStatistics || showTabular {
			revisions, err = manager.ListSnapshotRevisions(snapshotID)
			if err != nil {
				LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
@@ -839,6 +900,12 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
	}
	LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))

	var allChunkHashes *map[string]bool
	if checkChunks && !checkFiles {
		m := make(map[string]bool)
		allChunkHashes = &m
	}

	for snapshotID = range snapshotMap {

		for _, snapshot := range snapshotMap[snapshotID] {
@@ -846,13 +913,12 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
			if checkFiles {
				manager.DownloadSnapshotContents(snapshot, nil, false)
				manager.VerifySnapshot(snapshot)
				manager.ClearSnapshotContents(snapshot)
				continue
			}

			chunks := make(map[string]bool)
			for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
				chunks[chunkID] = true
			}
			manager.GetSnapshotChunkHashes(snapshot, allChunkHashes, chunks)

			missingChunks := 0
			for chunkID := range chunks {
@@ -860,6 +926,20 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
				_, found := chunkSizeMap[chunkID]

				if !found {

					// Look up the chunk again in case it actually exists, but only if there aren't
					// too many missing chunks.
					if missingChunks < 100 {
						_, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
						if err != nil {
							LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
								chunkID, err)
						} else if exist {
							LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID)
							continue
						}
					}

					if !searchFossils {
						missingChunks += 1
						LOG_WARN("SNAPSHOT_VALIDATE",
@@ -870,7 +950,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

					chunkPath, exist, size, err := manager.storage.FindChunk(0, chunkID, true)
					if err != nil {
						LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
						LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of fossil %s: %v",
							chunkID, err)
						return false
					}
@@ -926,12 +1006,61 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
		return false
	}

	if emptyChunks > 0 {
		LOG_ERROR("SNAPSHOT_CHECK", "%d chunks have a size of 0", emptyChunks)
		return false
	}

	if showTabular {
		manager.ShowStatisticsTabular(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
	} else if showStatistics {
		manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
	}

	if checkChunks && !checkFiles {
		manager.chunkDownloader.snapshotCache = nil
		LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))

		startTime := time.Now()
		var chunkHashes []string

		// The index of the first chunk to add to the downloader, which may have already downloaded
		// some metadata chunks so the index doesn't start with 0.
		chunkIndex := -1

		for chunkHash := range *allChunkHashes {
			chunkHashes = append(chunkHashes, chunkHash)
			if chunkIndex == -1 {
				chunkIndex = manager.chunkDownloader.AddChunk(chunkHash)
			} else {
				manager.chunkDownloader.AddChunk(chunkHash)
			}
		}

		var downloadedChunkSize int64
		totalChunks := len(*allChunkHashes)
		for i := 0; i < totalChunks; i++ {
			chunk := manager.chunkDownloader.WaitForChunk(i + chunkIndex)
			if chunk.isBroken {
				continue
			}
			downloadedChunkSize += int64(chunk.GetLength())

			elapsedTime := time.Now().Sub(startTime).Seconds()
			speed := int64(float64(downloadedChunkSize) / elapsedTime)
			remainingTime := int64(float64(totalChunks-i-1) / float64(i+1) * elapsedTime)
			percentage := float64(i+1) / float64(totalChunks) * 100.0
			LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
				manager.config.GetChunkIDFromHash(chunkHashes[i]), i+1, totalChunks,
				PrettySize(speed), PrettyTime(remainingTime), percentage)
		}

		if manager.chunkDownloader.NumberOfFailedChunks > 0 {
			LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkDownloader.NumberOfFailedChunks, totalChunks)
		} else {
			LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", totalChunks)
		}
	}
	return true
}
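
The verification loop above reports throughput and an ETA after each chunk; the estimate is simply elapsed time scaled by the ratio of remaining to completed chunks. As a worked example under assumed numbers: after 200 of 1,000 chunks with 50 s elapsed and 500 MB downloaded, speed = 500 MB / 50 s = 10 MB/s, remaining ≈ 800/200 × 50 s = 200 s, progress = 20.0%. The same arithmetic as a small helper (names are hypothetical):

	// verifyProgress mirrors the per-chunk progress arithmetic in the loop above.
	// completed is the number of chunks verified so far (at least 1), total the
	// number of chunks queued, downloadedBytes the running byte count.
	func verifyProgress(completed int, total int, downloadedBytes int64, elapsedSeconds float64) (speedBytesPerSec int64, remainingSeconds int64, percentage float64) {
		speedBytesPerSec = int64(float64(downloadedBytes) / elapsedSeconds)
		remainingSeconds = int64(float64(total-completed) / float64(completed) * elapsedSeconds)
		percentage = float64(completed) / float64(total) * 100.0
		return
	}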
@@ -998,18 +1127,20 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
	earliestSeenChunks := make(map[string]int)

	for _, snapshot := range snapshotList {
		for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
		for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
			if earliestSeenChunks[chunkID] == 0 {
				earliestSeenChunks[chunkID] = math.MaxInt32
			}
			earliestSeenChunks[chunkID] = MinInt(earliestSeenChunks[chunkID], snapshot.Revision)
			if earliestSeenChunks[chunkID] > snapshot.Revision {
				earliestSeenChunks[chunkID] = snapshot.Revision
			}
		}
	}

	for _, snapshot := range snapshotList {

		chunks := make(map[string]bool)
		for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
		for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
			chunks[chunkID] = true
			snapshotChunks[chunkID] = true
		}
@@ -1178,7 +1309,6 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
	}

	var chunk *Chunk
	currentHash := ""

	for i := file.StartChunk; i <= file.EndChunk; i++ {
		start := 0
@@ -1191,10 +1321,12 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
		}

		hash := snapshot.ChunkHashes[i]
		if currentHash != hash {
		lastChunk, lastChunkHash := manager.chunkDownloader.GetLastDownloadedChunk()
		if lastChunkHash != hash {
			i := manager.chunkDownloader.AddChunk(hash)
			chunk = manager.chunkDownloader.WaitForChunk(i)
			currentHash = hash
		} else {
			chunk = lastChunk
		}

		output(chunk.GetBytes()[start:end])
@@ -1269,21 +1401,20 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
	}

	file := manager.FindFile(snapshot, path, false)
	var content []byte
	if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
	if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
		fmt.Printf("%s", chunk)
	}) {
		LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
			path, snapshot.ID, snapshot.Revision)
		return false
	}

	fmt.Printf("%s", string(content))

	return true
}

// Diff compares two snapshots, or two revisions of a file if the file argument is given.
func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
	filePath string, compareByHash bool, nobackupFile string) bool {
	filePath string, compareByHash bool, nobackupFile string, filtersFile string, excludeByAttribute bool) bool {

	LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
		top, snapshotID, revisions, filePath, compareByHash)
@@ -1296,7 +1427,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
	if len(revisions) <= 1 {
		// Only scan the repository if filePath is not provided
		if len(filePath) == 0 {
			rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile)
			rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile, excludeByAttribute)
			if err != nil {
				LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
				return false
@@ -1467,7 +1598,11 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
				same = right.IsSameAs(left)
			}
		} else {
			same = left.Hash == right.Hash
			if left.Size == 0 && right.Size == 0 {
				same = true
			} else {
				same = left.Hash == right.Hash
			}
		}

		if !same {
@@ -1838,7 +1973,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
		if _, found := newChunks[chunk]; found {
			// The fossil is referenced so it can't be deleted.
			if dryRun {
				LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected: %v", chunk)
				LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected", chunk)
				continue
			}

@@ -2446,7 +2581,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
		derivationKey = derivationKey[len(derivationKey)-64:]
	}

	err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)
	err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey, true)
	if err != nil {
		LOG_ERROR("UPLOAD_File", "Failed to encrypt the file %s: %v", path, err)
		return false

@@ -620,7 +620,7 @@ func TestPruneNewSnapshots(t *testing.T) {
	// Now chunkHash1 will be resurrected
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 4, 0)
	snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false)
	snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1, false)
}

// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -669,7 +669,7 @@ func TestPruneGhostSnapshots(t *testing.T) {
	// Run the prune again but the fossil collection should be ignored, since revision 1 still exists
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 2)
	snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false)
	snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1, false)

	// Prune snapshot 1 again
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -683,5 +683,5 @@ func TestPruneGhostSnapshots(t *testing.T) {
	// Run the prune again and this time the fossil collection will be processed and the fossils removed
	snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
	checkTestSnapshots(snapshotManager, 3, 0)
	snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false)
	snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1, false)
}
@@ -268,7 +268,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
	if matched == nil {
		LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
		return nil
	} else if matched[1] == "sftp" {
	} else if matched[1] == "sftp" || matched[1] == "sftpc" {
		server := matched[3]
		username := matched[2]
		storageDir := matched[5]
@@ -336,7 +336,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
			keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
				true, resetPassword)

			var key ssh.Signer
			var keySigner ssh.Signer
			var err error

			if keyFile == "" {
@@ -347,15 +347,15 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
				if err != nil {
					LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
				} else {
					key, err = ssh.ParsePrivateKey(content)
					keySigner, err = ssh.ParsePrivateKey(content)
					if err != nil {
						if strings.Contains(err.Error(), "cannot decode encrypted private keys") {
						if _, ok := err.(*ssh.PassphraseMissingError); ok {
							LOG_TRACE("SSH_PUBLICKEY", "The private key file is encrypted")
							passphrase = GetPassword(preference, "ssh_passphrase", "Enter the passphrase to decrypt the private key file:", false, resetPassword)
							if len(passphrase) == 0 {
								LOG_INFO("SSH_PUBLICKEY", "No passphrase to decrypt the private key file %s", keyFile)
							} else {
								key, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
								keySigner, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
								if err != nil {
									LOG_INFO("SSH_PUBLICKEY", "Failed to parse the encrypted private key file %s: %v", keyFile, err)
								}
@@ -364,11 +364,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
						LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
					}
				}

				if keySigner != nil {
					certFile := keyFile + "-cert.pub"
					if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() {
						LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile)
						var content []byte
						content, err = ioutil.ReadFile(certFile)
						if err != nil {
							LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err)
						} else {
							pubKey, _, _, _, err := ssh.ParseAuthorizedKey(content)
							if err != nil {
								LOG_INFO("SSH_CERTIFICATE", "Failed to parse ssh certificate file %s: %v", certFile, err)
							} else {
								certSigner, err := ssh.NewCertSigner(pubKey.(*ssh.Certificate), keySigner)
								if err != nil {
									LOG_INFO("SSH_CERTIFICATE", "Failed to create certificate signer: %v", err)
								} else {
									keySigner = certSigner
								}
							}
						}
					}
				}
			}
		}
	}

	if key != nil {
		signers = append(signers, key)
	if keySigner != nil {
		signers = append(signers, keySigner)
	}

	if len(signers) > 0 {
@@ -416,7 +440,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
|
||||
return checkHostKey(hostname, remote, key)
|
||||
}
|
||||
|
||||
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
|
||||
sftpStorage, err := CreateSFTPStorage(matched[1] == "sftpc", server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
|
||||
if err != nil {
|
||||
LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
|
||||
return nil
|
||||
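The certificate handling added above can be exercised on its own. The following is a minimal sketch, not duplicacy's actual code: it assumes a hypothetical key path, loads the private key with golang.org/x/crypto/ssh, wraps it in the matching certificate when a <keyfile>-cert.pub exists, and returns an ssh.AuthMethod, mirroring the flow in the diff.

// Sketch only: build an ssh.AuthMethod from a key file, preferring a
// certificate signer when <keyfile>-cert.pub exists (hypothetical paths).
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"golang.org/x/crypto/ssh"
)

func loadAuthMethod(keyFile string) (ssh.AuthMethod, error) {
	keyBytes, err := ioutil.ReadFile(keyFile)
	if err != nil {
		return nil, err
	}
	signer, err := ssh.ParsePrivateKey(keyBytes) // unencrypted key assumed for this sketch
	if err != nil {
		return nil, err
	}
	certFile := keyFile + "-cert.pub"
	if _, statErr := os.Stat(certFile); statErr == nil {
		certBytes, err := ioutil.ReadFile(certFile)
		if err != nil {
			return nil, err
		}
		pubKey, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
		if err != nil {
			return nil, err
		}
		cert, ok := pubKey.(*ssh.Certificate)
		if !ok {
			return nil, fmt.Errorf("%s is not an ssh certificate", certFile)
		}
		// A certificate signer presents the signed certificate instead of the bare public key.
		if certSigner, err := ssh.NewCertSigner(cert, signer); err == nil {
			signer = certSigner
		}
	}
	return ssh.PublicKeys(signer), nil
}

func main() {
	auth, err := loadAuthMethod("id_ed25519") // hypothetical key path
	fmt.Println(auth != nil, err)
}
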
@@ -526,11 +550,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return dropboxStorage
} else if matched[1] == "b2" {
bucket := matched[3]
storageDir := matched[5]

accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)

b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, threads)
b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "b2_id", accountID)
SavePassword(preference, "b2_key", applicationKey)
return b2Storage
} else if matched[1] == "b2-custom" {
b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
matched := b2customUrlRegex.FindStringSubmatch(storageURL)
downloadURL := "https://" + matched[1]
bucket := matched[2]
storageDir := matched[4]

accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)

b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
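The new b2-custom scheme carries a custom download host in the URL itself. A small sketch of how the regex in the diff splits such a URL (the example URL is hypothetical):

// Sketch only: how the b2-custom regex from the diff splits a storage URL.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
	matched := b2customUrlRegex.FindStringSubmatch("b2-custom://f001.backblazeb2.com/my-bucket/backups")
	fmt.Println("downloadURL:", "https://"+matched[1]) // https://f001.backblazeb2.com
	fmt.Println("bucket:", matched[2])                 // my-bucket
	fmt.Println("storageDir:", matched[4])             // backups
}
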
@@ -581,26 +624,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
SavePassword(preference, "gcs_token", tokenFile)
return gcsStorage
} else if matched[1] == "gcd" {
// Handle writing directly to the root of the drive
// For gcd://driveid@/, driveid@ is match[3] not match[2]
if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
matched[2], matched[3] = matched[3], matched[2]
}
driveID := matched[2]
if driveID != "" {
driveID = driveID[:len(driveID)-1]
}
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
gcdStorage, err := CreateGCDStorage(tokenFile, driveID, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "gcd_token", tokenFile)
return gcdStorage
} else if matched[1] == "one" {
} else if matched[1] == "one" || matched[1] == "odb" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "one_token", tokenFile)
SavePassword(preference, matched[1] + "_token", tokenFile)
return oneDriveStorage
} else if matched[1] == "hubic" {
storagePath := matched[3] + matched[4]

@@ -626,6 +678,10 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "webdav" || matched[1] == "webdav-http" {
server := matched[3]
username := matched[2]
if username == "" {
LOG_ERROR("STORAGE_CREATE", "No username is provided to access the WebDAV storage")
return nil
}
username = username[:len(username)-1]
storageDir := matched[5]
port := 0

@@ -646,6 +702,18 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "webdav_password", password)
return webDAVStorage
} else if matched[1] == "fabric" {
endpoint := matched[3]
storageDir := matched[5]
prompt := fmt.Sprintf("Enter the token for accessing the Storage Made Easy File Fabric storage:")
token := GetPassword(preference, "fabric_token", prompt, true, resetPassword)
smeStorage, err := CreateFileFabricStorage(endpoint, token, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the File Fabric storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "fabric_token", token)
return smeStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil

@@ -27,6 +27,8 @@ var testRateLimit int
var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
var testErasureCoding bool

func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")

@@ -34,6 +36,8 @@ func init() {
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding")
flag.Parse()
}

@@ -107,7 +111,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "b2" {
storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], threads)
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-s3" {

@@ -131,11 +135,23 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd" {
storage, err := CreateGCDStorage(config["token_file"], config["storage_path"], threads)
storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd-shared" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "hubic" {

@@ -296,7 +312,8 @@ func TestStorage(t *testing.T) {

LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

storage, err := loadStorage(testDir, 1)
threads := 8
storage, err := loadStorage(testDir, threads)
if err != nil {
t.Errorf("Failed to create storage: %v", err)
return

@@ -326,16 +343,16 @@ func TestStorage(t *testing.T) {
storage.CreateDirectory(0, "shared")

// Upload to the same directory by multiple goroutines
count := 8
count := threads
finished := make(chan int, count)
for i := 0; i < count; i++ {
go func(name string) {
err := storage.UploadFile(0, name, []byte("this is a test file"))
go func(threadIndex int, name string) {
err := storage.UploadFile(threadIndex, name, []byte("this is a test file"))
if err != nil {
t.Errorf("Error to upload '%s': %v", name, err)
}
finished <- 0
}(fmt.Sprintf("shared/a/b/c/%d", i))
}(i, fmt.Sprintf("shared/a/b/c/%d", i))
}

for i := 0; i < count; i++ {

@@ -384,7 +401,6 @@ func TestStorage(t *testing.T) {

snapshotIDs := []string{}
for _, snapshotDir := range snapshotDirs {
LOG_INFO("debug", "snapshot dir: %s", snapshotDir)
if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
}

@@ -129,6 +129,11 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
TrustId: arguments["trust_id"],
}

err = connection.Authenticate()
if err != nil {
return nil, err
}

_, _, err = connection.Container(container)
if err != nil {
return nil, err

@@ -55,7 +55,7 @@ func IsEmptyFilter(pattern string) bool {
}

func IsUnspecifiedFilter(pattern string) bool {
if pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e' {
if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
return true
} else {
return false
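The one-line change above narrows what counts as a specified filter: a pattern now has to start with +, -, i:, or e:, so bare names that merely begin with the letter i or e are no longer misclassified. A minimal sketch with local copies of both checks and hypothetical sample patterns:

// Sketch only: local copies of the old and new checks, showing why a pattern
// such as "include.txt" is no longer treated as a specified filter.
package main

import (
	"fmt"
	"strings"
)

func oldUnspecified(pattern string) bool {
	return pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e'
}

func newUnspecified(pattern string) bool {
	return pattern[0] != '+' && pattern[0] != '-' &&
		!strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:")
}

func main() {
	for _, p := range []string{"i:a", "e:a", "include.txt", "etc"} {
		fmt.Printf("%-12s old specified=%v new specified=%v\n", p, !oldUnspecified(p), !newUnspecified(p))
	}
}
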
@@ -434,7 +434,7 @@ func PrettyTime(seconds int64) string {
seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
} else if seconds > day {
return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
} else if seconds > 0 {
} else if seconds >= 0 {
return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
} else {
return "n/a"
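The effect of changing `seconds > 0` to `seconds >= 0` is that a zero duration now formats as a time instead of "n/a". A trimmed local sketch, built only from the branches visible in this hunk (the multi-day branch is omitted and the `day` constant is assumed):

// Sketch only: a trimmed stand-in for the branches shown above.
package main

import "fmt"

func prettyTime(seconds int64) string {
	const day = int64(3600 * 24) // assumed definition
	if seconds > day {
		return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds >= 0 {
		return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
	}
	return "n/a"
}

func main() {
	fmt.Println(prettyTime(0))     // 00:00:00 (previously "n/a")
	fmt.Println(prettyTime(3661))  // 01:01:01
	fmt.Println(prettyTime(90061)) // 1 day 01:01:01
	fmt.Println(prettyTime(-1))    // n/a
}
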
@@ -460,10 +460,3 @@ func AtoSize(sizeString string) int {

return size
}

func MinInt(x, y int) int {
if x < y {
return x
}
return y
}

src/duplicacy_utils_darwin.go (new file, 14 lines)
@@ -0,0 +1,14 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

package duplicacy

import (
"strings"
)

func excludedByAttribute(attributes map[string][]byte) bool {
value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
return ok && strings.Contains(string(value), "com.apple.backupd")
}
src/duplicacy_utils_freebsd.go (new file, 13 lines)
@@ -0,0 +1,13 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

package duplicacy

import (
)

func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["duplicacy_exclude"]
return ok
}
src/duplicacy_utils_linux.go (new file, 13 lines)
@@ -0,0 +1,13 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

package duplicacy

import (
)

func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["duplicacy_exclude"]
return ok
}
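Each platform gets its own excludedByAttribute: macOS keys off the Time Machine exclusion attribute, while Linux and FreeBSD look for a duplicacy_exclude extended attribute. A small illustrative sketch of the Linux/FreeBSD behavior, using a local stand-in function and hypothetical attribute maps:

// Sketch only: a local stand-in for the Linux/FreeBSD variant above,
// fed with hypothetical extended-attribute maps.
package main

import "fmt"

func excludedByAttribute(attributes map[string][]byte) bool {
	_, ok := attributes["duplicacy_exclude"]
	return ok
}

func main() {
	plain := map[string][]byte{"user.comment": []byte("keep me")}
	excluded := map[string][]byte{"duplicacy_exclude": nil}
	fmt.Println(excludedByAttribute(plain))    // false
	fmt.Println(excludedByAttribute(excluded)) // true
}
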
@@ -88,3 +88,7 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
func joinPath(components ...string) string {
return path.Join(components...)
}

func SplitDir(fullPath string) (dir string, file string) {
return path.Split(fullPath)
}

@@ -92,6 +92,17 @@ func TestMatchPattern(t *testing.T) {
}
}

for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
if IsUnspecifiedFilter(pattern) {
t.Errorf("pattern %s has a specified filter", pattern)
}
}

for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} {
if !IsUnspecifiedFilter(pattern) {
t.Errorf("pattern %s does not have a specified filter", pattern)
}
}
}

func TestRateLimit(t *testing.T) {

@@ -126,3 +126,12 @@ func joinPath(components ...string) string {
}
return combinedPath
}

func SplitDir(fullPath string) (dir string, file string) {
i := strings.LastIndex(fullPath, "\\")
return fullPath[:i+1], fullPath[i+1:]
}

func excludedByAttribute(attributes map[string][]byte) bool {
return false
}

@@ -93,49 +93,49 @@ func (storage *WasabiStorage) DeleteFile(
// rename. It's designed to get the job done with as few dependencies
// on other packages as possible rather than being something
// general-purpose and reusable.
func (storage *WasabiStorage) MoveFile(
threadIndex int, from string, to string,
) (err error) {
func (storage *WasabiStorage) MoveFile(threadIndex int, from string, to string) (err error) {

var from_path string
var fromPath string
// The from path includes the bucket. Take care not to include an empty storageDir
// string as Wasabi's backend will return 404 on URLs with double slashes.
if storage.storageDir == "" {
from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
fromPath = fmt.Sprintf("/%s/%s", storage.bucket, from)
} else {
from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
fromPath = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
}

object := fmt.Sprintf("https://%s@%s%s",
storage.region, storage.endpoint, from_path)
object := fmt.Sprintf("https://%s@%s%s", storage.region, storage.endpoint, fromPath)

toPath := to
// The object's new name is relative to the top of the bucket.
new_name := fmt.Sprintf("%s/%s", storage.storageDir, to)
if storage.storageDir != "" {
toPath = fmt.Sprintf("%s/%s", storage.storageDir, to)
}

timestamp := time.Now().Format(time.RFC1123Z)

signing_string := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, from_path)
signingString := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)

signer := hmac.New(sha1.New, []byte(storage.secret))
signer.Write([]byte(signing_string))
signer.Write([]byte(signingString))

signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))

authorization := fmt.Sprintf("AWS %s:%s", storage.key, signature)

request, error := http.NewRequest("MOVE", object, nil)
if error != nil {
return error
request, err := http.NewRequest("MOVE", object, nil)
if err != nil {
return err
}
request.Header.Add("Authorization", authorization)
request.Header.Add("Date", timestamp)
request.Header.Add("Destination", new_name)
request.Header.Add("Destination", toPath)
request.Header.Add("Host", storage.endpoint)
request.Header.Add("Overwrite", "true")

response, error := storage.client.Do(request)
if error != nil {
return error
response, err := storage.client.Do(request)
if err != nil {
return err
}
defer response.Body.Close()

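The MOVE request above is authorized with an AWS-v2-style signature: an HMAC-SHA1 of the string-to-sign, base64-encoded into the Authorization header. A short sketch of just that signing step, using a hypothetical secret, key id, timestamp, and object path:

// Sketch only: the HMAC-SHA1 signing used for the MOVE request above,
// computed over a hypothetical string-to-sign with a hypothetical secret.
package main

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"fmt"
)

func main() {
	secret := "example-secret-key" // hypothetical
	stringToSign := "MOVE\n\n\nMon, 02 Jan 2006 15:04:05 -0700\n/my-bucket/chunks/ab/cdef"

	signer := hmac.New(sha1.New, []byte(secret))
	signer.Write([]byte(stringToSign))
	signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))

	// The signature goes into the Authorization header as "AWS <key id>:<signature>".
	fmt.Printf("Authorization: AWS %s:%s\n", "example-key-id", signature)
}
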
@@ -14,7 +14,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
//"net/http/httputil"

@@ -22,6 +21,7 @@ import (
"strings"
"sync"
"time"
"io/ioutil"
)

type WebDAVStorage struct {

@@ -49,7 +49,7 @@ var (
)

func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
if storageDir[len(storageDir)-1] != '/' {
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}

@@ -59,7 +59,7 @@ func CreateWebDAVStorage(host string, port int, username string, password string
username: username,
password: password,
storageDir: "",
useHTTP: false,
useHTTP: useHTTP,

client: http.DefaultClient,
threads: threads,

@@ -128,7 +128,12 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
dataReader = bytes.NewReader(data)
} else if method == "PUT" {
headers["Content-Type"] = "application/octet-stream"
dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
headers["Content-Length"] = fmt.Sprintf("%d", len(data))
if storage.UploadRateLimit <= 0 {
dataReader = bytes.NewReader(data)
} else {
dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
}
} else if method == "MOVE" {
headers["Destination"] = storage.createConnectionString(string(data))
headers["Content-Type"] = "application/octet-stream"

@@ -151,12 +156,16 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
request.Header.Set(key, value)
}

if method == "PUT" {
request.ContentLength = int64(len(data))
}

//requestDump, err := httputil.DumpRequest(request, true)
//LOG_INFO("debug", "Request: %s", requestDump)

response, err := storage.client.Do(request)
if err != nil {
LOG_TRACE("WEBDAV_RETRY", "URL request '%s %s' returned an error (%v)", method, uri, err)
LOG_TRACE("WEBDAV_ERROR", "URL request '%s %s' returned an error (%v)", method, uri, err)
backoff = storage.retry(backoff)
continue
}

@@ -165,11 +174,13 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
return response.Body, response.Header, nil
}

io.Copy(ioutil.Discard, response.Body)
response.Body.Close()

if response.StatusCode == 301 {
return nil, nil, errWebDAVMovedPermanently
}

response.Body.Close()
if response.StatusCode == 404 {
// Retry if it is UPLOAD, otherwise return immediately
if method != "PUT" {

@@ -210,53 +221,57 @@ type WebDAVMultiStatus struct {

func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {

propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"
maxTries := 3
for tries := 0; ; tries++ {
propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"

body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)

readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
content, err := ioutil.ReadAll(readCloser)
if err != nil {
return nil, err
}
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)

object := WebDAVMultiStatus{}
err = xml.Unmarshal(content, &object)
if err != nil {
return nil, err
}

if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}

responses := make(map[string]WebDAVProperties)

for _, responseTag := range object.Responses {

if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
object := WebDAVMultiStatus{}
err = xml.NewDecoder(readCloser).Decode(&object)
if err != nil {
if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
LOG_WARN("WEBDAV_RETRY", "Retrying on %v", err)
continue
}
return nil, err
}

properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}

responseKey := responseTag.Href
responses[responseKey] = properties
responses := make(map[string]WebDAVProperties)

for _, responseTag := range object.Responses {

if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
}

properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
}

responseKey := responseTag.Href
responses[responseKey] = properties

}

return responses, nil
}

return responses, nil
}

|
||||
@@ -305,6 +320,12 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st
|
||||
}
|
||||
files = append(files, file)
|
||||
sizes = append(sizes, int64(0))
|
||||
|
||||
// Add the directory to the directory cache
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[dir + file] = 1
|
||||
storage.directoryCacheLock.Unlock()
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
@@ -313,6 +334,7 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st
|
||||
|
||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||
func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||
|
||||
properties, err := storage.getProperties(filePath, 0, "getcontentlength", "resourcetype")
|
||||
if err != nil {
|
||||
if err == errWebDAVNotExist {
|
||||
@@ -325,7 +347,14 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
|
||||
return false, false, 0, err
|
||||
}
|
||||
|
||||
if m, exist := properties["/"+storage.storageDir+filePath]; !exist {
|
||||
m, exist := properties["/"+storage.storageDir+filePath]
|
||||
|
||||
// If no properties exist for the given filePath, remove the trailing / from filePath and search again
|
||||
if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' {
|
||||
m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]]
|
||||
}
|
||||
|
||||
if !exist {
|
||||
return false, false, 0, nil
|
||||
} else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
|
||||
return true, true, 0, nil
|
||||
@@ -343,6 +372,7 @@ func (storage *WebDAVStorage) DeleteFile(threadIndex int, filePath string) (err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
io.Copy(ioutil.Discard, readCloser)
|
||||
readCloser.Close()
|
||||
return nil
|
||||
}
|
||||
@@ -353,6 +383,7 @@ func (storage *WebDAVStorage) MoveFile(threadIndex int, from string, to string)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
io.Copy(ioutil.Discard, readCloser)
|
||||
readCloser.Close()
|
||||
return nil
|
||||
}
|
||||
@@ -366,21 +397,7 @@ func (storage *WebDAVStorage) createParentDirectory(threadIndex int, dir string)
|
||||
}
|
||||
parent := dir[:found]
|
||||
|
||||
storage.directoryCacheLock.Lock()
|
||||
_, exist := storage.directoryCache[parent]
|
||||
storage.directoryCacheLock.Unlock()
|
||||
|
||||
if exist {
|
||||
return nil
|
||||
}
|
||||
|
||||
err = storage.CreateDirectory(threadIndex, parent)
|
||||
if err == nil {
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[parent] = 1
|
||||
storage.directoryCacheLock.Unlock()
|
||||
}
|
||||
return err
|
||||
return storage.CreateDirectory(threadIndex, parent)
|
||||
}
|
||||
|
||||
// CreateDirectory creates a new directory.
|
||||
@@ -393,18 +410,35 @@ func (storage *WebDAVStorage) CreateDirectory(threadIndex int, dir string) (err
|
||||
return nil
|
||||
}
|
||||
|
||||
storage.directoryCacheLock.Lock()
|
||||
_, exist := storage.directoryCache[dir]
|
||||
storage.directoryCacheLock.Unlock()
|
||||
|
||||
if exist {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If there is an error in creating the parent directory, proceed anyway
|
||||
storage.createParentDirectory(threadIndex, dir)
|
||||
|
||||
readCloser, _, err := storage.sendRequest("MKCOL", dir, 0, []byte(""))
|
||||
if err != nil {
|
||||
if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently {
|
||||
if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently || err == io.EOF {
|
||||
// We simply ignore these errors and assume that the directory already exists
|
||||
LOG_TRACE("WEBDAV_MKDIR", "Can't create directory %s: %v; error ignored", dir, err)
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[dir] = 1
|
||||
storage.directoryCacheLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
io.Copy(ioutil.Discard, readCloser)
|
||||
readCloser.Close()
|
||||
|
||||
storage.directoryCacheLock.Lock()
|
||||
storage.directoryCache[dir] = 1
|
||||
storage.directoryCacheLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -429,6 +463,7 @@ func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, conte
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
io.Copy(ioutil.Discard, readCloser)
|
||||
readCloser.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||