Merge branch 'master' into mac-exclude
.github/ISSUE_TEMPLATE.md (vendored): 18 lines changed
@@ -1,5 +1,17 @@
-Please submit an issue for bug reports or feature requests. If you have any questions please post them on https://forum.duplicacy.com.
-
-When you're reporting a bug, please specify the OS, version, command line arguments, or any info that you think is helpful for the diagnosis. If Duplicacy reports an error, please post the program output here.
-
-Note that this repository hosts the CLI version of Duplicacy only. If you're reporting anything related to the GUI version, please visit https://forum.duplicacy.com.
+---
+name: Please use the official forum
+about: Please use the official forum instead of Github
+title: 'Please use the official forum'
+labels: ''
+assignees: ''
+---
+
+Please **use the [Duplicacy Forum](https://forum.duplicacy.com/)** when reporting bugs, making feature requests, asking for help or simply praising Duplicacy for its ease of use.
+
+We strongly encourage you to create an account on the forum and use that platform for discussion as there is a higher chance that someone there will talk to you.
+
+There is a handful of people watching the Github Issues and we are in the process of moving **all** of them to the forum as well. Most likely you will not receive an answer here or it will be very slow and you will be pointed to the forum.
+
+We have already created a comprehensive [Guide](https://forum.duplicacy.com/t/duplicacy-user-guide/1197), and a [How-To](https://forum.duplicacy.com/c/how-to) category which stores more wisdom than these issues on Github.
@@ -14,3 +14,4 @@ Duplicacy is based on the following open source projects:
 |https://github.com/pcwizz/xattr | BSD-2-Clause |
 |https://github.com/minio/blake2b-simd | Apache-2.0 |
 |https://github.com/go-ole/go-ole | MIT |
+|https://github.com/ncw/swift | MIT |
Gopkg.lock (generated): 116 lines changed
@@ -7,17 +7,11 @@
   revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
   version = "v0.16.0"

-[[projects]]
-  name = "github.com/Azure/azure-sdk-for-go"
-  packages = ["version"]
-  revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
-  version = "v14.2.0"
-
 [[projects]]
   name = "github.com/Azure/go-autorest"
-  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
-  revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
-  version = "v10.2.0"
+  packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
+  revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
+  version = "v10.15.5"

 [[projects]]
   branch = "master"
@@ -27,9 +21,9 @@

 [[projects]]
   name = "github.com/aws/aws-sdk-go"
-  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
-  revision = "a32b1dcd091264b5dee7b386149b6cc3823395c9"
-  version = "v1.12.31"
+  packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
+  revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
+  version = "v1.30.7"

 [[projects]]
   name = "github.com/bkaradzic/go-lz4"
@@ -40,14 +34,14 @@
 [[projects]]
   name = "github.com/dgrijalva/jwt-go"
   packages = ["."]
-  revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
-  version = "v3.1.0"
+  revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
+  version = "v3.2.0"

 [[projects]]
   branch = "master"
   name = "github.com/gilbertchen/azure-sdk-for-go"
-  packages = ["storage"]
-  revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"
+  packages = ["storage","version"]
+  revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"

 [[projects]]
   branch = "master"
@@ -56,10 +50,9 @@
   revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"

 [[projects]]
-  branch = "master"
   name = "github.com/gilbertchen/go-dropbox"
   packages = ["."]
-  revision = "90711b603312b1f973f3a5da3793ac4f1e5c2f2a"
+  revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"

 [[projects]]
   name = "github.com/gilbertchen/go-ole"
@@ -71,7 +64,7 @@
   branch = "master"
   name = "github.com/gilbertchen/go.dbus"
   packages = ["."]
-  revision = "9e442e6378618c083fd3b85b703ffd202721fb17"
+  revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"

 [[projects]]
   branch = "master"
@@ -98,33 +91,33 @@
   revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"

 [[projects]]
-  name = "github.com/go-ini/ini"
-  packages = ["."]
-  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
-  version = "v1.32.0"
+  branch = "master"
+  name = "github.com/golang/groupcache"
+  packages = ["lru"]
+  revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"

 [[projects]]
-  branch = "master"
   name = "github.com/golang/protobuf"
   packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
-  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
+  revision = "84668698ea25b64748563aa20726db66a6b8d299"
+  version = "v1.3.5"

 [[projects]]
   name = "github.com/googleapis/gax-go"
-  packages = ["."]
-  revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
-  version = "v2.0.0"
+  packages = [".","v2"]
+  revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
+  version = "v2.0.3"

 [[projects]]
   name = "github.com/jmespath/go-jmespath"
   packages = ["."]
-  revision = "0b12d6b5"
+  revision = "c2b33e84"

 [[projects]]
-  branch = "master"
   name = "github.com/kr/fs"
   packages = ["."]
-  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
+  revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
+  version = "v0.1.0"

 [[projects]]
   name = "github.com/marstr/guid"
@@ -139,22 +132,22 @@
   revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"

 [[projects]]
-  branch = "master"
   name = "github.com/ncw/swift"
   packages = ["."]
-  revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
+  revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
+  version = "v1.0.50"

 [[projects]]
   name = "github.com/pkg/errors"
   packages = ["."]
-  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
-  version = "v0.8.0"
+  revision = "614d223910a179a466c1767a985424175c39b465"
+  version = "v0.9.1"

 [[projects]]
   name = "github.com/pkg/sftp"
   packages = ["."]
-  revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
-  version = "1.0.0"
+  revision = "5616182052227b951e76d9c9b79a616c608bd91b"
+  version = "v1.11.0"

 [[projects]]
   name = "github.com/satori/go.uuid"
@@ -168,63 +161,68 @@
   packages = ["."]
   revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"

+[[projects]]
+  name = "go.opencensus.io"
+  packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
+  revision = "d835ff86be02193d324330acdb7d65546b05f814"
+  version = "v0.22.3"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
-  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","ssh","ssh/agent","ssh/terminal"]
-  revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94"
+  packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
+  revision = "056763e48d71961566155f089ac0f02f1dda9b5a"

 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
-  revision = "9dfe39835686865bff950a07b394c12a98ddc811"
+  packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
+  revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"

 [[projects]]
-  branch = "master"
   name = "golang.org/x/oauth2"
   packages = [".","google","internal","jws","jwt"]
-  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
+  revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"

 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
-  packages = ["unix","windows"]
-  revision = "82aafbf43bf885069dc71b7e7c2f9d7a614d47da"
+  packages = ["cpu","unix","windows"]
+  revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"

 [[projects]]
-  branch = "master"
   name = "golang.org/x/text"
-  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
-  revision = "88f656faf3f37f690df1a32515b479415e1a6769"
+  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+  revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
+  version = "v0.3.2"

 [[projects]]
-  branch = "master"
   name = "google.golang.org/api"
-  packages = ["drive/v3","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
-  revision = "17b5f22a248d6d3913171c1a557552ace0d9c806"
+  packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
+  revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
+  version = "v0.21.0"

 [[projects]]
   name = "google.golang.org/appengine"
   packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
-  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
-  version = "v1.0.0"
+  revision = "971852bfffca25b069c31162ae8f247a3dba083b"
+  version = "v1.6.5"

 [[projects]]
   branch = "master"
   name = "google.golang.org/genproto"
-  packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
-  revision = "891aceb7c239e72692819142dfca057bdcbfcb96"
+  packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
+  revision = "baae70f3302d3efdff74db41e48a5d476d036906"

 [[projects]]
   name = "google.golang.org/grpc"
-  packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
-  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
-  version = "v1.8.0"
+  packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
+  revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
+  version = "v1.28.1"

 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
+  inputs-digest = "e462352e0b0c726247078462e30a79330ac7a8b9dc62e9ed9d1e097b684224e9"
   solver-name = "gps-cdcl"
   solver-version = 1
Gopkg.toml: 14 lines changed
@@ -31,7 +31,7 @@

 [[constraint]]
   name = "github.com/aws/aws-sdk-go"
-  version = "1.12.31"
+  version = "1.30.7"

 [[constraint]]
   name = "github.com/bkaradzic/go-lz4"
@@ -46,8 +46,8 @@
   name = "github.com/gilbertchen/cli"

 [[constraint]]
-  branch = "master"
   name = "github.com/gilbertchen/go-dropbox"
+  revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"

 [[constraint]]
   name = "github.com/gilbertchen/go-ole"
@@ -75,7 +75,7 @@

 [[constraint]]
   name = "github.com/pkg/sftp"
-  version = "1.0.0"
+  version = "1.10.1"

 [[constraint]]
   branch = "master"
@@ -86,9 +86,13 @@
   name = "golang.org/x/net"

 [[constraint]]
-  branch = "master"
   name = "golang.org/x/oauth2"
+  revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"

 [[constraint]]
-  branch = "master"
   name = "google.golang.org/api"
+  version = "0.21.0"
+
+[[constraint]]
+  name = "google.golang.org/grpc"
+  version = "1.28.0"
@@ -1,8 +1,7 @@
 Copyright © 2017 Acrosync LLC

 * Free for personal use or commercial trial
-* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
-* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
+* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
 * The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
 * CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
 * Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
@@ -90,8 +90,7 @@ The following table compares the feature lists of all these backup tools:
 ## License

 * Free for personal use or commercial trial
-* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
-* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
+* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
 * The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
 * CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
 * Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
@@ -7,6 +7,7 @@ package main
 import (
 	"encoding/json"
 	"fmt"
+	"net/http"
 	"os"
 	"os/exec"
 	"os/signal"
@@ -16,7 +17,6 @@ import (
 	"runtime"
 	"strconv"
 	"strings"
-	"net/http"

 	_ "net/http/pprof"

@@ -159,7 +159,9 @@ func setGlobalOptions(context *cli.Context) {
 		}()
 	}

+	for _, logID := range context.GlobalStringSlice("suppress") {
+		duplicacy.SuppressLog(logID)
+	}
+
 	duplicacy.RunInBackground = context.GlobalBool("background")
 }
@@ -203,13 +205,29 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
 	}

 	if err != nil {
-		duplicacy.LOG_WARN("SCRIPT_ERROR", "Failed to run script: %v", err)
+		duplicacy.LOG_ERROR("SCRIPT_ERROR", "Failed to run %s script: %v", script, err)
 		return false
 	}

 	return true
 }

+func loadRSAPrivateKey(keyFile string, passphrase string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
+	if keyFile == "" {
+		return
+	}
+
+	prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
+	if passphrase == "" {
+		passphrase = duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
+		backupManager.LoadRSAPrivateKey(keyFile, passphrase)
+		duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
+	} else {
+		backupManager.LoadRSAPrivateKey(keyFile, passphrase)
+	}
+}
+
 func initRepository(context *cli.Context) {
 	configRepository(context, true)
 }
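The -suppress wiring in setGlobalOptions above only records log IDs; duplicacy.SuppressLog itself is outside this diff. A minimal standalone sketch of the assumed behavior (a set of suppressed IDs consulted before each message is printed); suppressLog and logInfo here are hypothetical stand-ins, not the duplicacy logging API:

package main

import "fmt"

// suppressed records log IDs that should be dropped, assuming the real
// duplicacy.SuppressLog keeps a similar set.
var suppressed = map[string]bool{}

func suppressLog(logID string) { suppressed[logID] = true }

// logInfo prints a message only when its ID has not been suppressed.
func logInfo(logID string, format string, args ...interface{}) {
	if suppressed[logID] {
		return
	}
	fmt.Printf("INFO %s %s\n", logID, fmt.Sprintf(format, args...))
}

func main() {
	suppressLog("BACKUP_PROGRESS")
	logInfo("BACKUP_PROGRESS", "uploaded chunk %d", 42) // dropped
	logInfo("BACKUP_END", "backup completed")           // printed
}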
@@ -321,6 +339,11 @@ func configRepository(context *cli.Context, init bool) {
 	if preference.Encrypted {
 		prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
 		storagePassword = duplicacy.GetPassword(preference, "password", prompt, false, true)
+	} else {
+		if context.String("key") != "" {
+			duplicacy.LOG_ERROR("STORAGE_CONFIG", "RSA encryption can't be enabled with an unencrypted storage")
+			return
+		}
 	}

 	existingConfig, _, err := duplicacy.DownloadConfig(storage, storagePassword)
@@ -435,8 +458,26 @@ func configRepository(context *cli.Context, init bool) {
 	if iterations == 0 {
 		iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
 	}

+	dataShards := 0
+	parityShards := 0
+	shards := context.String("erasure-coding")
+	if shards != "" {
+		shardsRegex := regexp.MustCompile(`^([0-9]+):([0-9]+)$`)
+		matched := shardsRegex.FindStringSubmatch(shards)
+		if matched == nil {
+			duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
+		} else {
+			dataShards, _ = strconv.Atoi(matched[1])
+			parityShards, _ = strconv.Atoi(matched[2])
+			if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
+				duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
+			}
+		}
+	}
+
 	duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
-		minimumChunkSize, storagePassword, otherConfig, bitCopy)
+		minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
 	}

 	duplicacy.Preferences = append(duplicacy.Preferences, preference)
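The erasure-coding block above accepts a value of the form <data shards>:<parity shards> and rejects anything else: there must be 1 to 256 data shards and at least one parity shard, with no more parity shards than data shards. A self-contained sketch of the same validation (standard library only; validateShards is a hypothetical helper, not a duplicacy function):

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// validateShards mirrors the check added above to configRepository.
func validateShards(spec string) (dataShards, parityShards int, ok bool) {
	matched := regexp.MustCompile(`^([0-9]+):([0-9]+)$`).FindStringSubmatch(spec)
	if matched == nil {
		return 0, 0, false
	}
	dataShards, _ = strconv.Atoi(matched[1])
	parityShards, _ = strconv.Atoi(matched[2])
	if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
		return 0, 0, false
	}
	return dataShards, parityShards, true
}

func main() {
	for _, spec := range []string{"5:2", "0:1", "4:8", "abc"} {
		d, p, ok := validateShards(spec)
		fmt.Printf("%-4s -> data=%d parity=%d valid=%v\n", spec, d, p, ok)
	}
}

Running this prints that "5:2" is the only valid spec: "0:1" has no data shards, "4:8" has more parity than data shards, and "abc" does not match the pattern at all.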
@@ -534,7 +575,13 @@ func setPreference(context *cli.Context) {
 		newPreference.DoNotSavePassword = triBool.IsTrue()
 	}

+	if context.String("nobackup-file") != "" {
 		newPreference.NobackupFile = context.String("nobackup-file")
+	}
+
+	if context.String("filters") != "" {
+		newPreference.FiltersFile = context.String("filters")
+	}

 	triBool = context.Generic("exclude-by-attribute").(*TriBool)
 	if triBool.IsSet() {
@@ -722,7 +769,7 @@ func backupRepository(context *cli.Context) {
 	uploadRateLimit := context.Int("limit-rate")
 	enumOnly := context.Bool("enum-only")
 	storage.SetRateLimits(0, uploadRateLimit)
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
 	duplicacy.SavePassword(*preference, "password", password)

 	backupManager.SetupSnapshotCache(preference.Name)
@@ -775,6 +822,7 @@ func restoreRepository(context *cli.Context) {
 	setOwner := !context.Bool("ignore-owner")

 	showStatistics := context.Bool("stats")
+	persist := context.Bool("persist")

 	var patterns []string
 	for _, pattern := range context.Args() {
@@ -789,33 +837,27 @@ func restoreRepository(context *cli.Context) {
 			pattern = pattern[1:]
 		}

-		if duplicacy.IsUnspecifiedFilter(pattern) {
-			pattern = "+" + pattern
-		}
-
-		if duplicacy.IsEmptyFilter(pattern) {
-			continue
-		}
-
-		if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
-			valid, err := duplicacy.IsValidRegex(pattern[2:])
-			if !valid || err != nil {
-				duplicacy.LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
-			}
-		}
-
 		patterns = append(patterns, pattern)
 	}

+	patterns = duplicacy.ProcessFilterLines(patterns, make([]string, 0))
+
 	duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))

+	duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
+
 	storage.SetRateLimits(context.Int("limit-rate"), 0)
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
 	duplicacy.SavePassword(*preference, "password", password)

+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
+
 	backupManager.SetupSnapshotCache(preference.Name)
-	backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
+	failed := backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns, persist)
+	if failed > 0 {
+		duplicacy.LOG_ERROR("RESTORE_FAIL", "%d file(s) were not restored correctly", failed)
+		return
+	}

 	runScript(context, preference.Name, "post")
 }
@@ -851,7 +893,7 @@ func listSnapshots(context *cli.Context) {
 	tag := context.String("t")
 	revisions := getRevisions(context)

-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", preference.ExcludeByAttribute)
 	duplicacy.SavePassword(*preference, "password", password)

 	id := preference.SnapshotID
@@ -864,6 +906,9 @@ func listSnapshots(context *cli.Context) {
 	showFiles := context.Bool("files")
 	showChunks := context.Bool("chunks")

+	// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
+	loadRSAPrivateKey(context.String("key"), "", preference, backupManager, resetPassword)
+
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)

@@ -886,7 +931,12 @@ func checkSnapshots(context *cli.Context) {

 	runScript(context, preference.Name, "pre")

-	storage := duplicacy.CreateStorage(*preference, false, 1)
+	threads := context.Int("threads")
+	if threads < 1 {
+		threads = 1
+	}
+
+	storage := duplicacy.CreateStorage(*preference, false, threads)
 	if storage == nil {
 		return
 	}
@@ -899,9 +949,11 @@ func checkSnapshots(context *cli.Context) {
 	tag := context.String("t")
 	revisions := getRevisions(context)

-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
 	duplicacy.SavePassword(*preference, "password", password)

+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
+
 	id := preference.SnapshotID
 	if context.Bool("all") {
 		id = ""
@@ -912,11 +964,13 @@ func checkSnapshots(context *cli.Context) {
 	showStatistics := context.Bool("stats")
 	showTabular := context.Bool("tabular")
 	checkFiles := context.Bool("files")
+	checkChunks := context.Bool("chunks")
 	searchFossils := context.Bool("fossils")
 	resurrect := context.Bool("resurrect")
+	persist := context.Bool("persist")

 	backupManager.SetupSnapshotCache(preference.Name)
-	backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
+	backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads, persist)

 	runScript(context, preference.Name, "post")
 }
@@ -954,9 +1008,12 @@ func printFile(context *cli.Context) {
 		snapshotID = context.String("id")
 	}

-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
 	duplicacy.SavePassword(*preference, "password", password)

+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
+
 	backupManager.SetupSnapshotCache(preference.Name)

 	file := ""
@@ -1010,11 +1067,13 @@ func diff(context *cli.Context) {
 	}

 	compareByHash := context.Bool("hash")
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
 	duplicacy.SavePassword(*preference, "password", password)

+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
+
 	backupManager.SetupSnapshotCache(preference.Name)
-	backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)

 	runScript(context, preference.Name, "post")
 }
@@ -1053,7 +1112,7 @@ func showHistory(context *cli.Context) {

 	revisions := getRevisions(context)
 	showLocalHash := context.Bool("hash")
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
 	duplicacy.SavePassword(*preference, "password", password)

 	backupManager.SetupSnapshotCache(preference.Name)
@@ -1116,7 +1175,7 @@ func pruneSnapshots(context *cli.Context) {
 		os.Exit(ArgumentExitCode)
 	}

-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.ExcludeByAttribute)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
 	duplicacy.SavePassword(*preference, "password", password)

 	backupManager.SetupSnapshotCache(preference.Name)
@@ -1136,9 +1195,14 @@ func copySnapshots(context *cli.Context) {
 		os.Exit(ArgumentExitCode)
 	}

-	threads := context.Int("threads")
-	if threads < 1 {
-		threads = 1
+	uploadingThreads := context.Int("threads")
+	if uploadingThreads < 1 {
+		uploadingThreads = 1
+	}
+
+	downloadingThreads := context.Int("download-threads")
+	if downloadingThreads < 1 {
+		downloadingThreads = 1
 	}

 	repository, source := getRepositoryPreference(context, context.String("from"))
@@ -1146,7 +1210,7 @@ func copySnapshots(context *cli.Context) {
 	runScript(context, source.Name, "pre")

 	duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
-	sourceStorage := duplicacy.CreateStorage(*source, false, threads)
+	sourceStorage := duplicacy.CreateStorage(*source, false, downloadingThreads)
 	if sourceStorage == nil {
 		return
 	}
@@ -1156,10 +1220,12 @@ func copySnapshots(context *cli.Context) {
 		sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
 	}

-	sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile, source.ExcludeByAttribute)
+	sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "", false)
 	sourceManager.SetupSnapshotCache(source.Name)
 	duplicacy.SavePassword(*source, "password", sourcePassword)

+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), source, sourceManager, false)
+
 	_, destination := getRepositoryPreference(context, context.String("to"))

 	if destination.Name == source.Name {
@@ -1174,7 +1240,7 @@ func copySnapshots(context *cli.Context) {
 	}

 	duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
-	destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
+	destinationStorage := duplicacy.CreateStorage(*destination, false, uploadingThreads)
 	if destinationStorage == nil {
 		return
 	}
@@ -1189,7 +1255,7 @@ func copySnapshots(context *cli.Context) {
 	destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))

 	destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
-		destinationPassword, destination.NobackupFile, destination.ExcludeByAttribute)
+		destinationPassword, "", "", false)
 	duplicacy.SavePassword(*destination, "password", destinationPassword)
 	destinationManager.SetupSnapshotCache(destination.Name)

@@ -1199,7 +1265,7 @@ func copySnapshots(context *cli.Context) {
 		snapshotID = context.String("id")
 	}

-	sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
+	sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, uploadingThreads, downloadingThreads)
 	runScript(context, source.Name, "post")
 }

@@ -1303,7 +1369,7 @@ func benchmark(context *cli.Context) {
 	}

 	threads := downloadThreads
-	if (threads < uploadThreads) {
+	if threads < uploadThreads {
 		threads = uploadThreads
 	}

@@ -1314,7 +1380,7 @@ func benchmark(context *cli.Context) {
 	if storage == nil {
 		return
 	}
-	duplicacy.Benchmark(repository, storage, int64(fileSize) * 1000000, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
+	duplicacy.Benchmark(repository, storage, int64(fileSize) * 1024 * 1024, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
 }

 func main() {
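The benchmark change above makes the file-size option mean mebibytes (1024 * 1024 bytes), consistent with the chunk-size arithmetic on the same line, rather than decimal megabytes. A quick check of the difference for an illustrative value of 256 (the exact flag name and its default are outside this excerpt):

package main

import "fmt"

func main() {
	const fileSize = 256 // benchmark file size, in "MB"
	fmt.Println(int64(fileSize) * 1000000)     // before: 256000000 bytes
	fmt.Println(int64(fileSize) * 1024 * 1024) // after:  268435456 bytes
}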
@@ -1367,6 +1433,16 @@ func main() {
 			Usage:    "initialize a new repository at the specified path rather than the current working directory",
 			Argument: "<path>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA public key to encrypt file chunks",
+			Argument: "<public key>",
+		},
+		cli.StringFlag{
+			Name:     "erasure-coding",
+			Usage:    "enable erasure coding to protect against storage corruption",
+			Argument: "<data shards>:<parity shards>",
+		},
 	},
 	Usage:     "Initialize the storage if necessary and the current directory as the repository",
 	ArgsUsage: "<snapshot id> <storage url>",
@@ -1474,6 +1550,20 @@ func main() {
 			Usage:    "restore from the specified storage instead of the default one",
 			Argument: "<storage name>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA private key to decrypt file chunks",
+			Argument: "<private key>",
+		},
+		cli.BoolFlag{
+			Name:  "persist",
+			Usage: "continue processing despite chunk errors or existing files (without -overwrite), reporting any affected files",
+		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Restore the repository to a previously saved snapshot",
 	ArgsUsage: "[--] [pattern] ...",
@@ -1519,6 +1609,11 @@ func main() {
 			Usage:    "retrieve snapshots from the specified storage",
 			Argument: "<storage name>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA private key to decrypt file chunks",
+			Argument: "<private key>",
+		},
 	},
 	Usage:     "List snapshots",
 	ArgsUsage: " ",
@@ -1558,6 +1653,10 @@ func main() {
 			Name:  "files",
 			Usage: "verify the integrity of every file",
 		},
+		cli.BoolFlag{
+			Name:  "chunks",
+			Usage: "verify the integrity of every chunk",
+		},
 		cli.BoolFlag{
 			Name:  "stats",
 			Usage: "show deduplication statistics (imply -all and all revisions)",
@@ -1571,6 +1670,26 @@ func main() {
 			Usage:    "retrieve snapshots from the specified storage",
 			Argument: "<storage name>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA private key to decrypt file chunks",
+			Argument: "<private key>",
+		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
+		cli.IntFlag{
+			Name:     "threads",
+			Value:    1,
+			Usage:    "number of threads used to verify chunks",
+			Argument: "<n>",
+		},
+		cli.BoolFlag{
+			Name:  "persist",
+			Usage: "continue processing despite chunk errors, reporting any affected (corrupted) files",
+		},
 	},
 	Usage:     "Check the integrity of snapshots",
 	ArgsUsage: " ",
@@ -1594,6 +1713,16 @@ func main() {
 			Usage:    "retrieve the file from the specified storage",
 			Argument: "<storage name>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA private key to decrypt file chunks",
+			Argument: "<private key>",
+		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Print to stdout the specified file, or the snapshot content if no file is specified",
 	ArgsUsage: "[<file>]",
@@ -1622,6 +1751,16 @@ func main() {
 			Usage:    "retrieve files from the specified storage",
 			Argument: "<storage name>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA private key to decrypt file chunks",
+			Argument: "<private key>",
+		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Compare two snapshots or two revisions of a file",
 	ArgsUsage: "[<file>]",
@@ -1786,6 +1925,16 @@ func main() {
 			Usage:    "specify the path of the repository (instead of the current working directory)",
 			Argument: "<path>",
 		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA public key to encrypt file chunks",
+			Argument: "<public key>",
+		},
+		cli.StringFlag{
+			Name:     "erasure-coding",
+			Usage:    "enable erasure coding to protect against storage corruption",
+			Argument: "<data shards>:<parity shards>",
+		},
 	},
 	Usage:     "Add an additional storage to be used for the existing repository",
 	ArgsUsage: "<storage name> <snapshot id> <storage url>",
@@ -1823,6 +1972,7 @@ func main() {
 			Name:     "nobackup-file",
 			Usage:    "Directories containing a file with this name will not be backed up",
 			Argument: "<file name>",
+			Value:    "",
 		},
 		cli.GenericFlag{
 			Name: "exclude-by-attribute",
@@ -1843,6 +1993,11 @@ func main() {
 			Usage:    "use the specified storage instead of the default one",
 			Argument: "<storage name>",
 		},
+		cli.StringFlag{
+			Name:     "filters",
+			Usage:    "specify the path of the filters file containing include/exclude patterns",
+			Argument: "<file path>",
+		},
 	},
 	Usage:     "Change the options for the default or specified storage",
 	ArgsUsage: " ",
@@ -1889,6 +2044,22 @@ func main() {
 			Usage:    "number of uploading threads",
 			Argument: "<n>",
 		},
+		cli.IntFlag{
+			Name:     "download-threads",
+			Value:    1,
+			Usage:    "number of downloading threads",
+			Argument: "<n>",
+		},
+		cli.StringFlag{
+			Name:     "key",
+			Usage:    "the RSA private key to decrypt file chunks from the source storage",
+			Argument: "<private key>",
+		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Copy snapshots between compatible storages",
 	ArgsUsage: " ",
@@ -1997,19 +2168,24 @@ func main() {
 			Name:  "comment",
 			Usage: "add a comment to identify the process",
 		},
+		cli.StringSliceFlag{
+			Name:     "suppress, s",
+			Usage:    "suppress logs with the specified id",
+			Argument: "<id>",
+		},
 	}

 	app.HideVersion = true
 	app.Name = "duplicacy"
 	app.HelpName = "duplicacy"
 	app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-	app.Version = "2.1.1" + " (" + GitCommit + ")"
+	app.Version = "2.6.2" + " (" + GitCommit + ")"

 	// If the program is interrupted, call the RunAtError function.
 	c := make(chan os.Signal, 1)
 	signal.Notify(c, os.Interrupt)
 	go func() {
-		for _ = range c {
+		for range c {
 			duplicacy.RunAtError()
 			os.Exit(1)
 		}
@@ -104,7 +104,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files

 	if dir == "snapshots/" {

-		for subDir, _ := range subDirs {
+		for subDir := range subDirs {
 			files = append(files, subDir)
 		}

@@ -166,9 +166,21 @@ func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chun

 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
+
+	tries := 0
+
+	for {
 		reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
 		blob := storage.containers[threadIndex].GetBlobReference(filePath)
-	return blob.CreateBlockBlobFromReader(reader, nil)
+		err = blob.CreateBlockBlobFromReader(reader, nil)
+
+		if err == nil || !strings.Contains(err.Error(), "write: broken pipe") || tries >= 3 {
+			return err
+		}
+
+		LOG_INFO("AZURE_RETRY", "Connection unexpectedly terminated: %v; retrying", err)
+		tries++
+	}
 }
|||||||
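The Azure upload change above retries only one specific transient failure. The same loop shape, extracted as a standalone sketch (the error-string match and the limit of 3 retries mirror the hunk; doUpload is a hypothetical stand-in for the blob call):

    package main

    import (
        "errors"
        "fmt"
        "strings"
    )

    // uploadWithRetry retries doUpload only when the error looks like a dropped
    // connection, up to maxTries extra attempts; any other error returns as-is.
    func uploadWithRetry(doUpload func() error, maxTries int) error {
        tries := 0
        for {
            err := doUpload()
            if err == nil || !strings.Contains(err.Error(), "write: broken pipe") || tries >= maxTries {
                return err
            }
            tries++
        }
    }

    func main() {
        attempts := 0
        err := uploadWithRetry(func() error {
            attempts++
            if attempts < 3 {
                return errors.New("write: broken pipe")
            }
            return nil
        }, 3)
        fmt.Println(attempts, err) // 3 <nil>
    }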
@@ -5,19 +5,22 @@
 package duplicacy
 
 import (
-    "bytes"
-    "crypto/sha1"
-    "encoding/base64"
-    "encoding/hex"
-    "encoding/json"
-    "fmt"
     "io"
-    "io/ioutil"
-    "math/rand"
-    "net/http"
+    "os"
+    "fmt"
+    "bytes"
+    "time"
+    "sync"
     "strconv"
     "strings"
-    "time"
+    "net/url"
+    "net/http"
+    "math/rand"
+    "io/ioutil"
+    "crypto/sha1"
+    "encoding/hex"
+    "encoding/json"
+    "encoding/base64"
 )
 
 type B2Error struct {
@@ -39,67 +42,115 @@ var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_acco
 
 type B2Client struct {
     HTTPClient *http.Client
 
     AccountID        string
     ApplicationKeyID string
     ApplicationKey   string
+    BucketName       string
+    BucketID         string
+    StorageDir       string
+
+    Lock               sync.Mutex
     AuthorizationToken string
     APIURL             string
     DownloadURL        string
-    BucketName         string
-    BucketID           string
+    IsAuthorized       bool
 
-    UploadURL   string
-    UploadToken string
+    UploadURLs   []string
+    UploadTokens []string
 
+    Threads        int
+    MaximumRetries int
     TestMode bool
+
+    LastAuthorizationTime int64
+}
+
+// URL encode the given path but keep the slashes intact
+func B2Escape(path string) string {
+    var components []string
+    for _, c := range strings.Split(path, "/") {
+        components = append(components, url.QueryEscape(c))
+    }
+    return strings.Join(components, "/")
+}
+
+func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
+
+    for storageDir != "" && storageDir[0] == '/' {
+        storageDir = storageDir[1:]
+    }
+
+    if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
+        storageDir += "/"
+    }
+
+    maximumRetries := 15
+    if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
+        maximumRetries, _ = strconv.Atoi(value)
+        LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
     }
 
-func NewB2Client(applicationKeyID string, applicationKey string) *B2Client {
     client := &B2Client{
         HTTPClient:       http.DefaultClient,
         ApplicationKeyID: applicationKeyID,
         ApplicationKey:   applicationKey,
+        DownloadURL:      downloadURL,
+        StorageDir:       storageDir,
+        UploadURLs:       make([]string, threads),
+        UploadTokens:     make([]string, threads),
+        Threads:          threads,
+        MaximumRetries:   maximumRetries,
     }
     return client
 }
 
-func (client *B2Client) retry(backoff int, response *http.Response) int {
+func (client *B2Client) getAPIURL() string {
+    client.Lock.Lock()
+    defer client.Lock.Unlock()
+    return client.APIURL
+}
+
+func (client *B2Client) getDownloadURL() string {
+    client.Lock.Lock()
+    defer client.Lock.Unlock()
+    return client.DownloadURL
+}
+
+func (client *B2Client) retry(retries int, response *http.Response) int {
     if response != nil {
         if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
             retryAfter, _ := strconv.Atoi(backoffList[0])
             if retryAfter >= 1 {
                 time.Sleep(time.Duration(retryAfter) * time.Second)
+                return 1
+            }
+        }
+    }
+
+    if retries >= client.MaximumRetries + 1 {
         return 0
     }
-    }
-
-    if backoff == 0 {
-        backoff = 1
-    } else {
-        backoff *= 2
-    }
-    time.Sleep(time.Duration(backoff) * time.Second)
-    return backoff
+
+    retries++
+    delay := 1 << uint(retries)
+    if delay > 64 {
+        delay = 64
+    }
+    delayInSeconds := (rand.Float32() + 1.0) * float32(delay) / 2.0
+    time.Sleep(time.Duration(delayInSeconds) * time.Second)
+    return retries
 }
 
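The rewritten retry helper replaces plain doubling with capped exponential backoff plus jitter: the base delay doubles per attempt, is capped at 64 seconds, and is then scaled by a random factor in [1.0, 2.0) divided by two, so the actual sleep falls in [delay/2, delay). A standalone sketch of just that arithmetic:

    package main

    import (
        "fmt"
        "math/rand"
    )

    // backoffSeconds mirrors the delay computed by the new retry logic above:
    // 2^retries seconds, capped at 64, spread uniformly over [delay/2, delay).
    func backoffSeconds(retries int) float32 {
        delay := 1 << uint(retries)
        if delay > 64 {
            delay = 64
        }
        return (rand.Float32() + 1.0) * float32(delay) / 2.0
    }

    func main() {
        for retries := 1; retries <= 8; retries++ {
            fmt.Printf("attempt %d: sleep %.1fs\n", retries, backoffSeconds(retries))
        }
    }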
-func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
+func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
+    io.ReadCloser, http.Header, int64, error) {
-    switch method {
-    case http.MethodGet:
-        break
-    case http.MethodHead:
-        break
-    case http.MethodPost:
-        break
-    default:
-        return nil, nil, 0, fmt.Errorf("unhandled http request method: " + method)
-    }
 
     var response *http.Response
 
-    backoff := 0
-    for i := 0; i < 8; i++ {
-        var inputReader *bytes.Reader
+    retries := 0
+    for {
+        var inputReader io.Reader
+        isUpload := false
+
         switch input.(type) {
         default:
@@ -108,21 +159,43 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
             return nil, nil, 0, err
         }
             inputReader = bytes.NewReader(jsonInput)
-        case []byte:
-            inputReader = bytes.NewReader(input.([]byte))
         case int:
             inputReader = bytes.NewReader([]byte(""))
+        case []byte:
+            isUpload = true
+            inputReader = bytes.NewReader(input.([]byte))
+        case *RateLimitedReader:
+            isUpload = true
+            rateLimitedReader := input.(*RateLimitedReader)
+            rateLimitedReader.Reset()
+            inputReader = rateLimitedReader
         }
 
-        request, err := http.NewRequest(method, url, inputReader)
+        if isUpload {
+            if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
+                err := client.getUploadURL(threadIndex)
+                if err != nil {
+                    return nil, nil, 0, err
+                }
+            }
+            requestURL = client.UploadURLs[threadIndex]
+        }
+
+        request, err := http.NewRequest(method, requestURL, inputReader)
         if err != nil {
             return nil, nil, 0, err
         }
 
-        if url == B2AuthorizationURL {
+        if requestURL == B2AuthorizationURL {
             request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.ApplicationKeyID+":"+client.ApplicationKey)))
+        } else if isUpload {
+            request.ContentLength, _ = strconv.ParseInt(requestHeaders["Content-Length"], 10, 64)
+            request.Header.Set("Authorization", client.UploadTokens[threadIndex])
         } else {
+            client.Lock.Lock()
             request.Header.Set("Authorization", client.AuthorizationToken)
+            client.Lock.Unlock()
         }
 
         if requestHeaders != nil {
@@ -133,7 +206,9 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
 
         if client.TestMode {
             r := rand.Float32()
-            if r < 0.5 {
+            if r < 0.5 && isUpload {
+                request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
+            } else if r < 0.75 {
                 request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
             } else {
                 request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
@@ -142,28 +217,51 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
 
         response, err = client.HTTPClient.Do(request)
         if err != nil {
-            if url != B2AuthorizationURL {
-                LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned an error: %v", url, err)
-                backoff = client.retry(backoff, response)
-                continue
-            }
-            return nil, nil, 0, err
-        }
+            // Don't retry when the first authorization request fails
+            if requestURL == B2AuthorizationURL && !client.IsAuthorized {
+                return nil, nil, 0, err
+            }
+
+            LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s' returned an error: %v", threadIndex, requestURL, err)
+
+            retries = client.retry(retries, response)
+            if retries <= 0 {
+                return nil, nil, 0, err
+            }
+
+            // Clear the upload url to requrest a new one on retry
+            if isUpload {
+                client.UploadURLs[threadIndex] = ""
+                client.UploadTokens[threadIndex] = ""
+            }
+            continue
+
+        }
 
         if response.StatusCode < 300 {
             return response.Body, response.Header, response.ContentLength, nil
         }
 
-        LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s %s' returned status code %d", method, url, response.StatusCode)
+        e := &B2Error{}
+        if err := json.NewDecoder(response.Body).Decode(e); err != nil {
+            LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned status code %d", threadIndex, method, requestURL, response.StatusCode)
+        } else {
+            LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned %d %s", threadIndex, method, requestURL, response.StatusCode, e.Message)
+        }
 
-        io.Copy(ioutil.Discard, response.Body)
         response.Body.Close()
 
         if response.StatusCode == 401 {
-            if url == B2AuthorizationURL {
+            if requestURL == B2AuthorizationURL {
                 return nil, nil, 0, fmt.Errorf("Authorization failure")
             }
-            client.AuthorizeAccount()
+            // Attempt authorization again. If authorization is actually not done, run the random backoff
+            _, allowed := client.AuthorizeAccount(threadIndex)
+            if allowed {
                 continue
+            }
         } else if response.StatusCode == 403 {
             if !client.TestMode {
                 return nil, nil, 0, fmt.Errorf("B2 cap exceeded")
@@ -176,32 +274,21 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
         } else if response.StatusCode == 416 {
             if http.MethodHead == method {
                 // 416 Requested Range Not Satisfiable
-                return nil, nil, 0, fmt.Errorf("URL request '%s' returned status code %d", url, response.StatusCode)
+                return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
             }
-        } else if response.StatusCode == 429 || response.StatusCode == 408 {
-            backoff = client.retry(backoff, response)
-            continue
-        } else if response.StatusCode >= 500 && response.StatusCode <= 599 {
-            backoff = client.retry(backoff, response)
-            continue
-        } else {
-            LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
-            backoff = client.retry(backoff, response)
-            continue
         }
 
-        defer response.Body.Close()
-
-        e := &B2Error{}
-        if err := json.NewDecoder(response.Body).Decode(e); err != nil {
-            return nil, nil, 0, err
-        }
-
-        return nil, nil, 0, e
+        retries = client.retry(retries, response)
+        if retries <= 0 {
+            return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
+        }
+
+        if isUpload {
+            client.UploadURLs[threadIndex] = ""
+            client.UploadTokens[threadIndex] = ""
+        }
     }
 
-    return nil, nil, 0, fmt.Errorf("Maximum backoff reached")
 }
 
 type B2AuthorizeAccountOutput struct {
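One structural point in the rewritten call(): each thread owns one slot in UploadURLs and UploadTokens, and a failed upload clears only its own slot so that the next attempt requests a fresh upload URL. That bookkeeping, reduced to a sketch with hypothetical names (not duplicacy's actual types):

    package main

    import "fmt"

    // uploadChannels sketches the per-thread upload state used above:
    // slot i belongs exclusively to thread i, so no locking is needed.
    type uploadChannels struct {
        urls   []string
        tokens []string
    }

    func (u *uploadChannels) ready(threadIndex int) bool {
        return u.urls[threadIndex] != "" && u.tokens[threadIndex] != ""
    }

    // invalidate clears one thread's slot after a failed upload.
    func (u *uploadChannels) invalidate(threadIndex int) {
        u.urls[threadIndex] = ""
        u.tokens[threadIndex] = ""
    }

    func main() {
        u := &uploadChannels{urls: make([]string, 4), tokens: make([]string, 4)}
        u.urls[1], u.tokens[1] = "https://pod.example/upload", "token"
        fmt.Println(u.ready(1)) // true
        u.invalidate(1)
        fmt.Println(u.ready(1)) // false
    }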
@@ -211,11 +298,18 @@ type B2AuthorizeAccountOutput struct {
     DownloadURL string
 }
 
-func (client *B2Client) AuthorizeAccount() (err error) {
+func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bool) {
+    client.Lock.Lock()
+    defer client.Lock.Unlock()
+
+    // Don't authorize if the previous one was done less than 30 seconds ago
+    if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix() - 30 {
+        return nil, false
+    }
 
-    readCloser, _, _, err := client.call(B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
+    readCloser, _, _, err := client.call(threadIndex, B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
     if err != nil {
-        return err
+        return err, true
     }
 
     defer readCloser.Close()
@@ -223,7 +317,7 @@ func (client *B2Client) AuthorizeAccount() (err error) {
     output := &B2AuthorizeAccountOutput{}
 
     if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
-        return err
+        return err, true
     }
 
     // The account id may be different from the application key id so we're getting the account id from the returned
@@ -232,9 +326,15 @@ func (client *B2Client) AuthorizeAccount() (err error) {
 
     client.AuthorizationToken = output.AuthorizationToken
     client.APIURL = output.APIURL
+    if client.DownloadURL == "" {
         client.DownloadURL = output.DownloadURL
+    }
+    LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
+    client.IsAuthorized = true
 
-    return nil
+    client.LastAuthorizationTime = time.Now().Unix()
+
+    return nil, true
 }
 
 type ListBucketOutput struct {
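The 30-second guard above throttles re-authorization: when many threads hit a 401 at once, only the first caller actually re-authorizes, and the rest are told (via the allowed result) to fall back to the random backoff instead. The guard, isolated as a sketch with illustrative names:

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // reauthThrottle sketches the guard used above: tryAuthorize runs the
    // callback only if no authorization happened in the last 30 seconds.
    type reauthThrottle struct {
        lock     sync.Mutex
        lastAuth int64
    }

    func (t *reauthThrottle) tryAuthorize(authorize func()) bool {
        t.lock.Lock()
        defer t.lock.Unlock()
        now := time.Now().Unix()
        if t.lastAuth != 0 && t.lastAuth > now-30 {
            return false // a recent authorization already happened
        }
        authorize()
        t.lastAuth = now
        return true
    }

    func main() {
        t := &reauthThrottle{}
        fmt.Println(t.tryAuthorize(func() {})) // true: first caller authorizes
        fmt.Println(t.tryAuthorize(func() {})) // false: within 30 seconds
    }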
@@ -248,10 +348,11 @@ func (client *B2Client) FindBucket(bucketName string) (err error) {
 
     input := make(map[string]string)
     input["accountId"] = client.AccountID
+    input["bucketName"] = bucketName
 
-    url := client.APIURL + "/b2api/v1/b2_list_buckets"
+    url := client.getAPIURL() + "/b2api/v1/b2_list_buckets"
 
-    readCloser, _, _, err := client.call(url, http.MethodPost, nil, input)
+    readCloser, _, _, err := client.call(0, url, http.MethodPost, nil, input)
     if err != nil {
         return err
     }
@@ -293,7 +394,7 @@ type B2ListFileNamesOutput struct {
     NextFileId string
 }
 
-func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
+func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 
     maxFileCount := 1000
     if singleFile {
@@ -311,20 +412,21 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
 
     input := make(map[string]interface{})
     input["bucketId"] = client.BucketID
-    input["startFileName"] = startFileName
+    input["startFileName"] = client.StorageDir + startFileName
     input["maxFileCount"] = maxFileCount
+    input["prefix"] = client.StorageDir
 
     for {
-        url := client.APIURL + "/b2api/v1/b2_list_file_names"
+        apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
         requestHeaders := map[string]string{}
         requestMethod := http.MethodPost
         var requestInput interface{}
         requestInput = input
         if includeVersions {
-            url = client.APIURL + "/b2api/v1/b2_list_file_versions"
+            apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
         } else if singleFile {
             // handle a single file with no versions as a special case to download the last byte of the file
-            url = client.DownloadURL + "/file/" + client.BucketName + "/" + startFileName
+            apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
             // requesting byte -1 works for empty files where 0-0 fails with a 416 error
             requestHeaders["Range"] = "bytes=-1"
             // HEAD request
@@ -334,7 +436,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
         var readCloser io.ReadCloser
         var responseHeader http.Header
         var err error
-        readCloser, responseHeader, _, err = client.call(url, requestMethod, requestHeaders, requestInput)
+        readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
         if err != nil {
             return nil, err
         }
@@ -347,7 +449,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
 
         if singleFile && !includeVersions {
             if responseHeader == nil {
-                LOG_DEBUG("BACKBLAZE_LIST", "b2_download_file_by_name did not return headers")
+                LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
                 return []*B2Entry{}, nil
             }
             requiredHeaders := []string{
@@ -361,11 +463,17 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
                 }
             }
             if len(missingKeys) > 0 {
-                return nil, fmt.Errorf("b2_download_file_by_name missing headers: %s", missingKeys)
+                return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
             }
             // construct the B2Entry from the response headers of the download request
             fileID := responseHeader.Get("x-bz-file-id")
             fileName := responseHeader.Get("x-bz-file-name")
+            unescapedFileName, err := url.QueryUnescape(fileName)
+            if err == nil {
+                fileName = unescapedFileName
+            } else {
+                LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
+            }
             fileAction := "upload"
             // byte range that is returned: "bytes #-#/#
             rangeString := responseHeader.Get("Content-Range")
@@ -378,14 +486,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
             // this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
             fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
             if fileSize != 0 {
-                return nil, fmt.Errorf("b2_download_file_by_name returned non-zero file length")
+                return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
             }
             } else {
-                return nil, fmt.Errorf("could not parse b2_download_file_by_name headers")
+                return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
             }
             fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
 
-            return []*B2Entry{&B2Entry{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
+            return []*B2Entry{{fileID, fileName[len(client.StorageDir):], fileAction, fileSize, fileUploadTimestamp}}, nil
         }
 
         if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
@@ -394,10 +502,8 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
 
         ioutil.ReadAll(readCloser)
 
-        if startFileName == "" {
-            files = append(files, output.Files...)
-        } else {
             for _, file := range output.Files {
+                file.FileName = file.FileName[len(client.StorageDir):]
                 if singleFile {
                     if file.FileName == startFileName {
                         files = append(files, file)
@@ -419,8 +525,6 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
                 }
             }
 
-        }
 
         if len(output.NextFileName) == 0 {
             break
         }
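ListFileNames now scopes every request to StorageDir: the directory is prepended on the way out (startFileName and the server-side prefix) and stripped from every returned name, so callers keep working with paths relative to the storage root. A sketch of the stripping step, with illustrative names and paths:

    package main

    import (
        "fmt"
        "strings"
    )

    // relativize drops the storage-directory prefix from listed names,
    // mirroring the file.FileName[len(client.StorageDir):] slice above.
    func relativize(storageDir string, names []string) []string {
        var out []string
        for _, name := range names {
            if strings.HasPrefix(name, storageDir) {
                out = append(out, name[len(storageDir):])
            }
        }
        return out
    }

    func main() {
        names := []string{"backups/repo1/snapshots/id/1", "backups/repo1/chunks/ab"}
        fmt.Println(relativize("backups/repo1/", names))
        // [snapshots/id/1 chunks/ab]
    }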
@@ -434,14 +538,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
     return files, nil
 }
 
-func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {
+func (client *B2Client) DeleteFile(threadIndex int, fileName string, fileID string) (err error) {
 
     input := make(map[string]string)
-    input["fileName"] = fileName
+    input["fileName"] = client.StorageDir + fileName
     input["fileId"] = fileID
 
-    url := client.APIURL + "/b2api/v1/b2_delete_file_version"
-    readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
+    url := client.getAPIURL() + "/b2api/v1/b2_delete_file_version"
+    readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
     if err != nil {
         return err
     }
@@ -454,14 +558,14 @@ type B2HideFileOutput struct {
     FileID string
 }
 
-func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
+func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID string, err error) {
 
     input := make(map[string]string)
     input["bucketId"] = client.BucketID
-    input["fileName"] = fileName
+    input["fileName"] = client.StorageDir + fileName
 
-    url := client.APIURL + "/b2api/v1/b2_hide_file"
-    readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
+    url := client.getAPIURL() + "/b2api/v1/b2_hide_file"
+    readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
     if err != nil {
         return "", err
     }
@@ -478,11 +582,11 @@ func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
     return output.FileID, nil
 }
 
-func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, error) {
+func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
 
-    url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath
+    url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
 
-    readCloser, _, len, err := client.call(url, http.MethodGet, make(map[string]string), 0)
+    readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
     return readCloser, len, err
 }
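DownloadFile now escapes the path with B2Escape, which query-escapes each path component separately so the slashes that separate components survive. A standalone copy for illustration (url.QueryEscape encodes a space as '+' and other reserved bytes as %XX):

    package main

    import (
        "fmt"
        "net/url"
        "strings"
    )

    // b2Escape reproduces the B2Escape helper above: escape each component,
    // then rejoin with the literal slashes intact.
    func b2Escape(path string) string {
        var components []string
        for _, c := range strings.Split(path, "/") {
            components = append(components, url.QueryEscape(c))
        }
        return strings.Join(components, "/")
    }

    func main() {
        fmt.Println(b2Escape("snapshots/my dir/file#1"))
        // snapshots/my+dir/file%231
    }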
 
@@ -492,12 +596,12 @@ type B2GetUploadArgumentOutput struct {
     AuthorizationToken string
 }
 
-func (client *B2Client) getUploadURL() error {
+func (client *B2Client) getUploadURL(threadIndex int) error {
     input := make(map[string]string)
     input["bucketId"] = client.BucketID
 
-    url := client.APIURL + "/b2api/v1/b2_get_upload_url"
-    readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
+    url := client.getAPIURL() + "/b2api/v1/b2_get_upload_url"
+    readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
     if err != nil {
         return err
     }
@@ -510,96 +614,29 @@ func (client *B2Client) getUploadURL() error {
         return err
     }
 
-    client.UploadURL = output.UploadURL
-    client.UploadToken = output.AuthorizationToken
+    client.UploadURLs[threadIndex] = output.UploadURL
+    client.UploadTokens[threadIndex] = output.AuthorizationToken
 
     return nil
 }
 
-func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
+func (client *B2Client) UploadFile(threadIndex int, filePath string, content []byte, rateLimit int) (err error) {
 
     hasher := sha1.New()
     hasher.Write(content)
     hash := hex.EncodeToString(hasher.Sum(nil))
 
     headers := make(map[string]string)
-    headers["X-Bz-File-Name"] = filePath
+    headers["X-Bz-File-Name"] = B2Escape(client.StorageDir + filePath)
+    headers["Content-Length"] = fmt.Sprintf("%d", len(content))
     headers["Content-Type"] = "application/octet-stream"
     headers["X-Bz-Content-Sha1"] = hash
 
-    var response *http.Response
-
-    backoff := 0
-    for i := 0; i < 8; i++ {
-
-        if client.UploadURL == "" || client.UploadToken == "" {
-            err = client.getUploadURL()
-            if err != nil {
-                return err
-            }
-        }
-
-        request, err := http.NewRequest("POST", client.UploadURL, CreateRateLimitedReader(content, rateLimit))
-        if err != nil {
-            return err
-        }
-        request.ContentLength = int64(len(content))
-
-        request.Header.Set("Authorization", client.UploadToken)
-        request.Header.Set("X-Bz-File-Name", filePath)
-        request.Header.Set("Content-Type", "application/octet-stream")
-        request.Header.Set("X-Bz-Content-Sha1", hash)
-
-        for key, value := range headers {
-            request.Header.Set(key, value)
-        }
-
-        if client.TestMode {
-            r := rand.Float32()
-            if r < 0.8 {
-                request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
-            } else if r < 0.9 {
-                request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
-            } else {
-                request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
-            }
-        }
-
-        response, err = client.HTTPClient.Do(request)
-        if err != nil {
-            LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned an error: %v", client.UploadURL, err)
-            backoff = client.retry(backoff, response)
-            client.UploadURL = ""
-            client.UploadToken = ""
-            continue
-        }
-
-        io.Copy(ioutil.Discard, response.Body)
-        response.Body.Close()
-
-        if response.StatusCode < 300 {
-            return nil
-        }
-
-        LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
-
-        if response.StatusCode == 401 {
-            LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
-            client.UploadURL = ""
-            client.UploadToken = ""
-            continue
-        } else if response.StatusCode == 403 {
-            if !client.TestMode {
-                return fmt.Errorf("B2 cap exceeded")
-            }
-            continue
-        } else {
-            LOG_INFO("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
-            backoff = client.retry(backoff, response)
-            client.UploadURL = ""
-            client.UploadToken = ""
-        }
-    }
-
-    return fmt.Errorf("Maximum backoff reached")
-}
+    readCloser, _, _, err := client.call(threadIndex, "", http.MethodPost, headers, CreateRateLimitedReader(content, rateLimit))
+    if err != nil {
+        return err
+    }
+
+    readCloser.Close()
+    return nil
+}
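UploadFile is now routed through the shared call() path with a *RateLimitedReader body. Because a retried POST must resend the body from the start, the reader has to be rewindable, which is what the Reset call inside call() relies on. A minimal replayable reader with the same shape (rate limiting omitted; names are illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "io/ioutil"
    )

    // replayableReader wraps an in-memory body so that a failed HTTP attempt
    // can rewind and resend the full content on retry.
    type replayableReader struct {
        content []byte
        reader  *bytes.Reader
    }

    func newReplayableReader(content []byte) *replayableReader {
        return &replayableReader{content: content, reader: bytes.NewReader(content)}
    }

    func (r *replayableReader) Read(p []byte) (int, error) { return r.reader.Read(p) }

    // Reset rewinds to the start so the next attempt sees the whole body again.
    func (r *replayableReader) Reset() { r.reader = bytes.NewReader(r.content) }

    func main() {
        r := newReplayableReader([]byte("chunk data"))
        first, _ := ioutil.ReadAll(r) // first attempt consumes the body
        r.Reset()
        second, _ := ioutil.ReadAll(io.LimitReader(r, 5)) // retry reads it again
        fmt.Println(string(first), string(second))
    }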

@@ -37,7 +37,7 @@ func createB2ClientForTest(t *testing.T) (*B2Client, string) {
         return nil, ""
     }
 
-    return NewB2Client(b2["account"], b2["key"]), b2["bucket"]
+    return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
 
 }
 
@@ -50,7 +50,7 @@ func TestB2Client(t *testing.T) {
 
     b2Client.TestMode = true
 
-    err := b2Client.AuthorizeAccount()
+    err, _ := b2Client.AuthorizeAccount(0)
     if err != nil {
         t.Errorf("Failed to authorize the b2 account: %v", err)
         return
@@ -64,14 +64,14 @@ func TestB2Client(t *testing.T) {
 
     testDirectory := "b2client_test/"
 
-    files, err := b2Client.ListFileNames(testDirectory, false, false)
+    files, err := b2Client.ListFileNames(0, testDirectory, false, false)
     if err != nil {
         t.Errorf("Failed to list files: %v", err)
         return
     }
 
     for _, file := range files {
-        err = b2Client.DeleteFile(file.FileName, file.FileID)
+        err = b2Client.DeleteFile(0, file.FileName, file.FileID)
         if err != nil {
             t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
         }
@@ -90,14 +90,14 @@ func TestB2Client(t *testing.T) {
         hash := sha256.Sum256(content)
         name := hex.EncodeToString(hash[:])
 
-        err = b2Client.UploadFile(testDirectory+name, content, 100)
+        err = b2Client.UploadFile(0, testDirectory+name, content, 100)
         if err != nil {
             t.Errorf("Error uploading file '%s': %v", name, err)
             return
         }
     }
 
-    files, err = b2Client.ListFileNames(testDirectory, false, false)
+    files, err = b2Client.ListFileNames(0, testDirectory, false, false)
     if err != nil {
         t.Errorf("Failed to list files: %v", err)
         return
@@ -105,7 +105,7 @@ func TestB2Client(t *testing.T) {
 
     for _, file := range files {
 
-        readCloser, _, err := b2Client.DownloadFile(file.FileName)
+        readCloser, _, err := b2Client.DownloadFile(0, file.FileName)
         if err != nil {
             t.Errorf("Error downloading file '%s': %v", file.FileName, err)
             return
@@ -125,7 +125,7 @@ func TestB2Client(t *testing.T) {
     }
 
     for _, file := range files {
-        err = b2Client.DeleteFile(file.FileName, file.FileID)
+        err = b2Client.DeleteFile(0, file.FileName, file.FileID)
         if err != nil {
             t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
         }

@@ -11,18 +11,15 @@ import (
 type B2Storage struct {
     StorageBase
 
-    clients []*B2Client
+    client *B2Client
 }
 
 // CreateB2Storage creates a B2 storage object.
-func CreateB2Storage(accountID string, applicationKey string, bucket string, threads int) (storage *B2Storage, err error) {
+func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
 
-    var clients []*B2Client
-
-    for i := 0; i < threads; i++ {
-        client := NewB2Client(accountID, applicationKey)
-
-        err = client.AuthorizeAccount()
+    client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
+
+    err, _ = client.AuthorizeAccount(0)
     if err != nil {
         return nil, err
     }
@@ -32,11 +29,8 @@ func CreateB2Storage(accountID string, applicationKey string, bucket string, thr
         return nil, err
     }
 
-        clients = append(clients, client)
-    }
-
     storage = &B2Storage{
-        clients: clients,
+        client: client,
     }
 
     storage.DerivedStorage = storage
@@ -56,7 +50,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
         includeVersions = true
     }
 
-    entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)
+    entries, err := storage.client.ListFileNames(threadIndex, dir, false, includeVersions)
     if err != nil {
         return nil, nil, err
     }
@@ -71,7 +65,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
         subDirs[subDir+"/"] = true
     }
 
-    for subDir, _ := range subDirs {
+    for subDir := range subDirs {
         files = append(files, subDir)
     }
     } else if dir == "chunks" {
@@ -102,7 +96,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
 
     if strings.HasSuffix(filePath, ".fsl") {
         filePath = filePath[:len(filePath)-len(".fsl")]
-        entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
+        entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
         if err != nil {
             return err
         }
@@ -116,7 +110,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
 
         toBeDeleted = true
 
-        err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
+        err = storage.client.DeleteFile(threadIndex, filePath, entry.FileID)
         if err != nil {
             return err
         }
@@ -125,7 +119,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
         return nil
 
     } else {
-        entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
+        entries, err := storage.client.ListFileNames(threadIndex, filePath, true, false)
         if err != nil {
             return err
         }
@@ -133,7 +127,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
         if len(entries) == 0 {
             return nil
         }
-        return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
+        return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
     }
 }
 
@@ -160,10 +154,10 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
     }
 
     if filePath == from {
-        _, err = storage.clients[threadIndex].HideFile(from)
+        _, err = storage.client.HideFile(threadIndex, from)
         return err
     } else {
-        entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
+        entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
         if err != nil {
             return err
         }
@@ -171,7 +165,7 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
             return nil
         }
 
-        return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
+        return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
     }
 }
 
@@ -188,7 +182,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
         filePath = filePath[:len(filePath)-len(".fsl")]
     }
 
-    entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
+    entries, err := storage.client.ListFileNames(threadIndex, filePath, true, isFossil)
     if err != nil {
         return false, false, 0, err
     }
@@ -210,22 +204,20 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
 
-    filePath = strings.Replace(filePath, " ", "%20", -1)
-    readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
+    readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
     if err != nil {
         return err
     }
 
     defer readCloser.Close()
 
-    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
+    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.client.Threads)
     return err
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
-    filePath = strings.Replace(filePath, " ", "%20", -1)
-    return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
+    return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
 }
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
@@ -243,7 +235,5 @@ func (storage *B2Storage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *B2Storage) EnableTestMode() {
-    for _, client := range storage.clients {
-        client.TestMode = true
-    }
+    storage.client.TestMode = true
 }
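With the clients slice collapsed into a single shared *B2Client, per-thread throttling reduces to dividing the global limit by client.Threads, as in the RateLimitedCopy and UploadFile calls above. The arithmetic, as a sketch (the unit is whatever the caller's rate limit uses):

    package main

    import "fmt"

    // perThreadLimit splits one global rate limit evenly across workers.
    func perThreadLimit(globalLimit int, threads int) int {
        if threads <= 0 {
            return globalLimit
        }
        return globalLimit / threads
    }

    func main() {
        fmt.Println(perThreadLimit(10240, 4)) // 2560 per thread
    }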
@@ -36,7 +36,10 @@ type BackupManager struct {
|
|||||||
|
|
||||||
nobackupFile string // don't backup directory when this file name is found
|
nobackupFile string // don't backup directory when this file name is found
|
||||||
|
|
||||||
|
filtersFile string // the path to the filters file
|
||||||
|
|
||||||
excludeByAttribute bool // don't backup file based on file attribute
|
excludeByAttribute bool // don't backup file based on file attribute
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (manager *BackupManager) SetDryRun(dryRun bool) {
|
func (manager *BackupManager) SetDryRun(dryRun bool) {
|
||||||
@@ -46,7 +49,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
|
|||||||
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
|
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
|
||||||
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
|
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
|
||||||
// master key which can be nil if encryption is not enabled.
|
// master key which can be nil if encryption is not enabled.
|
||||||
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, excludeByAttribute bool) *BackupManager {
|
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, filtersFile string, excludeByAttribute bool) *BackupManager {
|
||||||
|
|
||||||
config, _, err := DownloadConfig(storage, password)
|
config, _, err := DownloadConfig(storage, password)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -70,6 +73,8 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
|
|||||||
|
|
||||||
nobackupFile: nobackupFile,
|
nobackupFile: nobackupFile,
|
||||||
|
|
||||||
|
filtersFile: filtersFile,
|
||||||
|
|
||||||
excludeByAttribute: excludeByAttribute,
|
excludeByAttribute: excludeByAttribute,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -80,6 +85,11 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
|
|||||||
return backupManager
|
return backupManager
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// loadRSAPrivateKey loads the specifed private key file for decrypting file chunks
|
||||||
|
func (manager *BackupManager) LoadRSAPrivateKey(keyFile string, passphrase string) {
|
||||||
|
manager.config.loadRSAPrivateKey(keyFile, passphrase)
|
||||||
|
}
|
||||||
|
|
||||||
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
|
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
|
||||||
// directory
|
// directory
|
||||||
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
||||||
@@ -107,6 +117,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
|
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
|
||||||
// to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
|
// to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
|
||||||
// original unchanged entry list.
|
// original unchanged entry list.
|
||||||
@@ -180,6 +191,15 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
|
|
||||||
LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
|
LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
|
||||||
|
|
||||||
|
if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
|
||||||
|
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
|
||||||
|
manager.config.DataShards, manager.config.ParityShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
|
||||||
|
LOG_INFO("BACKUP_KEY", "RSA encryption is enabled")
|
||||||
|
}
|
||||||
|
|
||||||
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
|
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
|
||||||
if remoteSnapshot == nil {
|
if remoteSnapshot == nil {
|
||||||
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
|
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
|
||||||
@@ -192,7 +212,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
defer DeleteShadowCopy()
|
defer DeleteShadowCopy()
|
||||||
|
|
||||||
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
|
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
|
||||||
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile, manager.excludeByAttribute)
|
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
|
||||||
|
manager.nobackupFile, manager.filtersFile, manager.excludeByAttribute)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
|
||||||
return false
|
return false
|
||||||
@@ -202,6 +223,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(localSnapshot.Files) == 0 {
|
||||||
|
LOG_ERROR("SNAPSHOT_EMPTY", "No files under the repository to be backed up")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
// This cache contains all chunks referenced by last snasphot. Any other chunks will lead to a call to
|
// This cache contains all chunks referenced by last snasphot. Any other chunks will lead to a call to
|
||||||
// UploadChunk.
|
// UploadChunk.
|
||||||
chunkCache := make(map[string]bool)
|
chunkCache := make(map[string]bool)
|
||||||
@@ -506,6 +532,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
chunkID := chunk.GetID()
|
chunkID := chunk.GetID()
|
||||||
chunkSize := chunk.GetLength()
|
chunkSize := chunk.GetLength()
|
||||||
|
|
||||||
|
if chunkSize == 0 {
|
||||||
|
LOG_DEBUG("CHUNK_EMPTY", "Ignored chunk %s of size 0", chunkID)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
chunkIndex++
|
chunkIndex++
|
||||||
|
|
||||||
_, found := chunkCache[chunkID]
|
_, found := chunkCache[chunkID]
|
||||||
@@ -727,7 +758,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
// the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
|
// the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
|
||||||
// remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
|
// remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
|
||||||
func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
|
func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
|
||||||
deleteMode bool, setOwner bool, showStatistics bool, patterns []string) bool {
|
deleteMode bool, setOwner bool, showStatistics bool, patterns []string, allowFailures bool) int {
|
||||||
|
|
||||||
startTime := time.Now().Unix()
|
startTime := time.Now().Unix()
|
||||||
|
|
||||||
@@ -750,7 +781,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
err = os.Mkdir(top, 0744)
|
err = os.Mkdir(top, 0744)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("RESTORE_MKDIR", "Can't create the directory to be restored: %v", err)
|
LOG_ERROR("RESTORE_MKDIR", "Can't create the directory to be restored: %v", err)
|
||||||
return false
|
return 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -758,16 +789,17 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
|
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
|
||||||
if err != nil && !os.IsExist(err) {
|
if err != nil && !os.IsExist(err) {
|
||||||
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
|
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
|
||||||
return false
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
|
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
|
||||||
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
|
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
|
||||||
|
|
||||||
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile, manager.excludeByAttribute)
|
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
|
||||||
|
manager.filtersFile, manager.excludeByAttribute)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
|
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
|
||||||
return false
|
return 0
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_INFO("RESTORE_START", "Restoring %s to revision %d", top, revision)
|
LOG_INFO("RESTORE_START", "Restoring %s to revision %d", top, revision)
|
||||||
@@ -794,6 +826,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
|
|
||||||
var totalFileSize int64
|
var totalFileSize int64
|
||||||
var downloadedFileSize int64
|
var downloadedFileSize int64
|
||||||
|
var failedFiles int
|
||||||
|
var skippedFileSize int64
|
||||||
|
var skippedFiles int64
|
||||||
|
|
||||||
|
var downloadedFiles []*Entry
|
||||||
|
|
||||||
i := 0
|
i := 0
|
||||||
for _, entry := range remoteSnapshot.Files {
|
for _, entry := range remoteSnapshot.Files {
|
||||||
@@ -811,6 +848,9 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
if compare == 0 {
|
if compare == 0 {
|
||||||
i++
|
i++
|
||||||
if quickMode && local.IsSameAs(entry) {
|
if quickMode && local.IsSameAs(entry) {
|
||||||
|
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", local.Path)
|
||||||
|
skippedFileSize += entry.Size
|
||||||
|
skippedFiles++
|
||||||
skipped = true
|
skipped = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -840,7 +880,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
             err = os.Symlink(entry.Link, fullPath)
             if err != nil {
                 LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", entry.Path, err)
-                return false
+                return 0
             }
             entry.RestoreMetadata(fullPath, nil, setOwner)
             LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", entry.Path)
@@ -849,7 +889,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu

             if err == nil && !stat.IsDir() {
                 LOG_ERROR("RESTORE_NOTDIR", "The path %s is not a directory", fullPath)
-                return false
+                return 0
             }

             if os.IsNotExist(err) {
@@ -858,7 +898,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
                 err = os.MkdirAll(fullPath, 0700)
                 if err != nil && !os.IsExist(err) {
                     LOG_ERROR("RESTORE_MKDIR", "%v", err)
-                    return false
+                    return 0
                 }
             }
         } else {
@@ -876,14 +916,13 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
     // Sort entries by their starting chunks in order to linearize the access to the chunk chain.
     sort.Sort(ByChunk(fileEntries))

-    chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads)
+    chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, showStatistics, threads, allowFailures)
     chunkDownloader.AddFiles(remoteSnapshot, fileEntries)

     chunkMaker := CreateChunkMaker(manager.config, true)

     startDownloadingTime := time.Now().Unix()

-    var downloadedFiles []*Entry
     // Now download files one by one
     for _, file := range fileEntries {

@@ -893,16 +932,21 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
             if quickMode {
                 if file.IsSameAsFileInfo(stat) {
                     LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
+                    skippedFileSize += file.Size
+                    skippedFiles++
                     continue
                 }
             }

             if file.Size == 0 && file.IsSameAsFileInfo(stat) {
                 LOG_TRACE("RESTORE_SKIP", "File %s unchanged (size 0)", file.Path)
+                skippedFileSize += file.Size
+                skippedFiles++
                 continue
             }
         } else {
-            err = os.MkdirAll(path.Dir(fullPath), 0744)
+            parent, _ := SplitDir(fullPath)
+            err = os.MkdirAll(parent, 0744)
             if err != nil {
                 LOG_ERROR("DOWNLOAD_MKDIR", "Failed to create directory: %v", err)
             }
@@ -913,22 +957,39 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
             newFile, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.GetPermissions())
             if err != nil {
                 LOG_ERROR("DOWNLOAD_OPEN", "Failed to create empty file: %v", err)
-                return false
+                return 0
             }
             newFile.Close()

             file.RestoreMetadata(fullPath, nil, setOwner)
             if !showStatistics {
                 LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (0)", file.Path)
+                downloadedFileSize += file.Size
+                downloadedFiles = append(downloadedFiles, file)
             }

             continue
         }

-        if manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
-            totalFileSize, downloadedFileSize, startDownloadingTime) {
+        downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
+            totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures)
+        if err != nil {
+            // RestoreFile returned an error; if allowFailures is false, RestoreFile would error out and not return, so here
+            // we just need to show a warning
+            failedFiles++
+            LOG_WARN("DOWNLOAD_FAIL", "Failed to restore %s: %v", file.Path, err)
+            continue
+        }
+
+        // No error
+        if downloaded {
+            // No error, file was restored
             downloadedFileSize += file.Size
             downloadedFiles = append(downloadedFiles, file)
+        } else {
+            // No error, file was skipped
+            skippedFileSize += file.Size
+            skippedFiles++
         }
         file.RestoreMetadata(fullPath, nil, setOwner)
     }
@@ -956,11 +1017,16 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
         }
     }

+    if failedFiles > 0 {
+        return failedFiles
+    }
+
     LOG_INFO("RESTORE_END", "Restored %s to revision %d", top, revision)
     if showStatistics {
         LOG_INFO("RESTORE_STATS", "Files: %d total, %s bytes", len(fileEntries), PrettySize(totalFileSize))
         LOG_INFO("RESTORE_STATS", "Downloaded %d file, %s bytes, %d chunks",
             len(downloadedFiles), PrettySize(downloadedFileSize), chunkDownloader.numberOfDownloadedChunks)
+        LOG_INFO("RESTORE_STATS", "Skipped %d file, %s bytes", skippedFiles, PrettySize(skippedFileSize))
     }

     runningTime := time.Now().Unix() - startTime
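Restore now reports failures numerically instead of with a bare boolean: fatal setup errors still go through LOG_ERROR and return 0 immediately, while per-file failures under -persist are tallied in failedFiles and returned before the RESTORE_END summary is printed. A minimal sketch of how a caller could map that count onto an exit decision; the function and its name are hypothetical, not part of this patch:

```go
// Hypothetical caller-side handling of Restore's new integer result.
// A non-zero count means -persist let the restore continue past corrupt
// or missing chunks and some files could not be reconstructed.
func exitCodeForRestore(failedFiles int) int {
    if failedFiles > 0 {
        return 1 // partial restore
    }
    return 0 // every selected file was restored or legitimately skipped
}
```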
@@ -972,7 +1038,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu

     chunkDownloader.Stop()

-    return true
+    return 0
 }

 // fileEncoder encodes one file at a time to avoid loading the full json description of the entire file tree
@@ -985,12 +1051,12 @@ type fileEncoder struct {
     buffer *bytes.Buffer
 }

-// Read reads data from the embeded buffer
+// Read reads data from the embedded buffer
 func (encoder fileEncoder) Read(data []byte) (n int, err error) {
     return encoder.buffer.Read(data)
 }

-// NextFile switchs to the next file and generates its json description in the buffer. It also takes care of
+// NextFile switches to the next file and generates its json description in the buffer. It also takes care of
 // the ending ']' and the commas between files.
 func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
     if encoder.currentIndex == len(encoder.files) {
@@ -1130,10 +1196,13 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 }

 // Restore downloads a file from the storage. If 'inPlace' is false, the downloaded file is saved first to a temporary
-// file under the .duplicacy directory and then replaces the existing one. Otherwise, the exising file will be
+// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
 // overwritten directly.
+// Return: true, nil: Restored file;
+//         false, nil: Skipped file;
+//         false, error: Failure to restore file (only if allowFailures == true)
 func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
-    showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64) bool {
+    showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64, allowFailures bool) (bool, error) {

     LOG_TRACE("DOWNLOAD_START", "Downloading %s", entry.Path)

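The three-way contract documented above is exactly what the Restore loop dispatches on. A small illustrative helper, assuming nothing beyond the documented return values (the counter names mirror the patch, but the helper itself is not in it):

```go
// tally sorts one RestoreFile outcome into the three buckets the patch
// tracks. An error is only ever observed here when allowFailures == true;
// otherwise RestoreFile logs a fatal error and never returns.
func tally(downloaded bool, err error, restored, skipped, failed *int) {
    switch {
    case err != nil:
        *failed++
    case downloaded:
        *restored++
    default:
        *skipped++
    }
}
```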
@@ -1166,6 +1235,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
     lengthMap := make(map[string]int)
     var offset int64

+    // If the file is newly created (needed by sparse file optimization)
+    isNewFile := false
+
     existingFile, err = os.Open(fullPath)
     if err != nil {
         if os.IsNotExist(err) {
@@ -1175,7 +1247,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
             existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
             if err != nil {
                 LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
-                return false
+                return false, nil
             }

             n := int64(1)
@@ -1187,31 +1259,29 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
                 _, err = existingFile.Seek(entry.Size-n, 0)
                 if err != nil {
                     LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing: %v", fullPath, err)
-                    return false
+                    return false, nil
                 }
                 _, err = existingFile.Write([]byte("\x00\x00")[:n])
                 if err != nil {
                     LOG_ERROR("DOWNLOAD_CREATE", "Failed to initialize the sparse file %s for in-place writing: %v", fullPath, err)
-                    return false
+                    return false, nil
                 }
                 existingFile.Close()
                 existingFile, err = os.Open(fullPath)
                 if err != nil {
                     LOG_ERROR("DOWNLOAD_OPEN", "Can't reopen the initial file just created: %v", err)
-                    return false
+                    return false, nil
                 }
+                isNewFile = true
             }
         } else {
             LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
         }
-    } else {
-        if !overwrite {
-            LOG_ERROR("DOWNLOAD_OVERWRITE",
-                "File %s already exists. Please specify the -overwrite option to continue", entry.Path)
-            return false
-        }
     }

+    // The key in this map is the number of zeroes. The value is the corresponding hash.
+    knownHashes := make(map[int]string)
+
     fileHash := ""
     if existingFile != nil {

@@ -1221,6 +1291,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
         fileHasher := manager.config.NewFileHasher()
         buffer := make([]byte, 64*1024)
         err = nil
+        isSkipped := false
         // We set to read one more byte so the file hash will be different if the file to be restored is a
         // truncated portion of the existing file
         for i := entry.StartChunk; i <= entry.EndChunk+1; i++ {
@@ -1236,6 +1307,28 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
                 chunkSize = 1 // the size of extra chunk beyond EndChunk
             }
             count := 0
+
+            if isNewFile {
+                if hash, found := knownHashes[chunkSize]; found {
+                    // We have read the same number of zeros before, so we just retrieve the hash from the map
+                    existingChunks = append(existingChunks, hash)
+                    existingLengths = append(existingLengths, chunkSize)
+                    offsetMap[hash] = offset
+                    lengthMap[hash] = chunkSize
+                    offset += int64(chunkSize)
+                    isSkipped = true
+                    continue
+                }
+            }
+
+            if isSkipped {
+                _, err := existingFile.Seek(offset, 0)
+                if err != nil {
+                    LOG_ERROR("DOWNLOAD_SEEK", "Failed to seek to offset %d: %v", offset, err)
+                }
+                isSkipped = false
+            }
+
             for count < chunkSize {
                 n := chunkSize - count
                 if n > cap(buffer) {
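The isNewFile path above exists because an in-place restore target that was just created is all zeros, so two prospective chunks of equal length are guaranteed to hash identically; memoizing one hash per length lets the code skip rereading long runs of zeros. A standalone sketch of the same memoization, with SHA-256 standing in for Duplicacy's configured chunk hasher (an assumption made only for this example):

```go
package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// zeroChunkHash caches the hash of an all-zero chunk keyed by its length,
// mirroring what knownHashes does for newly created sparse files.
func zeroChunkHash(cache map[int]string, size int) string {
    if hash, found := cache[size]; found {
        return hash // same number of zeros seen before: reuse the hash
    }
    sum := sha256.Sum256(make([]byte, size))
    hash := hex.EncodeToString(sum[:])
    cache[size] = hash
    return hash
}

func main() {
    cache := map[int]string{}
    fmt.Println(zeroChunkHash(cache, 64*1024))
    fmt.Println(zeroChunkHash(cache, 64*1024)) // second call is a map lookup
}
```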
@@ -1252,7 +1345,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
                 }
                 if err != nil {
                     LOG_ERROR("DOWNLOAD_SPLIT", "Failed to read existing file: %v", err)
-                    return false
+                    return false, nil
                 }
             }
             if count > 0 {
@@ -1262,13 +1355,30 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
                     offsetMap[hash] = offset
                     lengthMap[hash] = chunkSize
                     offset += int64(chunkSize)
+                    if isNewFile {
+                        knownHashes[chunkSize] = hash
+                    }
                 }

                 if err == io.EOF {
                     break
                 }
             }

             fileHash = hex.EncodeToString(fileHasher.Sum(nil))
+
+            if fileHash == entry.Hash && fileHash != "" {
+                LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
+                return false, nil
+            }
+
+            // fileHash != entry.Hash, warn/error depending on -overwrite option
+            if !overwrite {
+                LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
+                    "File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
+                return false, fmt.Errorf("file exists")
+            }
+
         } else {
             // If it is not inplace, we want to reuse any chunks in the existing file regardless of their offsets, so
             // we run the chunk maker to split the original file.
@@ -1288,9 +1398,11 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
                 return nil, false
             })
         }

+        // This is an additional check on top of the fileHash == entry.Hash comparison above, so it should no longer occur
         if fileHash == entry.Hash && fileHash != "" {
             LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
-            return false
+            return false, nil
         }
     }

@@ -1318,7 +1430,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
         existingFile, err = os.OpenFile(fullPath, os.O_RDWR, 0)
         if err != nil {
             LOG_ERROR("DOWNLOAD_OPEN", "Failed to open the file %s for in-place writing", fullPath)
-            return false
+            return false, nil
         }
     }

@@ -1350,7 +1462,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
             _, err = existingFile.Seek(offset, 0)
             if err != nil {
                 LOG_ERROR("DOWNLOAD_SEEK", "Failed to set the offset to %d for file %s: %v", offset, fullPath, err)
-                return false
+                return false, nil
             }

             // Check if the chunk is available in the existing file
@@ -1360,17 +1472,20 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
                 _, err := io.CopyN(hasher, existingFile, int64(existingLengths[j]))
                 if err != nil {
                     LOG_ERROR("DOWNLOAD_READ", "Failed to read the existing chunk %s: %v", hash, err)
-                    return false
+                    return false, nil
                 }
                 if IsDebugging() {
                     LOG_DEBUG("DOWNLOAD_UNCHANGED", "Chunk %s is unchanged", manager.config.GetChunkIDFromHash(hash))
                 }
             } else {
                 chunk := chunkDownloader.WaitForChunk(i)
+                if chunk.isBroken {
+                    return false, fmt.Errorf("chunk %s is corrupted", manager.config.GetChunkIDFromHash(hash))
+                }
                 _, err = existingFile.Write(chunk.GetBytes()[start:end])
                 if err != nil {
                     LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
-                    return false
+                    return false, nil
                 }
                 hasher.Write(chunk.GetBytes()[start:end])
             }
@@ -1381,15 +1496,15 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
         // Must truncate the file if the new size is smaller
         if err = existingFile.Truncate(offset); err != nil {
             LOG_ERROR("DOWNLOAD_TRUNCATE", "Failed to truncate the file at %d: %v", offset, err)
-            return false
+            return false, nil
         }

         // Verify the download by hash
         hash := hex.EncodeToString(hasher.Sum(nil))
         if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
-            LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
+            LOG_WERROR(allowFailures, "DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
                 fullPath, "", entry.Hash)
-            return false
+            return false, fmt.Errorf("file corrupt (hash mismatch)")
         }

     } else {
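LOG_WERROR is the pivot of the -persist behavior: the same condition that used to be fatal is logged as a warning when allowFailures is true, and the failure propagates through the new error return instead of aborting the process. A sketch of that warn-or-error pattern, assuming only the LOG_WARN and LOG_ERROR helpers that already appear in this file; the real LOG_WERROR lives in the logging module and may differ in detail:

```go
// Illustrative warn-or-error helper in the spirit of LOG_WERROR: report
// and keep going under -persist, otherwise treat the condition as fatal.
func logWError(allowFailures bool, id string, format string, args ...interface{}) {
    if allowFailures {
        LOG_WARN(id, format, args...)
        return
    }
    LOG_ERROR(id, format, args...) // logs and aborts the operation
}
```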
@@ -1398,7 +1513,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
         newFile, err = os.OpenFile(temporaryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
         if err != nil {
             LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
-            return false
+            return false, nil
         }

         hasher := manager.config.NewFileHasher()
@@ -1436,6 +1551,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun

             if !hasLocalCopy {
                 chunk := chunkDownloader.WaitForChunk(i)
+                if chunk.isBroken {
+                    return false, fmt.Errorf("chunk %s is corrupted", manager.config.GetChunkIDFromHash(hash))
+                }
                 // If the chunk was downloaded from the storage, we may still need a portion of it.
                 start := 0
                 if i == entry.StartChunk {
@@ -1451,7 +1569,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
             _, err = newFile.Write(data)
             if err != nil {
                 LOG_ERROR("DOWNLOAD_WRITE", "Failed to write file: %v", err)
-                return false
+                return false, nil
             }

             hasher.Write(data)
@@ -1460,9 +1578,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun

         hash := hex.EncodeToString(hasher.Sum(nil))
         if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
-            LOG_ERROR("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
+            LOG_WERROR(allowFailures, "DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
                 entry.Path, hash, entry.Hash)
-            return false
+            return false, fmt.Errorf("file corrupt (hash mismatch)")
         }

         if existingFile != nil {
@@ -1476,31 +1594,40 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
         err = os.Remove(fullPath)
         if err != nil && !os.IsNotExist(err) {
             LOG_ERROR("DOWNLOAD_REMOVE", "Failed to remove the old file: %v", err)
-            return false
+            return false, nil
         }

         err = os.Rename(temporaryPath, fullPath)
         if err != nil {
             LOG_ERROR("DOWNLOAD_RENAME", "Failed to rename the file %s to %s: %v", temporaryPath, fullPath, err)
-            return false
+            return false, nil
         }
     }

     if !showStatistics {
         LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", entry.Path, entry.Size)
     }
-    return true
+    return true, nil
 }

 // CopySnapshots copies the specified snapshots from one storage to the other.
 func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
-    revisionsToBeCopied []int, threads int) bool {
+    revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {

     if !manager.config.IsCompatiableWith(otherManager.config) {
-        LOG_ERROR("CONFIG_INCOMPATIABLE", "Two storages are not compatiable for the copy operation")
+        LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
         return false
     }

+    if otherManager.config.DataShards != 0 && otherManager.config.ParityShards != 0 {
+        LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled for the destination storage with %d data shards and %d parity shards",
+            otherManager.config.DataShards, otherManager.config.ParityShards)
+    }
+
+    if otherManager.config.rsaPublicKey != nil && len(otherManager.config.FileKey) > 0 {
+        LOG_INFO("BACKUP_KEY", "RSA encryption is enabled for the destination")
+    }
+
     if snapshotID == "" && len(revisionsToBeCopied) > 0 {
         LOG_ERROR("SNAPSHOT_ERROR", "You must specify the snapshot id when one or more revisions are specified.")
         return false
@@ -1580,6 +1707,9 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
         return true
     }

+    // These two maps store hashes of chunks in the source and destination storages, respectively. Note that
+    // the value of 'chunks' is used to indicate if the chunk is a snapshot chunk, while the value of 'otherChunks'
+    // is not used.
     chunks := make(map[string]bool)
     otherChunks := make(map[string]bool)

@@ -1592,21 +1722,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
         LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)

         for _, chunkHash := range snapshot.FileSequence {
-            if _, found := chunks[chunkHash]; !found {
-                chunks[chunkHash] = true
-            }
+            chunks[chunkHash] = true // The chunk is a snapshot chunk
         }

         for _, chunkHash := range snapshot.ChunkSequence {
-            if _, found := chunks[chunkHash]; !found {
-                chunks[chunkHash] = true
-            }
+            chunks[chunkHash] = true // The chunk is a snapshot chunk
         }

         for _, chunkHash := range snapshot.LengthSequence {
-            if _, found := chunks[chunkHash]; !found {
-                chunks[chunkHash] = true
-            }
+            chunks[chunkHash] = true // The chunk is a snapshot chunk
         }

         description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1619,9 +1743,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho

         for _, chunkHash := range snapshot.ChunkHashes {
             if _, found := chunks[chunkHash]; !found {
-                chunks[chunkHash] = true
+                chunks[chunkHash] = false // The chunk is a file chunk
             }
         }
+
+        snapshot.ChunkHashes = nil
     }

     otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
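The boolean stored in chunks now carries meaning: sequence chunks (snapshot metadata) map to true and file chunks to false, and the copy loop later forwards that flag as newChunk.isSnapshot. A self-contained sketch of the classification, using hypothetical slice parameters in place of the snapshot fields:

```go
// classifyChunks mirrors how CopySnapshots builds its chunk map: true for
// snapshot (metadata) chunks, false for file (data) chunks, with metadata
// taking precedence when a hash appears in both roles.
func classifyChunks(snapshotSeqs [][]string, fileChunkHashes []string) map[string]bool {
    chunks := make(map[string]bool)
    for _, seq := range snapshotSeqs {
        for _, hash := range seq {
            chunks[hash] = true // snapshot chunk
        }
    }
    for _, hash := range fileChunkHashes {
        if _, found := chunks[hash]; !found {
            chunks[hash] = false // file chunk
        }
    }
    return chunks
}
```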
@@ -1640,62 +1766,64 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho

     LOG_DEBUG("SNAPSHOT_COPY", "Found %d chunks on destination storage", len(otherChunks))

-    chunksToCopy := 0
-    chunksToSkip := 0
+    var chunksToCopy []string

-    for chunkHash, _ := range chunks {
+    for chunkHash := range chunks {
         otherChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
-        if _, found := otherChunks[otherChunkID]; found {
-            chunksToSkip++
-        } else {
-            chunksToCopy++
+        if _, found := otherChunks[otherChunkID]; !found {
+            chunksToCopy = append(chunksToCopy, chunkHash)
         }
     }

-    LOG_DEBUG("SNAPSHOT_COPY", "Chunks to copy = %d, to skip = %d, total = %d", chunksToCopy, chunksToSkip, chunksToCopy+chunksToSkip)
-    LOG_DEBUG("SNAPSHOT_COPY", "Total chunks in source snapshot revisions = %d\n", len(chunks))
+    LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))

-    chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
+    chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, downloadingThreads, false)

-    chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, threads,
+    var uploadedBytes int64
+    startTime := time.Now()
+
+    copiedChunks := 0
+    chunkUploader := CreateChunkUploader(otherManager.config, otherManager.storage, nil, uploadingThreads,
         func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
-            if skipped {
-                LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) exists at the destination", chunk.GetID(), chunkIndex, len(chunks))
-            } else {
-                LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) copied to the destination", chunk.GetID(), chunkIndex, len(chunks))
+            action := "Skipped"
+            if !skipped {
+                copiedChunks++
+                action = "Copied"
             }

+            atomic.AddInt64(&uploadedBytes, int64(chunkSize))
+
+            elapsedTime := time.Now().Sub(startTime).Seconds()
+            speed := int64(float64(atomic.LoadInt64(&uploadedBytes)) / elapsedTime)
+            remainingTime := int64(float64(len(chunksToCopy) - chunkIndex - 1) / float64(chunkIndex + 1) * elapsedTime)
+            percentage := float64(chunkIndex + 1) / float64(len(chunksToCopy)) * 100.0
+            LOG_INFO("COPY_PROGRESS", "%s chunk %s (%d/%d) %sB/s %s %.1f%%",
+                action, chunk.GetID(), chunkIndex + 1, len(chunksToCopy),
+                PrettySize(speed), PrettyTime(remainingTime), percentage)
             otherManager.config.PutChunk(chunk)
         })

     chunkUploader.Start()

-    totalCopied := 0
-    totalSkipped := 0
-    chunkIndex := 0
+    for _, chunkHash := range chunksToCopy {
+        chunkDownloader.AddChunk(chunkHash)
+    }

-    for chunkHash, _ := range chunks {
-        chunkIndex++
+    for i, chunkHash := range chunksToCopy {
         chunkID := manager.config.GetChunkIDFromHash(chunkHash)
         newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
-        if _, found := otherChunks[newChunkID]; !found {
-            LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
-            i := chunkDownloader.AddChunk(chunkHash)
-            chunk := chunkDownloader.WaitForChunk(i)
-            newChunk := otherManager.config.GetChunk()
-            newChunk.Reset(true)
-            newChunk.Write(chunk.GetBytes())
-            chunkUploader.StartChunk(newChunk, chunkIndex)
-            totalCopied++
-        } else {
-            LOG_INFO("SNAPSHOT_COPY", "Chunk %s (%d/%d) skipped at the destination", chunkID, chunkIndex, len(chunks))
-            totalSkipped++
-        }
+        LOG_DEBUG("SNAPSHOT_COPY", "Copying chunk %s to %s", chunkID, newChunkID)
+        chunk := chunkDownloader.WaitForChunk(i)
+        newChunk := otherManager.config.GetChunk()
+        newChunk.Reset(true)
+        newChunk.Write(chunk.GetBytes())
+        newChunk.isSnapshot = chunks[chunkHash]
+        chunkUploader.StartChunk(newChunk, i)
     }

     chunkDownloader.Stop()
     chunkUploader.Stop()

-    LOG_INFO("SNAPSHOT_COPY", "Copy complete, %d total chunks, %d chunks copied, %d skipped", totalCopied+totalSkipped, totalCopied, totalSkipped)
+    LOG_INFO("SNAPSHOT_COPY", "Copied %d new chunks and skipped %d existing chunks", copiedChunks, len(chunks) - copiedChunks)

     for _, snapshot := range snapshots {
         if revisionMap[snapshot.ID][snapshot.Revision] == false {
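The new COPY_PROGRESS line is driven by three pieces of arithmetic: average throughput is total uploaded bytes over elapsed seconds, the time estimate scales elapsed time by the ratio of remaining to completed chunks, and the percentage is completed over total. A runnable sketch of the same math (the figures in main are made up for illustration):

```go
package main

import (
    "fmt"
    "time"
)

// progress reproduces the arithmetic behind COPY_PROGRESS: average speed,
// a remaining-time estimate that scales elapsed time by remaining/completed
// chunks, and a completion percentage.
func progress(uploadedBytes int64, start time.Time, done, total int) (speed, remaining int64, pct float64) {
    elapsed := time.Since(start).Seconds()
    speed = int64(float64(uploadedBytes) / elapsed)
    remaining = int64(float64(total-done) / float64(done) * elapsed)
    pct = float64(done) / float64(total) * 100.0
    return
}

func main() {
    start := time.Now().Add(-10 * time.Second) // pretend 10 seconds elapsed
    speed, remaining, pct := progress(50<<20, start, 25, 100)
    fmt.Printf("%d B/s, ~%ds left, %.1f%%\n", speed, remaining, pct)
}
```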
@@ -169,6 +169,12 @@ func getFileHash(path string) (hash string) {
     return hex.EncodeToString(hasher.Sum(nil))
 }

+func assertRestoreFailures(t *testing.T, failedFiles int, expectedFailedFiles int) {
+    if failedFiles != expectedFailedFiles {
+        t.Errorf("Failed to restore %d instead of %d file(s)", failedFiles, expectedFailedFiles)
+    }
+}
+
 func TestBackupManager(t *testing.T) {

     rand.Seed(time.Now().UnixNano())
@@ -226,12 +232,20 @@ func TestBackupManager(t *testing.T) {
     cleanStorage(storage)

     time.Sleep(time.Duration(delay) * time.Second)

+    dataShards := 0
+    parityShards := 0
+    if testErasureCoding {
+        dataShards = 5
+        parityShards = 2
+    }
+
     if testFixedChunkSize {
-        if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
+        if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
             t.Errorf("Failed to initialize the storage")
         }
     } else {
-        if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
+        if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "", dataShards, parityShards) {
             t.Errorf("Failed to initialize the storage")
         }
     }
@@ -239,15 +253,16 @@ func TestBackupManager(t *testing.T) {
     time.Sleep(time.Duration(delay) * time.Second)

     SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-    backupManager := CreateBackupManager("host1", storage, testDir, password, "", false)
+    backupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
     backupManager.SetupSnapshotCache("default")

     SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
     backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
     time.Sleep(time.Duration(delay) * time.Second)
     SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-    backupManager.Restore(testDir+"/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
-        /*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/ nil)
+    failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
+        /*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+    assertRestoreFailures(t, failedFiles, 0)

     for _, f := range []string{"file1", "file2", "dir1/file3"} {
         if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
@@ -270,8 +285,9 @@ func TestBackupManager(t *testing.T) {
     backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
     time.Sleep(time.Duration(delay) * time.Second)
     SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-    backupManager.Restore(testDir+"/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
-        /*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+    failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
+        /*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+    assertRestoreFailures(t, failedFiles, 0)

     for _, f := range []string{"file1", "file2", "dir1/file3"} {
         hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -298,8 +314,9 @@ func TestBackupManager(t *testing.T) {
     createRandomFile(testDir+"/repository2/dir5/file5", 100)

     SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-    backupManager.Restore(testDir+"/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-        /*deleteMode=*/true, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+    failedFiles = backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+        /*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+    assertRestoreFailures(t, failedFiles, 0)

     for _, f := range []string{"file1", "file2", "dir1/file3"} {
         hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -325,8 +342,9 @@ func TestBackupManager(t *testing.T) {
     os.Remove(testDir + "/repository1/file2")
     os.Remove(testDir + "/repository1/dir1/file3")
     SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-    backupManager.Restore(testDir+"/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-        /*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
+    failedFiles = backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+        /*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"} /*allowFailures=*/, false)
+    assertRestoreFailures(t, failedFiles, 0)

     for _, f := range []string{"file1", "file2", "dir1/file3"} {
         hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -341,7 +359,7 @@ func TestBackupManager(t *testing.T) {
         t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
     }
     backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
-        /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
+        /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
     backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
         /*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
     numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -349,7 +367,7 @@ func TestBackupManager(t *testing.T) {
         t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
     }
     backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
-        /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
+        /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
     backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
     backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
         /*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
@@ -358,9 +376,348 @@ func TestBackupManager(t *testing.T) {
         t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
     }
     backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
-        /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
+        /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)

     /*buf := make([]byte, 1<<16)
     runtime.Stack(buf, true)
     fmt.Printf("%s", buf)*/
 }
+
+// Create a random file with a certain seed
+func createRandomFileSeeded(path string, maxSize int, seed int64) {
+    rand.Seed(seed)
+    file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+    if err != nil {
+        LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
+        return
+    }
+
+    defer file.Close()
+
+    size := maxSize/2 + rand.Int()%(maxSize/2)
+
+    buffer := make([]byte, 32*1024)
+    for size > 0 {
+        bytes := size
+        if bytes > cap(buffer) {
+            bytes = cap(buffer)
+        }
+        rand.Read(buffer[:bytes])
+        bytes, err = file.Write(buffer[:bytes])
+        if err != nil {
+            LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
+            return
+        }
+        size -= bytes
+    }
+}
+
+func corruptFile(path string, start int, length int, seed int64) {
+    rand.Seed(seed)
+
+    file, err := os.OpenFile(path, os.O_WRONLY, 0644)
+    if err != nil {
+        LOG_ERROR("CORRUPT_FILE", "Can't open %s for writing: %v", path, err)
+        return
+    }
+
+    defer func() {
+        if file != nil {
+            file.Close()
+        }
+    }()
+
+    _, err = file.Seek(int64(start), 0)
+    if err != nil {
+        LOG_ERROR("CORRUPT_FILE", "Can't seek to the offset %d: %v", start, err)
+        return
+    }
+
+    buffer := make([]byte, length)
+    rand.Read(buffer)
+
+    _, err = file.Write(buffer)
+    if err != nil {
+        LOG_ERROR("CORRUPT_FILE", "Failed to write to %s: %v", path, err)
+        return
+    }
+}
+
+func TestPersistRestore(t *testing.T) {
+    // We want deterministic output here so we can test that the expected files are corrupted by missing or corrupt chunks
+    // Therefore use rand functions with a fixed seed, and known keys
+
+    setTestingT(t)
+    SetLoggingLevel(INFO)
+
+    defer func() {
+        if r := recover(); r != nil {
+            switch e := r.(type) {
+            case Exception:
+                t.Errorf("%s %s", e.LogID, e.Message)
+                debug.PrintStack()
+            default:
+                t.Errorf("%v", e)
+                debug.PrintStack()
+            }
+        }
+    }()
+
+    testDir := path.Join(os.TempDir(), "duplicacy_test")
+    os.RemoveAll(testDir)
+    os.MkdirAll(testDir, 0700)
+    os.Mkdir(testDir+"/repository1", 0700)
+    os.Mkdir(testDir+"/repository1/dir1", 0700)
+    os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
+    os.Mkdir(testDir+"/repository2", 0700)
+    os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
+    os.Mkdir(testDir+"/repository3", 0700)
+    os.Mkdir(testDir+"/repository3/.duplicacy", 0700)
+
+    maxFileSize := 1000000
+    //maxFileSize := 200000
+
+    createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize, 1)
+    createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize, 2)
+    createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize, 3)
+
+    threads := 1
+
+    password := "duplicacy"
+
+    // We want deterministic output, plus the ability to test encrypted storage
+    // So make an unencrypted storage with default keys, and an encrypted storage as a bit-identical copy of it but with a password
+    unencStorage, err := loadStorage(testDir+"/unenc_storage", threads)
+    if err != nil {
+        t.Errorf("Failed to create storage: %v", err)
+        return
+    }
+    delay := 0
+    if _, ok := unencStorage.(*ACDStorage); ok {
+        delay = 1
+    }
+    if _, ok := unencStorage.(*OneDriveStorage); ok {
+        delay = 5
+    }
+
+    time.Sleep(time.Duration(delay) * time.Second)
+    cleanStorage(unencStorage)
+
+    if !ConfigStorage(unencStorage, 16384, 100, 64*1024, 256*1024, 16*1024, "", nil, false, "") {
+        t.Errorf("Failed to initialize the unencrypted storage")
+    }
+    time.Sleep(time.Duration(delay) * time.Second)
+    unencConfig, _, err := DownloadConfig(unencStorage, "")
+    if err != nil {
+        t.Errorf("Failed to download storage config: %v", err)
+        return
+    }
+
+    // Make encrypted storage
+    storage, err := loadStorage(testDir+"/enc_storage", threads)
+    if err != nil {
+        t.Errorf("Failed to create encrypted storage: %v", err)
+        return
+    }
+    time.Sleep(time.Duration(delay) * time.Second)
+    cleanStorage(storage)
+
+    if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, unencConfig, true, "") {
+        t.Errorf("Failed to initialize the encrypted storage")
+    }
+    time.Sleep(time.Duration(delay) * time.Second)
+
+    // do unencrypted backup
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
+    unencBackupManager := CreateBackupManager("host1", unencStorage, testDir, "", "", "")
+    unencBackupManager.SetupSnapshotCache("default")
+
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
+    unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
+    time.Sleep(time.Duration(delay) * time.Second)
+
+
+    // do encrypted backup
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
+    encBackupManager := CreateBackupManager("host1", storage, testDir, password, "", "")
+    encBackupManager.SetupSnapshotCache("default")
+
+    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
+    encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
+    time.Sleep(time.Duration(delay) * time.Second)
+
+
+    // check snapshots
+    unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
+        /*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
+        /*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
+
+    encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
+        /*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
+        /*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
+
+    // check functions
+    checkAllUncorrupted := func(cmpRepository string) {
+        for _, f := range []string{"file1", "file2", "dir1/file3"} {
+            if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
+                t.Errorf("File %s does not exist", f)
+                continue
+            }
+
+            hash1 := getFileHash(testDir + "/repository1/" + f)
+            hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+            if hash1 != hash2 {
+                t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+            }
+        }
+    }
+    checkMissingFile := func(cmpRepository string, expectMissing string) {
+        for _, f := range []string{"file1", "file2", "dir1/file3"} {
+            _, err := os.Stat(testDir + cmpRepository + "/" + f)
+            if err == nil {
+                if f == expectMissing {
+                    t.Errorf("File %s exists, expected to be missing", f)
+                }
+                continue
+            }
+            if os.IsNotExist(err) {
+                if f != expectMissing {
+                    t.Errorf("File %s does not exist", f)
+                }
+                continue
+            }
+
+            hash1 := getFileHash(testDir + "/repository1/" + f)
+            hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+            if hash1 != hash2 {
+                t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+            }
+        }
+    }
+    checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
+        for _, f := range []string{"file1", "file2", "dir1/file3"} {
+            if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
+                t.Errorf("File %s does not exist", f)
+                continue
+            }
+
+            hash1 := getFileHash(testDir + "/repository1/" + f)
+            hash2 := getFileHash(testDir + cmpRepository + "/" + f)
+            if f == expectCorrupted {
+                if hash1 == hash2 {
+                    t.Errorf("File %s has same hashes, expected to be corrupted: %s vs %s", f, hash1, hash2)
+                }
+
+            } else {
+                if hash1 != hash2 {
+                    t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
+                }
+            }
+        }
+    }
+
+    // test restore all uncorrupted to repository3
+    SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
+    failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+        /*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+    assertRestoreFailures(t, failedFiles, 0)
+    checkAllUncorrupted("/repository3")
+
+    // test for corrupt files and -persist
+    // corrupt a chunk
+    chunkToCorrupt1 := "/4d/538e5dfd2b08e782bfeb56d1360fb5d7eb9d8c4b2531cc2fca79efbaec910c"
+    // this should affect file1
+    chunkToCorrupt2 := "/2b/f953a766d0196ce026ae259e76e3c186a0e4bcd3ce10f1571d17f86f0a5497"
+    // this should affect dir1/file3
+
+    for i := 0; i < 2; i++ {
+        if i == 0 {
+            // test corrupt chunks
+            corruptFile(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1, 128, 128, 4)
+            corruptFile(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2, 128, 128, 4)
+        } else {
+            // test missing chunks
+            os.Remove(testDir + "/unenc_storage" + "/chunks" + chunkToCorrupt1)
+            os.Remove(testDir + "/enc_storage" + "/chunks" + chunkToCorrupt2)
+        }
+
+        // check snapshots with --persist (allowFailures == true)
+        // this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
+        unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
+            /*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
+            /*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
+
+        encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
+            /*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
+            /*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
+
+
+        // test restore corrupted, inPlace = true, corrupted files will have hash failures
+        os.RemoveAll(testDir + "/repository2")
+        SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
+        failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+            /*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+        assertRestoreFailures(t, failedFiles, 1)
+
+        // check restore, expect file1 to be corrupted
+        checkCorruptedFile("/repository2", "file1")
+
+
+        os.RemoveAll(testDir + "/repository2")
+        SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
+        failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+            /*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+        assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file3 to be corrupted
|
||||||
|
checkCorruptedFile("/repository2", "dir1/file3")
|
||||||
|
|
||||||
|
//SetLoggingLevel(DEBUG)
|
||||||
|
// test restore corrupted, inPlace = false, corrupted files will be missing
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file1 to be corrupted
|
||||||
|
checkMissingFile("/repository2", "file1")
|
||||||
|
|
||||||
|
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
|
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
// check restore, expect file3 to be corrupted
|
||||||
|
checkMissingFile("/repository2", "dir1/file3")
|
||||||
|
|
||||||
|
// test restore corrupted files from different backups, inPlace = true
|
||||||
|
// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
|
||||||
|
// the latter will not touch the existing file3 with correct hash
|
||||||
|
os.RemoveAll(testDir+"/repository2")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 1)
|
||||||
|
|
||||||
|
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository2")
|
||||||
|
|
||||||
|
// restore to repository3, with overwrite and allowFailures (true/false), quickMode = false (use hashes)
|
||||||
|
// should always succeed as uncorrupted files already exist with correct hash, so these will be ignored
|
||||||
|
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository3")
|
||||||
|
|
||||||
|
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
|
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
|
||||||
|
assertRestoreFailures(t, failedFiles, 0)
|
||||||
|
checkAllUncorrupted("/repository3")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
@@ -41,7 +41,7 @@ func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compres
 	if encryption {
 		key = "0123456789abcdef0123456789abcdef"
 	}
-	err := chunk.Encrypt([]byte(key), "")
+	err := chunk.Encrypt([]byte(key), "", false)
 	if err != nil {
 		LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
 	}
@@ -5,21 +5,25 @@
 package duplicacy
 
 import (
-	"os"
 	"bytes"
 	"compress/zlib"
 	"crypto/aes"
+	"crypto/rsa"
 	"crypto/cipher"
-	"crypto/rand"
 	"crypto/hmac"
+	"crypto/rand"
 	"crypto/sha256"
 	"encoding/hex"
+	"encoding/binary"
 	"fmt"
 	"hash"
 	"io"
+	"os"
 	"runtime"
 
 	"github.com/bkaradzic/go-lz4"
+	"github.com/minio/highwayhash"
+	"github.com/klauspost/reedsolomon"
 )
@@ -60,10 +64,20 @@ type Chunk struct {
 
 	config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
 	               // by the config
+
+	isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
+	                // encryption, where a snapshot chunk is not encrypted by RSA
+
+	isBroken bool // Indicates the chunk did not download correctly. This is only used for -persist (allowFailures) mode
 }
 
 // Magic word to identify a duplicacy format encrypted file, plus a version number.
-var ENCRYPTION_HEADER = "duplicacy\000"
+var ENCRYPTION_BANNER = "duplicacy\000"
+
+// RSA encrypted chunks start with "duplicacy\002"
+var ENCRYPTION_VERSION_RSA byte = 2
+
+var ERASURE_CODING_BANNER = "duplicacy\003"
 
 // CreateChunk creates a new chunk.
 func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
@@ -113,6 +127,8 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
 	chunk.hash = nil
 	chunk.id = ""
 	chunk.size = 0
+	chunk.isSnapshot = false
+	chunk.isBroken = false
 }
 
 // Write implements the Writer interface.
@@ -170,7 +186,7 @@ func (chunk *Chunk) VerifyID() {
 
 // Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
 // encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
-func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
+func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {
 
 	var aesBlock cipher.Block
 	var gcm cipher.AEAD
@@ -186,8 +202,17 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 	if len(encryptionKey) > 0 {
 
 		key := encryptionKey
-		if len(derivationKey) > 0 {
+		usingRSA := false
+		// Enable RSA encryption only when the chunk is not a snapshot chunk
+		if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
+			randomKey := make([]byte, 32)
+			_, err := rand.Read(randomKey)
+			if err != nil {
+				return err
+			}
+			key = randomKey
+			usingRSA = true
+		} else if len(derivationKey) > 0 {
 			hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
 			hasher.Write(encryptionKey)
 			key = hasher.Sum(nil)
@@ -204,7 +229,21 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 		}
 
 		// Start with the magic number and the version number.
-		encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
+		if usingRSA {
+			// RSA encryption starts "duplicacy\002"
+			encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
+			encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
+
+			// Then the encrypted key
+			encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPublicKey, key, nil)
+			if err != nil {
+				return err
+			}
+			binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
+			encryptedBuffer.Write(encryptedKey)
+		} else {
+			encryptedBuffer.Write([]byte(ENCRYPTION_BANNER))
+		}
 
 		// Followed by the nonce
 		nonce = make([]byte, gcm.NonceSize())
@@ -214,10 +253,9 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 		}
 		encryptedBuffer.Write(nonce)
 		offset = encryptedBuffer.Len()
-
 	}
 
-	// offset is either 0 or the length of header + nonce
+	// offset is either 0 or the length of banner + nonce
 
 	if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
 		deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
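
The hunks above imply an on-disk framing for RSA-encrypted chunks: the 9-byte magic word "duplicacy", the version byte \002 in place of the trailing \000, a little-endian uint16 giving the length of the OAEP-wrapped random data key, the wrapped key itself, and then the usual nonce and ciphertext. The following is a minimal standalone sketch of just that key-wrapping step; the function name, return shape, and printed sizes are illustrative, not part of this commit.

    package main

    import (
        "bytes"
        "crypto/rand"
        "crypto/rsa"
        "crypto/sha256"
        "encoding/binary"
        "fmt"
    )

    // wrapKey mirrors the key-wrapping step above: a fresh 32-byte data key is
    // encrypted with RSA-OAEP and framed as banner bytes, version byte \002,
    // a little-endian uint16 key length, then the wrapped key.
    func wrapKey(pub *rsa.PublicKey) (dataKey, framed []byte, err error) {
        dataKey = make([]byte, 32)
        if _, err = rand.Read(dataKey); err != nil {
            return nil, nil, err
        }
        wrapped, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, pub, dataKey, nil)
        if err != nil {
            return nil, nil, err
        }
        var buf bytes.Buffer
        buf.WriteString("duplicacy") // 9-byte magic word shared by all banners
        buf.WriteByte(2)             // ENCRYPTION_VERSION_RSA
        binary.Write(&buf, binary.LittleEndian, uint16(len(wrapped)))
        buf.Write(wrapped)
        return dataKey, buf.Bytes(), nil
    }

    func main() {
        priv, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        _, framed, err := wrapKey(&priv.PublicKey)
        if err != nil {
            panic(err)
        }
        fmt.Printf("framed key block: %d bytes\n", len(framed)) // 12 + 256 for a 2048-bit key
    }

Because only the random data key is wrapped with RSA, the bulk of the chunk is still encrypted with AES-GCM; the private key is needed only at decryption time.
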
@@ -242,35 +280,85 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 		return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
 	}
 
-	if len(encryptionKey) == 0 {
-		chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
-		return nil
-	}
-
-	// PKCS7 is used. Compressed chunk sizes leaks information about the original chunks so we want the padding sizes
-	// to be the maximum allowed by PKCS7
-	dataLength := encryptedBuffer.Len() - offset
-	paddingLength := dataLength % 256
-	if paddingLength == 0 {
-		paddingLength = 256
-	}
-
-	encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
-	encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
-
-	// The encrypted data will be appended to the duplicacy header and the once.
-	encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
-		encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
-
-	encryptedBuffer.Truncate(len(encryptedBytes))
+	if len(encryptionKey) > 0 {
+
+		// PKCS7 is used. The sizes of compressed chunks leak information about the original chunks so we want the padding sizes
+		// to be the maximum allowed by PKCS7
+		dataLength := encryptedBuffer.Len() - offset
+		paddingLength := 256 - dataLength%256
+
+		encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
+		encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
+
+		// The encrypted data will be appended to the duplicacy banner and the once.
+		encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
+			encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
+
+		encryptedBuffer.Truncate(len(encryptedBytes))
+	}
+
+	if chunk.config.DataShards == 0 || chunk.config.ParityShards == 0 {
+		chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
+		return
+	}
+
+	// Start erasure coding
+	encoder, err := reedsolomon.New(chunk.config.DataShards, chunk.config.ParityShards)
+	if err != nil {
+		return err
+	}
+	chunkSize := len(encryptedBuffer.Bytes())
+	shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
+	// Append zeros to make the last shard to have the same size as other
+	encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
+	// Grow the buffer for parity shards
+	encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
+	// Now create one slice for each shard, reusing the data in the buffer
+	data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
+	for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
+		data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
+	}
+	// This populates the parity shard
+	encoder.Encode(data)
+
+	// Prepare the chunk to be uploaded
+	chunk.buffer.Reset()
+	// First the banner
+	chunk.buffer.Write([]byte(ERASURE_CODING_BANNER))
+	// Then the header which includes the chunk size, data/parity and a 2-byte checksum
+	header := make([]byte, 14)
+	binary.LittleEndian.PutUint64(header[0:], uint64(chunkSize))
+	binary.LittleEndian.PutUint16(header[8:], uint16(chunk.config.DataShards))
+	binary.LittleEndian.PutUint16(header[10:], uint16(chunk.config.ParityShards))
+	header[12] = header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10]
+	header[13] = header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11]
+	chunk.buffer.Write(header)
+	// Calculate the highway hash for each shard
+	hashKey := make([]byte, 32)
+	for _, part := range data {
+		hasher, err := highwayhash.New(hashKey)
+		if err != nil {
+			return err
+		}
+		_, err = hasher.Write(part)
+		if err != nil {
+			return err
+		}
+		chunk.buffer.Write(hasher.Sum(nil))
+	}
+
+	// Copy the data
+	for _, part := range data {
+		chunk.buffer.Write(part)
+	}
+	// Append the header again for redundancy
+	chunk.buffer.Write(header)
 
 	return nil
 
 }
 
-// This is to ensure compability with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
+// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
 // derive the key used to encrypt/decrypt files and chunks.
 
 var DecryptWithHMACSHA256 = false
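
Two details in the hunk above are easy to miss. First, the padding fix: the old `paddingLength := dataLength % 256` did not align the payload, while the new `256 - dataLength%256` always appends between 1 and 256 bytes, so the padded length is an exact multiple of 256. Second, the erasure-coded layout: banner (10 bytes), header (14 bytes: uint64 chunk size, uint16 data shards, uint16 parity shards, 2-byte XOR checksum), one 32-byte HighwayHash per shard, the zero-padded shards, then the header repeated. A small sketch that computes the encoded size under those assumptions (the function is illustrative, not from this commit):

    package main

    import "fmt"

    // encodedSize sketches the size of an erasure-coded chunk as laid out in the
    // hunk above: banner (10) + header (14) + one 32-byte HighwayHash per shard +
    // the padded shards + the header repeated at the end.
    func encodedSize(chunkSize, dataShards, parityShards int) int {
        const bannerLen = 10 // len("duplicacy\003")
        const headerLen = 14 // uint64 size + uint16 data + uint16 parity + 2-byte XOR checksum
        shardSize := (chunkSize + dataShards - 1) / dataShards
        shards := dataShards + parityShards
        return bannerLen + headerLen + shards*32 + shards*shardSize + headerLen
    }

    func main() {
        // With the shard counts used in the new test (5 data + 2 parity),
        // a 100-byte chunk becomes 7 shards of 20 bytes each.
        fmt.Println(encodedSize(100, 5, 2)) // 10 + 14 + 224 + 140 + 14 = 402
    }
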
@@ -294,6 +382,122 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 	}()
 
 	chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
+	bannerLength := len(ENCRYPTION_BANNER)
+
+	if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {
+
+		// The chunk was encoded with erasure coding
+		if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
+			return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
+		}
+		// Check the header checksum
+		header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
+		if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
+			header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
+			return fmt.Errorf("Erasure coding header corrupted (%x)", header)
+		}
+
+		// Read the parameters
+		chunkSize := int(binary.LittleEndian.Uint64(header[0:8]))
+		dataShards := int(binary.LittleEndian.Uint16(header[8:10]))
+		parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
+		shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
+		// This is the length the chunk file should have
+		expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
+		// The minimum length that can be recovered from
+		minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
+		LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
+		if len(encryptedBuffer.Bytes()) > expectedLength {
+			LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
+		} else if len(encryptedBuffer.Bytes()) == expectedLength {
+			// Correct size; fall through
+		} else if len(encryptedBuffer.Bytes()) > minimumLength {
+			LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
+		} else {
+			return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
+		}
+
+		// Where the hashes start
+		hashOffset := bannerLength + len(header)
+		// Where the data start
+		dataOffset := hashOffset + (dataShards + parityShards) * 32
+
+		data := make([][]byte, dataShards + parityShards)
+		recoveryNeeded := false
+		hashKey := make([]byte, 32)
+		availableShards := 0
+		for i := 0; i < dataShards + parityShards; i++ {
+			start := dataOffset + i * shardSize
+			if start + shardSize > len(encryptedBuffer.Bytes()) {
+				// the current shard is incomplete
+				break
+			}
+			// Now verify the hash
+			hasher, err := highwayhash.New(hashKey)
+			if err != nil {
+				return err
+			}
+			_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
+			if err != nil {
+				return err
+			}
+			if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
+				if i < dataShards {
+					recoveryNeeded = true
+				}
+			} else {
+				// The shard is good
+				data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
+				availableShards++
+				if availableShards >= dataShards {
+					// We have enough shards to recover; skip the remaining shards
+					break
+				}
+			}
+		}
+
+		if !recoveryNeeded {
+			// Remove the padding zeros from the last shard
+			encryptedBuffer.Truncate(dataOffset + chunkSize)
+			// Skip the header and hashes
+			encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
+		} else {
+			if availableShards < dataShards {
+				return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
+			}
+
+			// Show the validity of shards using a string of * and -
+			slots := ""
+			for _, part := range data {
+				if len(part) != 0 {
+					slots += "*"
+				} else {
+					slots += "-"
+				}
+			}
+
+			LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
+			encoder, err := reedsolomon.New(dataShards, parityShards)
+			if err != nil {
+				return err
+			}
+			err = encoder.Reconstruct(data)
+			if err != nil {
+				return err
+			}
+			LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
+			buffer := AllocateChunkBuffer()
+			buffer.Reset()
+			for i := 0; i < dataShards; i++ {
+				buffer.Write(data[i])
+			}
+			buffer.Truncate(chunkSize)
+
+			ReleaseChunkBuffer(encryptedBuffer)
+			encryptedBuffer = buffer
+		}
+
+	}
+
 	if len(encryptionKey) > 0 {
 
@@ -311,6 +515,41 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			key = hasher.Sum(nil)
 		}
 
+		if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
+			return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
+		}
+
+		if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
+			return fmt.Errorf("The storage doesn't seem to be encrypted")
+		}
+
+		encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
+		if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
+			return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
+		}
+
+		if encryptionVersion == ENCRYPTION_VERSION_RSA {
+			if chunk.config.rsaPrivateKey == nil {
+				LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
+				return fmt.Errorf("An RSA private key is required to decrypt the chunk")
+			}
+
+			encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
+
+			if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
+				return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
+			}
+
+			encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
+			bannerLength += 2 + int(encryptedKeyLength)
+
+			decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
+			if err != nil {
+				return err
+			}
+			key = decryptedKey
+		}
+
 		aesBlock, err := aes.NewCipher(key)
 		if err != nil {
 			return err
@@ -321,22 +560,8 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			return err
 		}
 
-		headerLength := len(ENCRYPTION_HEADER)
-		offset = headerLength + gcm.NonceSize()
-
-		if len(encryptedBuffer.Bytes()) < offset {
-			return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
-		}
-
-		if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
-			return fmt.Errorf("The storage doesn't seem to be encrypted")
-		}
-
-		if encryptedBuffer.Bytes()[headerLength-1] != 0 {
-			return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
-		}
-
-		nonce := encryptedBuffer.Bytes()[headerLength:offset]
-
+		offset = bannerLength + gcm.NonceSize()
+		nonce := encryptedBuffer.Bytes()[bannerLength:offset]
 		decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
 			encryptedBuffer.Bytes()[offset:], nil)
@@ -345,7 +570,6 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			return err
 		}
 
-
 		paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
 		if paddingLength == 0 {
 			paddingLength = 256
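
The decryption hunks above verify each shard against its stored HighwayHash and fall back to Reed-Solomon reconstruction only when a data shard fails the check. Below is a minimal standalone sketch of that recovery step with the same github.com/klauspost/reedsolomon library; the shard counts, data size, and failure pattern are made up for illustration (in the real code a shard is marked missing by leaving its slice nil when its hash does not match):

    package main

    import (
        "fmt"

        "github.com/klauspost/reedsolomon"
    )

    func main() {
        // 5 data shards + 2 parity shards, as in the new chunk test.
        enc, err := reedsolomon.New(5, 2)
        if err != nil {
            panic(err)
        }
        // Split allocates all 7 shards; Encode fills in the parity shards.
        shards, err := enc.Split(make([]byte, 100))
        if err != nil {
            panic(err)
        }
        if err = enc.Encode(shards); err != nil {
            panic(err)
        }
        // Simulate two corrupted shards: nil marks a shard as missing.
        shards[0], shards[3] = nil, nil
        // With at least 5 intact shards the missing ones can be rebuilt.
        if err = enc.Reconstruct(shards); err != nil {
            panic(err)
        }
        fmt.Println("recovered", len(shards), "shards")
    }
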
@@ -7,11 +7,51 @@ package duplicacy
 import (
 	"bytes"
 	crypto_rand "crypto/rand"
+	"crypto/rsa"
 	"math/rand"
 	"testing"
 )
 
-func TestChunk(t *testing.T) {
+func TestErasureCoding(t *testing.T) {
+	key := []byte("duplicacydefault")
+
+	config := CreateConfig()
+	config.HashKey = key
+	config.IDKey = key
+	config.MinimumChunkSize = 100
+	config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
+	config.DataShards = 5
+	config.ParityShards = 2
+
+	chunk := CreateChunk(config, true)
+	chunk.Reset(true)
+	data := make([]byte, 100)
+	for i := 0; i < len(data); i++ {
+		data[i] = byte(i)
+	}
+	chunk.Write(data)
+	err := chunk.Encrypt([]byte(""), "", false)
+	if err != nil {
+		t.Errorf("Failed to encrypt the test data: %v", err)
+		return
+	}
+
+	encryptedData := make([]byte, chunk.GetLength())
+	copy(encryptedData, chunk.GetBytes())
+
+	crypto_rand.Read(encryptedData[280:300])
+
+	chunk.Reset(false)
+	chunk.Write(encryptedData)
+	err = chunk.Decrypt([]byte(""), "")
+	if err != nil {
+		t.Errorf("Failed to decrypt the data: %v", err)
+		return
+	}
+	return
+}
+
+func TestChunkBasic(t *testing.T) {
 
 	key := []byte("duplicacydefault")
 
@@ -22,6 +62,20 @@ func TestChunk(t *testing.T) {
 	config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
 	maxSize := 1000000
 
+	if testRSAEncryption {
+		privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
+		if err != nil {
+			t.Errorf("Failed to generate a random private key: %v", err)
+		}
+		config.rsaPrivateKey = privateKey
+		config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
+	}
+
+	if testErasureCoding {
+		config.DataShards = 5
+		config.ParityShards = 2
+	}
+
 	for i := 0; i < 500; i++ {
 
 		size := rand.Int() % maxSize
@@ -35,7 +89,7 @@ func TestChunk(t *testing.T) {
 		hash := chunk.GetHash()
 		id := chunk.GetID()
 
-		err := chunk.Encrypt(key, "")
+		err := chunk.Encrypt(key, "", false)
 		if err != nil {
 			t.Errorf("Failed to encrypt the data: %v", err)
 			continue
@@ -44,6 +98,16 @@ func TestChunk(t *testing.T) {
 		encryptedData := make([]byte, chunk.GetLength())
 		copy(encryptedData, chunk.GetBytes())
 
+		if testErasureCoding {
+			offset := 24 + 32 * 7
+			start := rand.Int() % (len(encryptedData) - offset) + offset
+			length := (len(encryptedData) - offset) / 7
+			if start + length > len(encryptedData) {
+				length = len(encryptedData) - start
+			}
+			crypto_rand.Read(encryptedData[start: start+length])
+		}
+
 		chunk.Reset(false)
 		chunk.Write(encryptedData)
 		err = chunk.Decrypt(key, "")
@@ -63,7 +127,7 @@ func TestChunk(t *testing.T) {
 		}
 
 		if bytes.Compare(plainData, decryptedData) != 0 {
-			t.Logf("orginal length: %d, decrypted length: %d", len(plainData), len(decryptedData))
+			t.Logf("Original length: %d, decrypted length: %d", len(plainData), len(decryptedData))
 			t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
 		}
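
The constant `24 + 32*7` in the corruption test above follows from the encoded layout sketched earlier: the banner (10 bytes) plus the header (14 bytes) take 24 bytes, and 5 data + 2 parity shards contribute 7 HighwayHash values of 32 bytes each, so byte 248 is the first byte of shard data. Overwriting roughly `(len - offset) / 7` bytes from a random point past that offset damages at most two adjacent shards, which the two parity shards can absorb. A trivial check of that arithmetic (illustrative only):

    package main

    import "fmt"

    func main() {
        // Layout assumed from the erasure-coding hunks: banner + header, then
        // one 32-byte hash per shard (5 data + 2 parity), then shard data.
        offset := 24 + 32*7
        fmt.Println(offset) // 248: first byte of shard data, where the test corrupts
    }
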
@@ -36,6 +36,7 @@ type ChunkDownloader struct {
 	snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
 	showStatistics bool // Show a stats log for each chunk if true
 	threads int // Number of threads
+	allowFailures bool // Whether to failfast on download error, or continue
 
 	taskList []ChunkDownloadTask // The list of chunks to be downloaded
 	completedTasks map[int]bool // Store downloaded chunks
@@ -51,15 +52,18 @@ type ChunkDownloader struct {
 	numberOfDownloadedChunks int // The number of chunks that have been downloaded
 	numberOfDownloadingChunks int // The number of chunks still being downloaded
 	numberOfActiveChunks int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
+
+	NumberOfFailedChunks int // The number of chunks that can't be downloaded
 }
 
-func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
+func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkDownloader {
 	downloader := &ChunkDownloader{
 		config: config,
 		storage: storage,
 		snapshotCache: snapshotCache,
 		showStatistics: showStatistics,
 		threads: threads,
+		allowFailures: allowFailures,
 
 		taskList: nil,
 		completedTasks: make(map[int]bool),
@@ -126,6 +130,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
 
 // AddChunk adds a single chunk the download list.
 func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
+
 	task := ChunkDownloadTask{
 		chunkIndex: len(downloader.taskList),
 		chunkHash: chunkHash,
@@ -178,7 +183,7 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
 		return
 	}
 
-	for i, _ := range downloader.completedTasks {
+	for i := range downloader.completedTasks {
 		if i < chunkIndex && downloader.taskList[i].chunk != nil {
 			downloader.config.PutChunk(downloader.taskList[i].chunk)
 			downloader.taskList[i].chunk = nil
@@ -197,6 +202,16 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
 	downloader.lastChunkIndex = chunkIndex
 }
 
+// Return the chunk last downloaded and its hash
+func (downloader *ChunkDownloader) GetLastDownloadedChunk() (chunk *Chunk, chunkHash string) {
+	if downloader.lastChunkIndex >= len(downloader.taskList) {
+		return nil, ""
+	}
+
+	task := downloader.taskList[downloader.lastChunkIndex]
+	return task.chunk, task.chunkHash
+}
+
 // WaitForChunk waits until the specified chunk is ready
 func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
 
@@ -239,10 +254,57 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
 		downloader.taskList[completion.chunkIndex].chunk = completion.chunk
 		downloader.numberOfDownloadedChunks++
 		downloader.numberOfDownloadingChunks--
+		if completion.chunk.isBroken {
+			downloader.NumberOfFailedChunks++
+		}
 	}
 	return downloader.taskList[chunkIndex].chunk
 }
 
+// WaitForCompletion waits until all chunks have been downloaded
+func (downloader *ChunkDownloader) WaitForCompletion() {
+
+	// Tasks in completedTasks have not been counted by numberOfActiveChunks
+	downloader.numberOfActiveChunks -= len(downloader.completedTasks)
+
+	// find the completed task with the largest index; we'll start from the next index
+	for index := range downloader.completedTasks {
+		if downloader.lastChunkIndex < index {
+			downloader.lastChunkIndex = index
+		}
+	}
+
+	// Looping until there isn't a download task in progress
+	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex + 1 < len(downloader.taskList) {
+
+		// Wait for a completion event first
+		if downloader.numberOfActiveChunks > 0 {
+			completion := <-downloader.completionChannel
+			downloader.config.PutChunk(completion.chunk)
+			downloader.numberOfActiveChunks--
+			downloader.numberOfDownloadedChunks++
+			downloader.numberOfDownloadingChunks--
+			if completion.chunk.isBroken {
+				downloader.NumberOfFailedChunks++
+			}
+		}
+
+		// Pass the tasks one by one to the download queue
+		if downloader.lastChunkIndex + 1 < len(downloader.taskList) {
+			task := &downloader.taskList[downloader.lastChunkIndex + 1]
+			if task.isDownloading {
+				downloader.lastChunkIndex++
+				continue
+			}
+			downloader.taskQueue <- *task
+			task.isDownloading = true
+			downloader.numberOfDownloadingChunks++
+			downloader.numberOfActiveChunks++
+			downloader.lastChunkIndex++
+		}
+	}
+}
+
 // Stop terminates all downloading goroutines
 func (downloader *ChunkDownloader) Stop() {
 	for downloader.numberOfDownloadingChunks > 0 {
@@ -251,9 +313,12 @@ func (downloader *ChunkDownloader) Stop() {
 		downloader.taskList[completion.chunkIndex].chunk = completion.chunk
 		downloader.numberOfDownloadedChunks++
 		downloader.numberOfDownloadingChunks--
+		if completion.chunk.isBroken {
+			downloader.NumberOfFailedChunks++
+		}
 	}
 
-	for i, _ := range downloader.completedTasks {
+	for i := range downloader.completedTasks {
 		downloader.config.PutChunk(downloader.taskList[i].chunk)
 		downloader.taskList[i].chunk = nil
 		downloader.numberOfActiveChunks--
@@ -305,13 +370,22 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 	// will be set up before the encryption
 	chunk.Reset(false)
 
+	// If failures are allowed, complete the task properly
+	completeFailedChunk := func(chunk *Chunk) {
+		if downloader.allowFailures {
+			chunk.isBroken = true
+			downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
+		}
+	}
+
 	const MaxDownloadAttempts = 3
 	for downloadAttempt := 0; ; downloadAttempt++ {
 
 		// Find the chunk by ID first.
 		chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
 		if err != nil {
-			LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
+			completeFailedChunk(chunk)
+			LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
 			return false
 		}
 
@@ -319,7 +393,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			// No chunk is found. Have to find it in the fossil pool again.
 			fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
 			if err != nil {
-				LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
+				completeFailedChunk(chunk)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
 				return false
 			}
 
@@ -341,11 +416,12 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 					continue
 				}
 
+				completeFailedChunk(chunk)
 				// A chunk is not found. This is a serious error and hopefully it will never happen.
 				if err != nil {
-					LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
+					LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
 				} else {
-					LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
+					LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
 				}
 				return false
 			}
@@ -354,7 +430,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			// downloading again.
 			err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
 			if err != nil {
-				LOG_FATAL("DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
+				completeFailedChunk(chunk)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
 				return false
 			}
 
@@ -371,7 +448,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 				chunk.Reset(false)
 				continue
 			} else {
-				LOG_ERROR("DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
+				completeFailedChunk(chunk)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
 				return false
 			}
 		}
@@ -383,7 +461,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 				chunk.Reset(false)
 				continue
 			} else {
-				LOG_ERROR("DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
+				completeFailedChunk(chunk)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
 				return false
 			}
 		}
@@ -395,7 +474,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 				chunk.Reset(false)
 				continue
 			} else {
-				LOG_FATAL("DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
+				completeFailedChunk(chunk)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
 				return false
 			}
 		}
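
Putting the downloader changes together: a caller that opts into -persist behavior constructs the downloader with allowFailures set, and broken chunks are then counted in NumberOfFailedChunks instead of aborting the process. The sketch below uses only signatures visible in this diff (config, storage, cache, and the chunk hash list are assumed to come from the caller; the function itself is illustrative, not part of the commit):

    // restoreWithPersist sketches how the -persist path drives the downloader:
    // download failures mark chunks as broken rather than calling LOG_FATAL,
    // and the caller inspects the failure count at the end.
    func restoreWithPersist(config *Config, storage Storage, cache *FileStorage, chunkHashes []string, threads int) int {
        downloader := CreateChunkDownloader(config, storage, cache, false, threads, true /*allowFailures*/)
        for _, chunkHash := range chunkHashes {
            downloader.AddChunk(chunkHash)
        }
        downloader.WaitForCompletion()
        downloader.Stop()
        return downloader.NumberOfFailedChunks
    }
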
@@ -18,7 +18,7 @@ const (
 	ChunkOperationResurrect = 3
 )
 
-// ChunkOperatorTask is used to pass paramaters for different kinds of chunk operations.
+// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
 type ChunkOperatorTask struct {
 	operation int // The type of operation
 	chunkID string // The chunk id
@@ -128,7 +128,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
 	}
 
 	// Encrypt the chunk only after we know that it must be uploaded.
-	err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
+	err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
 	if err != nil {
 		LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
 		return false
@@ -101,7 +101,7 @@ func TestUploaderAndDownloader(t *testing.T) {
 
 	chunkUploader.Stop()
 
-	chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
+	chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads, false)
 	chunkDownloader.totalChunkSize = int64(totalFileSize)
 
 	for _, chunk := range chunks {
@@ -9,15 +9,21 @@ import (
|
|||||||
"crypto/hmac"
|
"crypto/hmac"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
|
"crypto/rsa"
|
||||||
|
"crypto/x509"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"encoding/pem"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
"os"
|
"os"
|
||||||
|
"strings"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
"io/ioutil"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
blake2 "github.com/minio/blake2b-simd"
|
blake2 "github.com/minio/blake2b-simd"
|
||||||
)
|
)
|
||||||
@@ -29,8 +35,8 @@ var DEFAULT_KEY = []byte("duplicacy")
|
|||||||
// standard zlib levels of -1 to 9.
|
// standard zlib levels of -1 to 9.
|
||||||
var DEFAULT_COMPRESSION_LEVEL = 100
|
var DEFAULT_COMPRESSION_LEVEL = 100
|
||||||
|
|
||||||
// The new header of the config file (to differentiate from the old format where the salt and iterations are fixed)
|
// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
|
||||||
var CONFIG_HEADER = "duplicacy\001"
|
var CONFIG_BANNER = "duplicacy\001"
|
||||||
|
|
||||||
// The length of the salt used in the new format
|
// The length of the salt used in the new format
|
||||||
var CONFIG_SALT_LENGTH = 32
|
var CONFIG_SALT_LENGTH = 32
|
||||||
@@ -65,6 +71,14 @@ type Config struct {
|
|||||||
// for encrypting a non-chunk file
|
// for encrypting a non-chunk file
|
||||||
FileKey []byte `json:"-"`
|
FileKey []byte `json:"-"`
|
||||||
|
|
||||||
|
// for erasure coding
|
||||||
|
DataShards int `json:'data-shards'`
|
||||||
|
ParityShards int `json:'parity-shards'`
|
||||||
|
|
||||||
|
// for RSA encryption
|
||||||
|
rsaPrivateKey *rsa.PrivateKey
|
||||||
|
rsaPublicKey *rsa.PublicKey
|
||||||
|
|
||||||
chunkPool chan *Chunk
|
chunkPool chan *Chunk
|
||||||
numberOfChunks int32
|
numberOfChunks int32
|
||||||
dryRun bool
|
dryRun bool
|
||||||
@@ -80,10 +94,15 @@ type jsonableConfig struct {
|
|||||||
IDKey string `json:"id-key"`
|
IDKey string `json:"id-key"`
|
||||||
ChunkKey string `json:"chunk-key"`
|
ChunkKey string `json:"chunk-key"`
|
||||||
FileKey string `json:"file-key"`
|
FileKey string `json:"file-key"`
|
||||||
|
RSAPublicKey string `json:"rsa-public-key"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *Config) MarshalJSON() ([]byte, error) {
|
func (config *Config) MarshalJSON() ([]byte, error) {
|
||||||
|
|
||||||
|
publicKey := []byte {}
|
||||||
|
if config.rsaPublicKey != nil {
|
||||||
|
publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
|
||||||
|
}
|
||||||
return json.Marshal(&jsonableConfig{
|
return json.Marshal(&jsonableConfig{
|
||||||
aliasedConfig: (*aliasedConfig)(config),
|
aliasedConfig: (*aliasedConfig)(config),
|
||||||
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
|
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
|
||||||
@@ -91,6 +110,7 @@ func (config *Config) MarshalJSON() ([]byte, error) {
|
|||||||
IDKey: hex.EncodeToString(config.IDKey),
|
IDKey: hex.EncodeToString(config.IDKey),
|
||||||
ChunkKey: hex.EncodeToString(config.ChunkKey),
|
ChunkKey: hex.EncodeToString(config.ChunkKey),
|
||||||
FileKey: hex.EncodeToString(config.FileKey),
|
FileKey: hex.EncodeToString(config.FileKey),
|
||||||
|
RSAPublicKey: hex.EncodeToString(publicKey),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -120,6 +140,19 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
|
|||||||
return fmt.Errorf("Invalid representation of the file key in the config")
|
return fmt.Errorf("Invalid representation of the file key in the config")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if publicKey, err := hex.DecodeString(aliased.RSAPublicKey); err != nil {
|
||||||
|
return fmt.Errorf("Invalid hex encoding of the RSA public key in the config")
|
||||||
|
} else if len(publicKey) > 0 {
|
||||||
|
parsedKey, err := x509.ParsePKIXPublicKey(publicKey)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("Invalid RSA public key in the config: %v", err)
|
||||||
|
}
|
||||||
|
config.rsaPublicKey = parsedKey.(*rsa.PublicKey)
|
||||||
|
if config.rsaPublicKey == nil {
|
||||||
|
return fmt.Errorf("Unsupported public key type %s in the config", reflect.TypeOf(parsedKey))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -140,6 +173,33 @@ func (config *Config) Print() {
|
|||||||
LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
|
LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
|
||||||
LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
|
LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
|
||||||
LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
|
LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
|
||||||
|
|
||||||
|
LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
|
||||||
|
LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)
|
||||||
|
|
||||||
|
if len(config.ChunkKey) > 0 {
|
||||||
|
LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(config.FileKey) > 0 {
|
||||||
|
LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.DataShards != 0 && config.ParityShards != 0 {
|
||||||
|
LOG_TRACE("CONFIG_INFO", "Data shards: %d, parity shards: %d", config.DataShards, config.ParityShards)
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.rsaPublicKey != nil {
|
||||||
|
pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
|
||||||
|
|
||||||
|
publicKey := pem.EncodeToMemory(&pem.Block{
|
||||||
|
Type: "PUBLIC KEY",
|
||||||
|
Bytes: pkisPublicKey,
|
||||||
|
})
|
||||||
|
|
||||||
|
LOG_TRACE("CONFIG_INFO", "RSA public key: %s", publicKey)
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
|
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
|
||||||
@@ -335,11 +395,11 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
|||||||
return nil, false, err
|
return nil, false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(configFile.GetBytes()) < len(ENCRYPTION_HEADER) {
|
if len(configFile.GetBytes()) < len(ENCRYPTION_BANNER) {
|
||||||
return nil, false, fmt.Errorf("The storage has an invalid config file")
|
return nil, false, fmt.Errorf("The storage has an invalid config file")
|
||||||
}
|
}
|
||||||
|
|
||||||
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)-1]) == ENCRYPTION_HEADER[:len(ENCRYPTION_HEADER)-1] && len(password) == 0 {
|
if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)-1]) == ENCRYPTION_BANNER[:len(ENCRYPTION_BANNER)-1] && len(password) == 0 {
|
||||||
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
|
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -347,23 +407,23 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
 
 	if len(password) > 0 {
 
-		if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)]) == ENCRYPTION_HEADER {
+		if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)]) == ENCRYPTION_BANNER {
 			// This is the old config format with a static salt and a fixed number of iterations
 			masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
 			LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
-		} else if string(configFile.GetBytes()[:len(CONFIG_HEADER)]) == CONFIG_HEADER {
+		} else if string(configFile.GetBytes()[:len(CONFIG_BANNER)]) == CONFIG_BANNER {
 			// This is the new config format with a random salt and a configurable number of iterations
 			encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4
 
 			// Extract the salt and the number of iterations
-			saltStart := configFile.GetBytes()[len(CONFIG_HEADER):]
+			saltStart := configFile.GetBytes()[len(CONFIG_BANNER):]
 			iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
 			LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
 			masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))
 
-			// Copy to a temporary buffer to replace the header and remove the salt and the number of iterations
+			// Copy to a temporary buffer to replace the banner and remove the salt and the number of iterations
 			var encrypted bytes.Buffer
-			encrypted.Write([]byte(ENCRYPTION_HEADER))
+			encrypted.Write([]byte(ENCRYPTION_BANNER))
 			encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])
 
 			configFile.Reset(false)
@@ -372,7 +432,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
 				LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
 			}
 		} else {
-			return nil, true, fmt.Errorf("The config file has an invalid header")
+			return nil, true, fmt.Errorf("The config file has an invalid banner")
 		}
 	}
 
 	// Decrypt the config file. masterKey == nil means no encryption.
@@ -430,21 +490,21 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
 
 	if len(password) > 0 {
 		// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
-		err = chunk.Encrypt(masterKey, "")
+		err = chunk.Encrypt(masterKey, "", true)
 		if err != nil {
 			LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
 			return false
 		}
 
-		// The new encrypted format for config is CONFIG_HEADER + salt + #iterations + encrypted content
+		// The new encrypted format for config is CONFIG_BANNER + salt + #iterations + encrypted content
 		encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4
 
-		// Copy to a temporary buffer to replace the header and add the salt and the number of iterations
+		// Copy to a temporary buffer to replace the banner and add the salt and the number of iterations
 		var encrypted bytes.Buffer
-		encrypted.Write([]byte(CONFIG_HEADER))
+		encrypted.Write([]byte(CONFIG_BANNER))
 		encrypted.Write(salt)
 		binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
-		encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_HEADER):])
+		encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_BANNER):])
 
 		chunk.Reset(false)
 		chunk.Write(encrypted.Bytes())
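Editor's note: the framing written above is CONFIG_BANNER, a random salt of CONFIG_SALT_LENGTH bytes, the iteration count as a little-endian uint32, and then the encrypted payload with its own ENCRYPTION_BANNER stripped. A minimal standalone sketch of just that framing, with hypothetical banner and salt-length values standing in for the real constants defined elsewhere in this file:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// Assumed placeholders; the real values come from duplicacy_config.go.
const configBanner = "duplicacy\001" // hypothetical banner
const configSaltLength = 32

// frame prepends the banner, salt and iteration count to an encrypted payload.
func frame(salt []byte, iterations uint32, encrypted []byte) []byte {
	var buf bytes.Buffer
	buf.WriteString(configBanner)
	buf.Write(salt)
	binary.Write(&buf, binary.LittleEndian, iterations)
	buf.Write(encrypted)
	return buf.Bytes()
}

// parse undoes frame, returning the salt, iteration count and payload.
func parse(data []byte) (salt []byte, iterations uint32, payload []byte, err error) {
	header := len(configBanner) + configSaltLength + 4
	if len(data) < header || string(data[:len(configBanner)]) != configBanner {
		return nil, 0, nil, fmt.Errorf("invalid config banner")
	}
	rest := data[len(configBanner):]
	salt = rest[:configSaltLength]
	iterations = binary.LittleEndian.Uint32(rest[configSaltLength : configSaltLength+4])
	payload = rest[configSaltLength+4:]
	return salt, iterations, payload, nil
}

func main() {
	framed := frame(make([]byte, configSaltLength), 16384, []byte("ciphertext"))
	salt, iter, payload, _ := parse(framed)
	fmt.Println(len(salt), iter, string(payload)) // 32 16384 ciphertext
}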
@@ -477,7 +537,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
 // it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
 // is enabled.
 func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
-	minimumChunkSize int, password string, copyFrom *Config, bitCopy bool) bool {
+	minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string, dataShards int, parityShards int) bool {
 
 	exist, _, _, err := storage.GetFileInfo(0, "config")
 	if err != nil {
@@ -496,5 +556,129 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
 		return false
 	}
 
+	if keyFile != "" {
+		config.loadRSAPublicKey(keyFile)
+	}
+
+	config.DataShards = dataShards
+	config.ParityShards = parityShards
+
 	return UploadConfig(storage, config, password, iterations)
 }
 
+func (config *Config) loadRSAPublicKey(keyFile string) {
+	encodedKey := []byte(keyFile)
+	var err error
+
+	// keyFile may be the actual key, in which case we don't need to read from a file
+	if !strings.Contains(keyFile, "-----BEGIN") {
+		encodedKey, err = ioutil.ReadFile(keyFile)
+		if err != nil {
+			LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
+			return
+		}
+	}
+
+	decodedKey, _ := pem.Decode(encodedKey)
+	if decodedKey == nil {
+		LOG_ERROR("RSA_PUBLIC", "Unrecognized public key in %s", keyFile)
+		return
+	}
+	if decodedKey.Type != "PUBLIC KEY" {
+		LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", decodedKey.Type, keyFile)
+		return
+	}
+
+	parsedKey, err := x509.ParsePKIXPublicKey(decodedKey.Bytes)
+	if err != nil {
+		LOG_ERROR("RSA_PUBLIC", "Failed to parse the public key in %s: %v", keyFile, err)
+		return
+	}
+
+	key, ok := parsedKey.(*rsa.PublicKey)
+	if !ok {
+		LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
+		return
+	}
+
+	config.rsaPublicKey = key
+}
+
+// loadRSAPrivateKey loads the specified private key file for decrypting file chunks
+func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {
+
+	if config.rsaPublicKey == nil {
+		LOG_ERROR("RSA_PUBLIC", "The storage was not encrypted by an RSA key")
+		return
+	}
+
+	encodedKey := []byte(keyFile)
+	var err error
+
+	// keyFile may be the actual key, in which case we don't need to read from a file
+	if !strings.Contains(keyFile, "-----BEGIN") {
+		encodedKey, err = ioutil.ReadFile(keyFile)
+		if err != nil {
+			LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
+			return
+		}
+	}
+
+	decodedKey, _ := pem.Decode(encodedKey)
+	if decodedKey == nil {
+		LOG_ERROR("RSA_PRIVATE", "Unrecognized private key in %s", keyFile)
+		return
+	}
+	if decodedKey.Type != "RSA PRIVATE KEY" {
+		LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", decodedKey.Type, keyFile)
+		return
+	}
+
+	var decodedKeyBytes []byte
+	if passphrase != "" {
+		decodedKeyBytes, err = x509.DecryptPEMBlock(decodedKey, []byte(passphrase))
+	} else {
+		decodedKeyBytes = decodedKey.Bytes
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(decodedKeyBytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(decodedKeyBytes); err != nil {
+			LOG_ERROR("RSA_PRIVATE", "Failed to parse the private key in %s: %v", keyFile, err)
+			return
+		}
+	}
+
+	key, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
+		return
+	}
+
+	data := make([]byte, 32)
+	_, err = rand.Read(data)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Failed to generate random data for testing the private key: %v", err)
+		return
+	}
+
+	// Now test if the private key matches the public key
+	encryptedData, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, config.rsaPublicKey, data, nil)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Failed to encrypt random data with the public key: %v", err)
+		return
+	}
+
+	decryptedData, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, key, encryptedData, nil)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Incorrect private key: %v", err)
+		return
+	}
+
+	if !bytes.Equal(data, decryptedData) {
+		LOG_ERROR("RSA_PRIVATE", "Decrypted data does not match the original data")
+		return
+	}
+
+	config.rsaPrivateKey = key
+}
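Editor's note: the two loaders above expect a PKIX "PUBLIC KEY" PEM block for the public key and a PKCS#1 "RSA PRIVATE KEY" PEM block (optionally passphrase-encrypted) for the private key. A sketch of generating a matching key pair in Go; the file names are arbitrary, and the assumption that `openssl genrsa` plus `openssl rsa -pubout` produce the same encodings is stated here as an assumption, not verified:

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"io/ioutil"
)

// Writes key.pem / key.pub.pem in the encodings the loaders above expect:
// a PKCS#1 "RSA PRIVATE KEY" block and a PKIX "PUBLIC KEY" block.
func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	privPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	pubBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		panic(err)
	}
	pubPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: pubBytes})

	ioutil.WriteFile("key.pem", privPEM, 0600)    // private key: owner-only
	ioutil.WriteFile("key.pub.pem", pubPEM, 0644) // public key
}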
@@ -6,6 +6,7 @@ package duplicacy
 import (
 	"fmt"
+	"io/ioutil"
 	"strings"
 
 	"github.com/gilbertchen/go-dropbox"
@@ -199,6 +200,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
 	}
 
 	defer output.Body.Close()
+	defer ioutil.ReadAll(output.Body)
 
 	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
 	return err
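Editor's note: deferred calls run last-in-first-out, so the added `defer ioutil.ReadAll(output.Body)` drains the body before the earlier `defer output.Body.Close()` runs; a fully read body lets Go's http.Transport reuse the keep-alive connection instead of discarding it. The same pattern in isolation, independent of Dropbox:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// fetch demonstrates draining a response body before closing it so the
// underlying TCP connection can be reused by the http.Transport.
func fetch(url string) error {
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()                  // registered first, runs last
	defer io.Copy(ioutil.Discard, resp.Body) // registered last, runs first: drains leftovers

	// Read only part of the body on purpose; the deferred drain handles the rest.
	if _, err = io.CopyN(ioutil.Discard, resp.Body, 512); err != nil && err != io.EOF {
		return err
	}
	return nil
}

func main() { fmt.Println(fetch("https://example.com/")) }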
@@ -513,7 +513,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 		}
 		if entry.IsLink() {
 			isRegular := false
-			isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
+			isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
 			if err != nil {
 				LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
 				skippedFiles = append(skippedFiles, entry.Path)
@@ -523,7 +523,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 			if isRegular {
 				entry.Mode ^= uint32(os.ModeSymlink)
 			} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
-				stat, err := os.Stat(filepath.Join(top, entry.Path))
+				stat, err := os.Stat(joinPath(top, entry.Path))
 				if err != nil {
 					LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
 					skippedFiles = append(skippedFiles, entry.Path)
@@ -536,6 +536,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 				// path from f.Name(); note that a "/" is appended assuming a symbolic link is always a directory
 				newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
 			}
+			if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
+				continue
+			}
 			entry = newEntry
 		}
 	}
src/duplicacy_filefabricstorage.go (new file, 618 lines)
@@ -0,0 +1,618 @@
// Copyright (c) Storage Made Easy. All rights reserved.
//
// This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
// Duplicacy and its derivative works.
//

package duplicacy

import (
	"io"
	"fmt"
	"time"
	"sync"
	"bytes"
	"errors"
	"strings"
	"net/url"
	"net/http"
	"math/rand"
	"io/ioutil"
	"encoding/xml"
	"path/filepath"
	"mime/multipart"
)

// The XML element representing a file returned by the File Fabric server
type FileFabricFile struct {
	XMLName xml.Name
	ID      string `xml:"fi_id"`
	Path    string `xml:"path"`
	Size    int64  `xml:"fi_size"`
	Type    int    `xml:"fi_type"`
}

// The XML element representing a file list returned by the server
type FileFabricFileList struct {
	XMLName xml.Name         `xml:"files"`
	Files   []FileFabricFile `xml:",any"`
}

type FileFabricStorage struct {
	StorageBase

	endpoint     string // the server
	authToken    string // the authentication token
	accessToken  string // the access token (as returned by getTokenByAuthToken)
	storageDir   string // the path of the storage directory
	storageDirID string // the id of 'storageDir'

	client             *http.Client      // the default http client
	threads            int               // number of threads
	maxRetries         int               // maximum number of tries
	directoryCache     map[string]string // stores ids for directories known to this backend
	directoryCacheLock sync.Mutex        // lock for accessing directoryCache

	isAuthorized bool
	testMode     bool
}

var (
	errFileFabricAuthorizationFailure = errors.New("Authentication failure")
	errFileFabricDirectoryExists      = errors.New("Directory exists")
)

// The general server response
type FileFabricResponse struct {
	Status  string `xml:"status"`
	Message string `xml:"statusmessage"`
}

// Check the server response and return an error representing the error message it contains
func checkFileFabricResponse(response FileFabricResponse, actionFormat string, actionArguments ...interface{}) error {

	action := fmt.Sprintf(actionFormat, actionArguments...)
	if response.Status == "ok" && response.Message == "Success" {
		return nil
	} else if response.Status == "error_data" {
		if response.Message == "Folder with same name already exists." {
			return errFileFabricDirectoryExists
		}
	}

	return fmt.Errorf("Failed to %s (status: %s, message: %s)", action, response.Status, response.Message)
}

// Create a File Fabric storage backend
func CreateFileFabricStorage(endpoint string, token string, storageDir string, threads int) (storage *FileFabricStorage, err error) {

	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &FileFabricStorage{
		endpoint:       endpoint,
		authToken:      token,
		client:         http.DefaultClient,
		threads:        threads,
		directoryCache: make(map[string]string),
		maxRetries:     12,
	}

	err = storage.getAccessToken()
	if err != nil {
		return nil, err
	}

	storageDirID, isDir, _, err := storage.getFileInfo(0, storageDir)
	if err != nil {
		return nil, err
	}
	if storageDirID == "" {
		return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
	}
	if !isDir {
		return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
	}
	storage.storageDir = storageDir
	storage.storageDirID = storageDirID

	for _, dir := range []string{"snapshots", "chunks"} {
		storage.CreateDirectory(0, dir)
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}

// Retrieve the access token using an auth token
func (storage *FileFabricStorage) getAccessToken() error {

	formData := url.Values{"authtoken": {storage.authToken}}
	readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		Token string `xml:"token"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
	if err != nil {
		return err
	}

	storage.accessToken = output.Token
	return nil
}

// Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff
func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, messageArguments ...interface{}) bool {
	message := fmt.Sprintf(messageFormat, messageArguments...)

	if retry >= storage.maxRetries {
		LOG_WARN("FILEFABRIC_REQUEST", "%s", message)
		return false
	}
	backoff := 1 << uint(retry)
	if backoff > 60 {
		backoff = 60
	}
	delay := rand.Intn(backoff*500) + backoff*500
	LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay)/1000.0)
	time.Sleep(time.Duration(delay) * time.Millisecond)
	return true
}

// Send a request to the server
func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {

	var response *http.Response

	for retries := 0; ; retries++ {
		var inputReader io.Reader

		switch input.(type) {
		case url.Values:
			values := input.(url.Values)
			inputReader = strings.NewReader(values.Encode())
			if requestHeaders == nil {
				requestHeaders = make(map[string]string)
			}
			requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
		case *RateLimitedReader:
			rateLimitedReader := input.(*RateLimitedReader)
			rateLimitedReader.Reset()
			inputReader = rateLimitedReader
		default:
			LOG_FATAL("FILEFABRIC_REQUEST", "Input type is not supported")
			return nil, nil, 0, fmt.Errorf("Input type is not supported")
		}

		request, err := http.NewRequest(method, requestURL, inputReader)
		if err != nil {
			return nil, nil, 0, err
		}

		if requestHeaders != nil {
			for key, value := range requestHeaders {
				request.Header.Set(key, value)
			}
		}

		if _, ok := input.(*RateLimitedReader); ok {
			request.ContentLength = input.(*RateLimitedReader).Length()
		}

		response, err = storage.client.Do(request)
		if err != nil {
			if !storage.shouldRetry(retries, "[%d] %s %s returned an error: %v", threadIndex, method, requestURL, err) {
				return nil, nil, 0, err
			}
			continue
		}

		if response.StatusCode < 300 {
			return response.Body, response.Header, response.ContentLength, nil
		}

		defer response.Body.Close()
		defer io.Copy(ioutil.Discard, response.Body)

		var output struct {
			Status  string `xml:"status"`
			Message string `xml:"statusmessage"`
		}

		err = xml.NewDecoder(response.Body).Decode(&output)
		if err != nil {
			if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) {
				return nil, nil, 0, err
			}
			continue
		}

		if !storage.shouldRetry(retries, "[%d] %s %s returned status: %s, message: %s", threadIndex, method, requestURL, output.Status, output.Message) {
			return nil, nil, 0, err
		}
	}
}

func (storage *FileFabricStorage) getAPIURL(function string) string {
	if storage.accessToken == "" {
		return "https://" + storage.endpoint + "/api/*/" + function + "/"
	} else {
		return "https://" + storage.endpoint + "/api/" + storage.accessToken + "/" + function + "/"
	}
}

// ListFiles returns the list of files and subdirectories under 'dir'. A subdirectory returned must have a trailing '/', with
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recursively or non-recursively.
func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if dir != "" && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	dirID, _, _, err := storage.getFileInfo(threadIndex, dir)
	if err != nil {
		return nil, nil, err
	}

	if dirID == "" {
		return nil, nil, nil
	}

	lastID := ""

	for {
		formData := url.Values{"marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid": {dirID}}
		if dir == "snapshots/" {
			formData["includefolders"] = []string{"y"}
		}
		if storage.testMode {
			formData["limit"] = []string{"5"}
		}

		readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getListOfFiles"), nil, formData)
		if err != nil {
			return nil, nil, err
		}

		defer readCloser.Close()
		defer io.Copy(ioutil.Discard, readCloser)

		var output struct {
			FileFabricResponse
			FileList  FileFabricFileList `xml:"files"`
			Truncated int                `xml:"truncated"`
		}

		err = xml.NewDecoder(readCloser).Decode(&output)
		if err != nil {
			return nil, nil, err
		}

		err = checkFileFabricResponse(output.FileFabricResponse, "list the storage directory '%s'", dir)
		if err != nil {
			return nil, nil, err
		}

		if dir == "snapshots/" {
			for _, file := range output.FileList.Files {
				if file.Type == 1 {
					files = append(files, file.Path+"/")
				}
				lastID = file.ID
			}
		} else {
			for _, file := range output.FileList.Files {
				if file.Type == 0 {
					files = append(files, file.Path)
					sizes = append(sizes, file.Size)
				}
				lastID = file.ID
			}
		}

		if output.Truncated != 1 {
			break
		}
	}
	return files, sizes, nil
}

// getFileInfo returns the information about the file or directory at 'filePath'.
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) {

	formData := url.Values{"path": {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData)
	if err != nil {
		return "", false, 0, err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File   FileFabricFile `xml:"file"`
		Exists string         `xml:"exists"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", false, 0, err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "get the info on '%s'", filePath)
	if err != nil {
		return "", false, 0, err
	}

	if output.Exists != "y" {
		return "", false, 0, nil
	} else {
		if output.File.Type == 1 {
			for filePath != "" && filePath[len(filePath)-1] == '/' {
				filePath = filePath[:len(filePath)-1]
			}

			storage.directoryCacheLock.Lock()
			storage.directoryCache[filePath] = output.File.ID
			storage.directoryCacheLock.Unlock()
		}
		return output.File.ID, output.File.Type == 1, output.File.Size, nil
	}
}

// GetFileInfo returns the information about the file or directory at 'filePath'. This is a function required by the Storage interface.
func (storage *FileFabricStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	fileID := ""
	fileID, isDir, size, err = storage.getFileInfo(threadIndex, filePath)
	return fileID != "", isDir, size, err
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) (err error) {

	fileID, _, _, _ := storage.getFileInfo(threadIndex, filePath)
	if fileID == "" {
		return nil
	}

	formData := url.Values{"fi_id": {fileID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "delete file '%s'", filePath)
	if err != nil {
		return err
	}

	return nil
}

// MoveFile renames the file.
func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	fileID, _, _, _ := storage.getFileInfo(threadIndex, from)
	if fileID == "" {
		return nil
	}

	formData := url.Values{"fi_id": {fileID}, "fi_name": {filepath.Base(to)}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "rename file '%s' to '%s'", from, to)
	if err != nil {
		return err
	}

	return nil
}

// createParentDirectory creates the parent directory if it doesn't exist in the cache.
func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir string) (parentID string, err error) {

	found := strings.LastIndex(dir, "/")
	if found == -1 {
		return storage.storageDirID, nil
	}
	parent := dir[:found]

	storage.directoryCacheLock.Lock()
	parentID = storage.directoryCache[parent]
	storage.directoryCacheLock.Unlock()

	if parentID != "" {
		return parentID, nil
	}

	parentID, err = storage.createDirectory(threadIndex, parent)
	if err != nil {
		if err == errFileFabricDirectoryExists {
			var isDir bool
			parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent)
			if err != nil {
				return "", err
			}
			if isDir == false {
				return "", fmt.Errorf("'%s' in the storage is a file", parent)
			}
			storage.directoryCacheLock.Lock()
			storage.directoryCache[parent] = parentID
			storage.directoryCacheLock.Unlock()
			return parentID, nil
		} else {
			return "", err
		}
	}
	return parentID, nil
}

// createDirectory creates a new directory.
func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) (dirID string, err error) {
	for dir != "" && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	parentID, err := storage.createParentDirectory(threadIndex, dir)
	if err != nil {
		return "", err
	}

	formData := url.Values{"fi_name": {filepath.Base(dir)}, "fi_pid": {parentID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData)
	if err != nil {
		return "", err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File FileFabricFile `xml:"file"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "create directory '%s'", dir)
	if err != nil {
		return "", err
	}

	storage.directoryCacheLock.Lock()
	storage.directoryCache[dir] = output.File.ID
	storage.directoryCacheLock.Unlock()

	return output.File.ID, nil
}

func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	_, err = storage.createDirectory(threadIndex, dir)
	if err == errFileFabricDirectoryExists {
		return nil
	}
	return err
}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	formData := url.Values{"fi_id": {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)
	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
	return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	parentID, err := storage.createParentDirectory(threadIndex, filePath)
	if err != nil {
		return err
	}

	fileName := filepath.Base(filePath)
	requestBody := &bytes.Buffer{}
	writer := multipart.NewWriter(requestBody)
	part, _ := writer.CreateFormFile("file_1", fileName)
	part.Write(content)

	writer.WriteField("file_name1", fileName)
	writer.WriteField("fi_pid", parentID)
	writer.WriteField("fi_structtype", "g")
	writer.Close()

	headers := make(map[string]string)
	headers["Content-Type"] = writer.FormDataContentType()

	rateLimitedReader := CreateRateLimitedReader(requestBody.Bytes(), storage.UploadRateLimit/storage.threads)
	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "upload file '%s'", filePath)
	if err != nil {
		return err
	}

	return nil
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileFabricStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *FileFabricStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *FileFabricStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of file names.
func (storage *FileFabricStorage) IsFastListing() bool { return false }

// Enable the test mode.
func (storage *FileFabricStorage) EnableTestMode() { storage.testMode = true }
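Editor's note: shouldRetry above sleeps for a uniformly random delay in [backoff x 0.5 s, backoff x 1.0 s), where backoff = min(2^retry, 60), so even the twelfth and final retry waits at most one minute. A small sketch that just reproduces the delay schedule:

package main

import (
	"fmt"
	"math/rand"
)

// delayMilliseconds mirrors the jittered exponential backoff used above:
// backoff doubles each retry, capped at 60, and the sleep is drawn
// uniformly from [backoff*0.5s, backoff*1.0s).
func delayMilliseconds(retry int) int {
	backoff := 1 << uint(retry)
	if backoff > 60 {
		backoff = 60
	}
	return rand.Intn(backoff*500) + backoff*500
}

func main() {
	for retry := 0; retry < 12; retry++ {
		fmt.Printf("retry %2d: %5d ms\n", retry, delayMilliseconds(retry))
	}
}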
@@ -34,7 +34,7 @@ func CreateFileReader(top string, files []*Entry) *FileReader {
 	return reader
 }
 
-// NextFile switchs to the next file in the file reader.
+// NextFile switches to the next file in the file reader.
 func (reader *FileReader) NextFile() bool {
 
 	if reader.CurrentFile != nil {
@@ -12,6 +12,7 @@ import (
 	"os"
 	"path"
 	"strings"
+	"syscall"
 	"time"
 )
 
@@ -165,7 +166,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
 			}
 		} else {
 			if !stat.IsDir() {
-				fmt.Errorf("The path %s is not a directory", dir)
+				return fmt.Errorf("The path %s is not a directory", dir)
 			}
 		}
 	}
@@ -190,7 +191,19 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
 		return err
 	}
 
-	file.Close()
+	if err = file.Sync(); err != nil {
+		pathErr, ok := err.(*os.PathError)
+		isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
+		if !isNotSupported {
+			_ = file.Close()
+			return err
+		}
+	}
+
+	err = file.Close()
+	if err != nil {
+		return err
+	}
+
 	err = os.Rename(temporaryFile, fullPath)
 	if err != nil {
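Editor's note: the rewritten tail of UploadFile follows the usual durable-write sequence: write to a temporary file, fsync it (tolerating ENOTSUP from filesystems that do not support fsync), close it, then rename it into place so readers never observe a half-written file. A self-contained sketch of that sequence for Unix-like systems; the temporary-file naming here is hypothetical:

package main

import (
	"os"
	"path/filepath"
	"syscall"
)

// writeAtomically stages content in a temporary file, fsyncs it, then
// renames it into place, mirroring the UploadFile pattern above.
// Filesystems that do not support fsync report ENOTSUP, which is ignored.
func writeAtomically(fullPath string, content []byte) error {
	temporaryFile := fullPath + ".tmp" // hypothetical naming scheme
	file, err := os.Create(temporaryFile)
	if err != nil {
		return err
	}
	if _, err = file.Write(content); err != nil {
		_ = file.Close()
		return err
	}
	if err = file.Sync(); err != nil {
		pathErr, ok := err.(*os.PathError)
		if !(ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP) {
			_ = file.Close()
			return err
		}
	}
	if err = file.Close(); err != nil {
		return err
	}
	return os.Rename(temporaryFile, fullPath)
}

func main() {
	_ = writeAtomically(filepath.Join(os.TempDir(), "demo.txt"), []byte("hello"))
}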
@@ -20,13 +20,16 @@ import (
 
 	"golang.org/x/net/context"
 	"golang.org/x/oauth2"
+	"golang.org/x/oauth2/google"
 	"google.golang.org/api/drive/v3"
 	"google.golang.org/api/googleapi"
+	"google.golang.org/api/option"
 )
 
 var (
 	GCDFileMimeType      = "application/octet-stream"
 	GCDDirectoryMimeType = "application/vnd.google-apps.folder"
+	GCDUserDrive         = "root"
 )
 
 type GCDStorage struct {
@@ -37,6 +40,7 @@ type GCDStorage struct {
 	idCacheLock sync.Mutex
 	backoffs    []int // desired backoff time in seconds for each thread
 	attempts    []int // number of failed attempts since last success for each thread
+	driveID     string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive
 
 	createDirectoryLock sync.Mutex
 	isConnected         bool
@@ -78,6 +82,14 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
 		// User Rate Limit Exceeded
 		message = e.Message
 		retry = true
+	} else if e.Code == 408 {
+		// Request timeout
+		message = e.Message
+		retry = true
+	} else if e.Code == 400 && strings.Contains(e.Message, "failedPrecondition") {
+		// Daily quota exceeded
+		message = e.Message
+		retry = true
 	} else if e.Code == 401 {
 		// Only retry on authorization error when storage has been connected before
 		if storage.isConnected {
@@ -187,7 +199,11 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
 	var err error
 
 	for {
-		fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
+		q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
+		if storage.driveID != GCDUserDrive {
+			q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
+		}
+		fileList, err = q.Do()
 		if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
 			break
 		} else if retry {
@@ -215,7 +231,11 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
 
 	for {
 		query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
-		fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
+		q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
+		if storage.driveID != GCDUserDrive {
+			q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
+		}
+		fileList, err = q.Do()
 
 		if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
 			break
@@ -244,7 +264,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
 		return fileID, nil
 	}
 
-	fileID := "root"
+	fileID := storage.driveID
 
 	if rootID, ok := storage.findPathID(""); ok {
 		fileID = rootID
@@ -299,37 +319,85 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
 }
 
 // CreateGCDStorage creates a GCD storage object.
-func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storage *GCDStorage, err error) {
+func CreateGCDStorage(tokenFile string, driveID string, storagePath string, threads int) (storage *GCDStorage, err error) {
+
+	ctx := context.Background()
 
 	description, err := ioutil.ReadFile(tokenFile)
 	if err != nil {
 		return nil, err
 	}
 
+	var object map[string]interface{}
+
+	err = json.Unmarshal(description, &object)
+	if err != nil {
+		return nil, err
+	}
+
+	isServiceAccount := false
+	if value, ok := object["type"]; ok {
+		if authType, ok := value.(string); ok && authType == "service_account" {
+			isServiceAccount = true
+		}
+	}
+
+	var tokenSource oauth2.TokenSource
+
+	if isServiceAccount {
+		config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
+		if err != nil {
+			return nil, err
+		}
+		tokenSource = config.TokenSource(ctx)
+	} else {
 		gcdConfig := &GCDConfig{}
 		if err := json.Unmarshal(description, gcdConfig); err != nil {
 			return nil, err
 		}
 
-	oauth2Config := oauth2.Config{
+		config := oauth2.Config{
 			ClientID:     gcdConfig.ClientID,
 			ClientSecret: gcdConfig.ClientSecret,
 			Endpoint:     gcdConfig.Endpoint,
 		}
+		tokenSource = config.TokenSource(ctx, &gcdConfig.Token)
+	}
 
-	authClient := oauth2Config.Client(context.Background(), &gcdConfig.Token)
-	service, err := drive.New(authClient)
+	service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
 	if err != nil {
 		return nil, err
 	}
 
+	if len(driveID) == 0 {
+		driveID = GCDUserDrive
+	} else {
+		driveList, err := drive.NewTeamdrivesService(service).List().Do()
+		if err != nil {
+			return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
+		}
+
+		found := false
+		for _, teamDrive := range driveList.TeamDrives {
+			if teamDrive.Id == driveID || teamDrive.Name == driveID {
+				driveID = teamDrive.Id
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
+		}
+	}
+
 	storage = &GCDStorage{
 		service:         service,
 		numberOfThreads: threads,
 		idCache:         make(map[string]string),
 		backoffs:        make([]int, threads),
 		attempts:        make([]int, threads),
+		driveID:         driveID,
 	}
 
 	for i := range storage.backoffs {
@@ -337,6 +405,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
 		storage.attempts[i] = 0
 	}
 
+	storage.savePathID("", driveID)
 	storagePathID, err := storage.getIDFromPath(0, storagePath, true)
 	if err != nil {
 		return nil, err
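Editor's note: CreateGCDStorage now distinguishes a service-account key file from a stored OAuth token by sniffing the JSON "type" field before choosing a token source. A sketch of just that detection, with abbreviated and hypothetical credential contents:

package main

import (
	"encoding/json"
	"fmt"
)

// isServiceAccount reports whether a credentials file is a Google service
// account key, using the same "type" sniffing as CreateGCDStorage above.
func isServiceAccount(description []byte) bool {
	var object map[string]interface{}
	if err := json.Unmarshal(description, &object); err != nil {
		return false
	}
	authType, _ := object["type"].(string)
	return authType == "service_account"
}

func main() {
	// Hypothetical, abbreviated examples of the two credential shapes.
	serviceKey := []byte(`{"type": "service_account", "client_email": "x@y.iam.gserviceaccount.com"}`)
	oauthToken := []byte(`{"client_id": "...", "client_secret": "...", "token": {}}`)
	fmt.Println(isServiceAccount(serviceKey), isServiceAccount(oauthToken)) // true false
}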
@@ -390,7 +459,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 			subDirs = append(subDirs, file.Name+"/")
 		}
 		return subDirs, nil, nil
-	} else if strings.HasPrefix(dir, "snapshots/") {
+	} else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
 		pathID, err := storage.getIDFromPath(threadIndex, dir, false)
 		if err != nil {
 			return nil, nil, err
@@ -411,20 +480,36 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 		}
 		return files, nil, nil
 	} else {
-		files := []string{}
-		sizes := []int64{}
+		lock := sync.Mutex{}
+		allFiles := []string{}
+		allSizes := []int64{}
+
+		errorChannel := make(chan error)
+		directoryChannel := make(chan string)
+		activeWorkers := 0
+
 		parents := []string{"chunks", "fossils"}
-		for i := 0; i < len(parents); i++ {
-			parent := parents[i]
+		for len(parents) > 0 || activeWorkers > 0 {
+
+			if len(parents) > 0 && activeWorkers < storage.numberOfThreads {
+				parent := parents[0]
+				parents = parents[1:]
+				activeWorkers++
+				go func(parent string) {
 					pathID, ok := storage.findPathID(parent)
 					if !ok {
-						continue
+						return
 					}
 					entries, err := storage.listFiles(threadIndex, pathID, true, true)
 					if err != nil {
-						return nil, nil, err
+						errorChannel <- err
+						return
 					}
+
+					LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries))
+
+					files := []string{}
+					sizes := []int64{}
 					for _, entry := range entries {
 						if entry.MimeType != GCDDirectoryMimeType {
 							name := entry.Name
@@ -438,12 +523,33 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 							files = append(files, name)
 							sizes = append(sizes, entry.Size)
 						} else {
-							parents = append(parents, parent+"/"+entry.Name)
+							directoryChannel <- parent + "/" + entry.Name
 							storage.savePathID(parent+"/"+entry.Name, entry.Id)
 						}
 					}
+					lock.Lock()
+					allFiles = append(allFiles, files...)
+					allSizes = append(allSizes, sizes...)
+					lock.Unlock()
+					directoryChannel <- ""
+				}(parent)
 			}
-		return files, sizes, nil
+
+			if activeWorkers > 0 {
+				select {
+				case err := <-errorChannel:
+					return nil, nil, err
+				case directory := <-directoryChannel:
+					if directory == "" {
+						activeWorkers--
+					} else {
+						parents = append(parents, directory)
+					}
+				}
+			}
+		}
+
+		return allFiles, allSizes, nil
 	}
 }
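Editor's note: the rewritten branch replaces the sequential queue with a bounded set of goroutines coordinated entirely by the main loop. Each worker pushes discovered subdirectories onto directoryChannel and finally an empty-string sentinel, so activeWorkers is the only scheduling state the loop needs. A standalone sketch of the same loop over an in-memory tree (names are hypothetical):

package main

import "fmt"

// walk traverses a directory tree with at most maxWorkers concurrent
// listings, mirroring the channel-based loop in GCDStorage.ListFiles:
// workers send subdirectories back on one channel and a "" sentinel
// when finished, letting the main loop track how many are still active.
func walk(tree map[string][]string, roots []string, maxWorkers int) []string {
	var visited []string
	directoryChannel := make(chan string)
	parents := append([]string{}, roots...)
	activeWorkers := 0

	for len(parents) > 0 || activeWorkers > 0 {
		if len(parents) > 0 && activeWorkers < maxWorkers {
			parent := parents[0]
			parents = parents[1:]
			activeWorkers++
			go func(parent string) {
				for _, child := range tree[parent] {
					directoryChannel <- parent + "/" + child
				}
				directoryChannel <- "" // sentinel: this worker is done
			}(parent)
		}
		if activeWorkers > 0 {
			directory := <-directoryChannel
			if directory == "" {
				activeWorkers--
			} else {
				visited = append(visited, directory)
				parents = append(parents, directory)
			}
		}
	}
	return visited
}

func main() {
	tree := map[string][]string{"chunks": {"00", "01"}, "chunks/00": {"aa"}}
	fmt.Println(walk(tree, []string{"chunks"}, 4))
}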
@@ -458,7 +564,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
 	}
 
 	for {
-		err = storage.service.Files.Delete(fileID).Fields("id").Do()
+		err = storage.service.Files.Delete(fileID).SupportsAllDrives(true).Fields("id").Do()
 		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
 			storage.deletePathID(filePath)
 			return nil
@@ -504,7 +610,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
 	}
 
 	for {
-		_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
+		_, err = storage.service.Files.Update(fileID, nil).SupportsAllDrives(true).AddParents(toParentID).RemoveParents(fromParentID).Do()
 		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
 			break
 		} else if retry {
@@ -555,7 +661,7 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
 		Parents: []string{parentID},
 	}
 
-	file, err = storage.service.Files.Create(file).Fields("id").Do()
+	file, err = storage.service.Files.Create(file).SupportsAllDrives(true).Fields("id").Do()
 	if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
 		break
 	} else {
@@ -624,13 +730,22 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
 	var response *http.Response
 
 	for {
-		response, err = storage.service.Files.Get(fileID).Download()
-		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
+		// AcknowledgeAbuse(true) lets the download proceed even if GCD thinks that it contains malware.
+		// TODO: Should this prompt the user or log a warning?
+		req := storage.service.Files.Get(fileID).SupportsAllDrives(true)
+		if e, ok := err.(*googleapi.Error); ok {
+			if strings.Contains(err.Error(), "cannotDownloadAbusiveFile") || len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
+				LOG_WARN("GCD_STORAGE", "%s is marked as abusive, will download anyway.", filePath)
+				req = req.AcknowledgeAbuse(true)
+			}
+		}
+		response, err = req.Download()
+		if retry, retry_err := storage.shouldRetry(threadIndex, err); retry_err == nil && !retry {
 			break
 		} else if retry {
 			continue
 		} else {
-			return err
+			return retry_err
 		}
 	}
 
@@ -663,7 +778,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
 
 	for {
 		reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
-		_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
+		_, err = storage.service.Files.Create(file).SupportsAllDrives(true).Media(reader).Fields("id").Do()
 		if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
 			break
 		} else if retry {
@@ -7,10 +7,12 @@ package duplicacy
 import (
 	"fmt"
 	"os"
+	"log"
 	"runtime/debug"
 	"sync"
 	"testing"
 	"time"
+	"regexp"
 )
 
 const (
@@ -43,6 +45,13 @@ func setTestingT(t *testing.T) {
 	testingT = t
 }
 
+// Contains the ids of logs that won't be displayed
+var suppressedLogs map[string]bool = map[string]bool{}
+
+func SuppressLog(id string) {
+	suppressedLogs[id] = true
+}
+
 func getLevelName(level int) string {
 	switch level {
 	case DEBUG:
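Editor's note: SuppressLog is presumably meant for tests that deliberately trigger warnings or errors and want them kept out of the output (the logf change further below honors the set for levels up to ERROR). A hypothetical use inside the package's tests; the DOWNLOAD_RETRY id is an assumed example, not a real log id from the source:

package duplicacy

import "testing"

// Hypothetical test showing how SuppressLog keeps an expected log id quiet.
func TestSuppressedWarning(t *testing.T) {
	SuppressLog("DOWNLOAD_RETRY")                      // silence an expected, noisy id
	LOG_WARN("DOWNLOAD_RETRY", "this line is hidden") // suppressed by logf
	LOG_INFO("TEST_INFO", "this line still prints")   // INFO is unaffected
}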
@@ -98,6 +107,15 @@ func LOG_ERROR(logID string, format string, v ...interface{}) {
     logf(ERROR, logID, format, v...)
 }

+func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) {
+    if isWarning {
+        logf(WARN, logID, format, v...)
+    } else {
+        logf(ERROR, logID, format, v...)
+    }
+}
+
 func LOG_FATAL(logID string, format string, v ...interface{}) {
     logf(FATAL, logID, format, v...)
 }
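LOG_WERROR lets a call site demote what would otherwise be a fatal error to a warning when failures are tolerated; a hypothetical call site:

    // With allowFailures true this logs a warning and the operation continues;
    // with false it logs an error and aborts as before. The id is illustrative.
    LOG_WERROR(allowFailures, "SNAPSHOT_VALIDATE", "chunk %s is missing", chunkID)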
@@ -143,6 +161,12 @@ func logf(level int, logID string, format string, v ...interface{}) {
     defer logMutex.Unlock()

     if level >= loggingLevel {
+        if level <= ERROR && len(suppressedLogs) > 0 {
+            if _, found := suppressedLogs[logID]; found {
+                return
+            }
+        }
+
         if printLogHeader {
             fmt.Printf("%s %s %s %s\n",
                 now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
@@ -161,6 +185,32 @@ func logf(level int, logID string, format string, v ...interface{}) {
     }
 }

+// Set up logging for libraries that Duplicacy depends on. They can call 'log.Printf("[ID] message")'
+// to produce logs in Duplicacy's format
+type Logger struct {
+    formatRegex *regexp.Regexp
+}
+
+func (logger *Logger) Write(line []byte) (n int, err error) {
+    n = len(line)
+    for len(line) > 0 && line[len(line)-1] == '\n' {
+        line = line[:len(line)-1]
+    }
+    matched := logger.formatRegex.FindStringSubmatch(string(line))
+    if matched != nil {
+        LOG_INFO(matched[1], "%s", matched[2])
+    } else {
+        LOG_INFO("LOG_DEFAULT", "%s", line)
+    }

+    return
+}
+
+func init() {
+    log.SetFlags(0)
+    log.SetOutput(&Logger{formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`)})
+}
+
 const (
     duplicacyExitCode = 100
     otherExitCode     = 101
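With this writer installed as the standard logger's output, a dependency's log lines are rewritten into Duplicacy's own format; a sketch of the mapping (the messages are illustrative):

    log.Printf("[SWIFT] connection reset") // a dependency using the std logger
    // -> INFO SWIFT connection reset     (the [ID] prefix becomes the log id)
    log.Printf("plain message")
    // -> INFO LOG_DEFAULT plain message  (no prefix, so the default id is used)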
@@ -13,8 +13,10 @@ import (
     "math/rand"
     "net/http"
     "strings"
+    "strconv"
     "sync"
     "time"
+    "path/filepath"

     "golang.org/x/oauth2"
 )
@@ -32,9 +34,6 @@ type OneDriveErrorResponse struct {
     Error OneDriveError `json:"error"`
 }

-var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
-var OneDriveAPIURL = "https://api.onedrive.com/v1.0"
-
 type OneDriveClient struct {
     HTTPClient *http.Client

@@ -44,9 +43,13 @@ type OneDriveClient struct {

     IsConnected bool
     TestMode    bool
+
+    IsBusiness      bool
+    RefreshTokenURL string
+    APIURL          string
 }

-func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
+func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {

     description, err := ioutil.ReadFile(tokenFile)
     if err != nil {
@@ -63,6 +66,15 @@ func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
         TokenFile: tokenFile,
         Token:     token,
         TokenLock: &sync.Mutex{},
+        IsBusiness: isBusiness,
+    }
+
+    if isBusiness {
+        client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
+        client.APIURL = "https://graph.microsoft.com/v1.0/me"
+    } else {
+        client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
+        client.APIURL = "https://api.onedrive.com/v1.0"
     }

     client.RefreshToken(false)
@@ -75,7 +87,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
     var response *http.Response

     backoff := 1
-    for i := 0; i < 8; i++ {
+    for i := 0; i < 12; i++ {

         LOG_DEBUG("ONEDRIVE_CALL", "%s %s", method, url)

@@ -106,9 +118,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{},

         if reader, ok := inputReader.(*RateLimitedReader); ok {
             request.ContentLength = reader.Length()
+            request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length()-1, reader.Length()))
         }

-        if url != OneDriveRefreshTokenURL {
+        if url != client.RefreshTokenURL {
             client.TokenLock.Lock()
             request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
             client.TokenLock.Unlock()
@@ -117,6 +130,8 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
             request.Header.Set("Content-Type", contentType)
         }

+        request.Header.Set("User-Agent", "ISV|Acrosync|Duplicacy/2.0")
+
         response, err = client.HTTPClient.Do(request)
         if err != nil {
             if client.IsConnected {
@@ -133,6 +148,9 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
                 time.Sleep(retryAfter * time.Millisecond)
             }
             backoff *= 2
+            if backoff > 256 {
+                backoff = 256
+            }
             continue
         }
         return nil, 0, err
@@ -152,7 +170,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},

         if response.StatusCode == 401 {

-            if url == OneDriveRefreshTokenURL {
+            if url == client.RefreshTokenURL {
                 return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
             }

@@ -161,11 +179,23 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
                 return nil, 0, err
             }
             continue
+        } else if response.StatusCode == 409 {
+            return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
         } else if response.StatusCode > 401 && response.StatusCode != 404 {
-            retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
-            LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
-            time.Sleep(retryAfter * time.Millisecond)
+            delay := int((rand.Float32()*0.5 + 0.5) * 1000.0 * float32(backoff))
+            if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
+                retryAfter, _ := strconv.Atoi(backoffList[0])
+                if retryAfter*1000 > delay {
+                    delay = retryAfter * 1000
+                }
+            }
+
+            LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, delay)
+            time.Sleep(time.Duration(delay) * time.Millisecond)
             backoff *= 2
+            if backoff > 256 {
+                backoff = 256
+            }
             continue
         } else {
             if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
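The new retry logic jitters the exponential backoff into the upper half of the window (between 0.5x and 1x of the backoff, in milliseconds) and lets a larger server-supplied Retry-After header override it. A self-contained sketch of that computation:

    // Sketch of the delay calculation above: jittered exponential backoff,
    // overridden by the server's Retry-After (whole seconds) when it is larger.
    func retryDelay(backoff int, retryAfterHeader string) time.Duration {
        delay := int((rand.Float32()*0.5 + 0.5) * 1000.0 * float32(backoff))
        if retryAfterHeader != "" {
            if retryAfter, err := strconv.Atoi(retryAfterHeader); err == nil && retryAfter*1000 > delay {
                delay = retryAfter * 1000
            }
        }
        return time.Duration(delay) * time.Millisecond
    }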
@@ -188,7 +218,7 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) {
         return nil
     }

-    readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
+    readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
     if err != nil {
         return fmt.Errorf("failed to refresh the access token: %v", err)
     }
@@ -228,9 +258,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)

     entries := []OneDriveEntry{}

-    url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
+    url := client.APIURL + "/drive/root:/" + path + ":/children"
     if path == "" {
-        url = OneDriveAPIURL + "/drive/root/children"
+        url = client.APIURL + "/drive/root/children"
     }
     if client.TestMode {
         url += "?top=8"
@@ -266,7 +296,7 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)

 func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {

-    url := OneDriveAPIURL + "/drive/root:/" + path
+    url := client.APIURL + "/drive/root:/" + path
     url += "?select=id,name,size,folder"

     readCloser, _, err := client.call(url, "GET", 0, "")
@@ -291,17 +321,19 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err

 func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {

-    url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"
+    url := client.APIURL + "/drive/items/root:/" + path + ":/content"

     return client.call(url, "GET", 0, "")
 }

 func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {

-    url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"
+    // Upload file using the simple method; this is only possible for OneDrive Personal or if the file
+    // is smaller than 4MB for OneDrive Business
+    if !client.IsBusiness || (client.TestMode && rand.Int()%2 == 0) {
+        url := client.APIURL + "/drive/root:/" + path + ":/content"

         readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")

         if err != nil {
             return err
         }
@@ -310,9 +342,74 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit
         return nil
     }

+    // For large files, create an upload session first
+    uploadURL, err := client.CreateUploadSession(path)
+    if err != nil {
+        return err
+    }
+
+    return client.UploadFileSession(uploadURL, content, rateLimit)
+}
+
+func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string, err error) {
+
+    type CreateUploadSessionItem struct {
+        ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"`
+        Name             string `json:"name"`
+    }
+
+    input := map[string]interface{}{
+        "item": CreateUploadSessionItem{
+            ConflictBehavior: "replace",
+            Name:             filepath.Base(path),
+        },
+    }
+
+    readCloser, _, err := client.call(client.APIURL+"/drive/root:/"+path+":/createUploadSession", "POST", input, "application/json")
+    if err != nil {
+        return "", err
+    }
+
+    type CreateUploadSessionOutput struct {
+        UploadURL string `json:"uploadUrl"`
+    }
+
+    output := &CreateUploadSessionOutput{}
+
+    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
+        return "", err
+    }
+
+    readCloser.Close()
+    return output.UploadURL, nil
+}
+
+func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte, rateLimit int) (err error) {
+
+    readCloser, _, err := client.call(uploadURL, "PUT", CreateRateLimitedReader(content, rateLimit), "")
+    if err != nil {
+        return err
+    }
+    type UploadFileSessionOutput struct {
+        Size int `json:"size"`
+    }
+    output := &UploadFileSessionOutput{}
+
+    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
+        return fmt.Errorf("Failed to complete the file upload session: %v", err)
+    }
+
+    if output.Size != len(content) {
+        return fmt.Errorf("Uploaded %d bytes out of %d bytes", output.Size, len(content))
+    }
+
+    readCloser.Close()
+    return nil
+}
+
 func (client *OneDriveClient) DeleteFile(path string) error {

-    url := OneDriveAPIURL + "/drive/root:/" + path
+    url := client.APIURL + "/drive/root:/" + path

     readCloser, _, err := client.call(url, "DELETE", 0, "")
     if err != nil {
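From a caller's point of view the dispatch is invisible: a Business client created with isBusiness set to true routes large uploads through CreateUploadSession and UploadFileSession inside UploadFile. A usage sketch (the token file name is illustrative):

    client, err := NewOneDriveClient("odb-token.json", true)
    if err != nil {
        return err
    }
    // Personal drives and small files take the single PUT; Business uploads
    // above the simple-upload limit go through an upload session instead.
    err = client.UploadFile("backups/chunk", content, rateLimit)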
@@ -325,7 +422,7 @@ func (client *OneDriveClient) DeleteFile(path string) error {

 func (client *OneDriveClient) MoveFile(path string, parent string) error {

-    url := OneDriveAPIURL + "/drive/root:/" + path
+    url := client.APIURL + "/drive/root:/" + path

     parentReference := make(map[string]string)
     parentReference["path"] = "/drive/root:/" + parent
@@ -335,6 +432,20 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {

     readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
     if err != nil {
+        if e, ok := err.(OneDriveError); ok && e.Status == 400 {
+            // The destination directory doesn't exist; trying to create it...
+            dir := filepath.Dir(parent)
+            if dir == "." {
+                dir = ""
+            }
+            client.CreateDirectory(dir, filepath.Base(parent))
+            readCloser, _, err = client.call(url, "PATCH", parameters, "application/json")
+            if err != nil {
+                return err
+            }
+
+        }
+
         return err
     }

@@ -344,24 +455,29 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {

 func (client *OneDriveClient) CreateDirectory(path string, name string) error {

-    url := OneDriveAPIURL + "/root/children"
+    url := client.APIURL + "/root/children"

     if path != "" {

-        parentID, isDir, _, err := client.GetFileInfo(path)
+        pathID, isDir, _, err := client.GetFileInfo(path)
         if err != nil {
             return err
         }

-        if parentID == "" {
-            return fmt.Errorf("The path '%s' does not exist", path)
+        if pathID == "" {
+            dir := filepath.Dir(path)
+            if dir != "." {
+                // The parent directory doesn't exist; trying to create it...
+                client.CreateDirectory(dir, filepath.Base(path))
+                isDir = true
+            }
         }

         if !isDir {
             return fmt.Errorf("The path '%s' is not a directory", path)
         }

-        url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
+        url = client.APIURL + "/drive/root:/" + path + ":/children"
     }

     parameters := make(map[string]interface{})
@@ -370,6 +486,11 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {

     readCloser, _, err := client.call(url, "POST", parameters, "application/json")
     if err != nil {
+        if e, ok := err.(OneDriveError); ok && e.Status == 409 {
+            // This error usually means the directory already exists
+            LOG_TRACE("ONEDRIVE_MKDIR", "The directory '%s/%s' already exists", path, name)
+            return nil
+        }
         return err
     }
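Together with the 409 mapping in the call method, this makes directory creation idempotent: a conflict from the children endpoint is treated as "already exists" and therefore success. A sketch of that idea in isolation, with a hypothetical mkdir wrapper standing in for the POST shown above:

    func ensureDirectory(mkdir func() error) error {
        err := mkdir()
        if e, ok := err.(OneDriveError); ok && e.Status == 409 {
            return nil // the directory is already there, which is what we wanted
        }
        return err
    }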
@@ -17,7 +17,7 @@ import (

 func TestOneDriveClient(t *testing.T) {

-    oneDriveClient, err := NewOneDriveClient("one-token.json")
+    oneDriveClient, err := NewOneDriveClient("one-token.json", false)
     if err != nil {
         t.Errorf("Failed to create the OneDrive client: %v", err)
         return
@@ -19,13 +19,13 @@ type OneDriveStorage struct {
 }

 // CreateOneDriveStorage creates an OneDrive storage object.
-func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
+func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {

     for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
         storagePath = storagePath[:len(storagePath)-1]
     }

-    client, err := NewOneDriveClient(tokenFile)
+    client, err := NewOneDriveClient(tokenFile, isBusiness)
     if err != nil {
         return nil, err
     }
@@ -80,6 +80,7 @@ func (storage *OneDriveStorage) convertFilePath(filePath string) string {

 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {

     for len(dir) > 0 && dir[len(dir)-1] == '/' {
         dir = dir[:len(dir)-1]
     }
@@ -97,7 +98,7 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
             }
         }
         return subDirs, nil, nil
-    } else if strings.HasPrefix(dir, "snapshots/") {
+    } else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
         entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
         if err != nil {
             return nil, nil, err
@@ -24,8 +24,9 @@ type Preference struct {
     RestoreProhibited  bool              `json:"no_restore"`
     DoNotSavePassword  bool              `json:"no_save_password"`
     NobackupFile       string            `json:"nobackup_file"`
-    ExcludeByAttribute bool              `json:"exclude_by_attribute"`
     Keys               map[string]string `json:"keys"`
+    FiltersFile        string            `json:"filters"`
+    ExcludeByAttribute bool              `json:"exclude_by_attribute"`
 }

 var preferencePath string
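With the new FiltersFile field, a repository's preferences entry can point at a filters file outside the .duplicacy directory. A hedged example of one entry; only the "filters", "nobackup_file" and "exclude_by_attribute" keys come from the struct above, the rest is illustrative:

    {
        "name": "default",
        "storage": "sftp://user@host//backups",
        "nobackup_file": "",
        "filters": "/etc/duplicacy/filters",
        "exclude_by_attribute": false
    }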
@@ -210,7 +210,7 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *

     defer output.Body.Close()

-    _, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.bucket))
+    _, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/storage.numberOfThreads)
     return err

 }
@@ -225,7 +225,7 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
         Bucket:      aws.String(storage.bucket),
         Key:         aws.String(storage.storageDir + filePath),
         ACL:         aws.String(s3.ObjectCannedACLPrivate),
-        Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.bucket)),
+        Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads),
         ContentType: aws.String("application/duplicacy"),
     }

@@ -237,8 +237,6 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
         LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
         attempts += 1
     }
-
-    return err
 }

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
@@ -14,6 +14,7 @@ import (
     "runtime"
     "strings"
     "time"
+    "sync"

     "github.com/pkg/sftp"
     "golang.org/x/crypto/ssh"
@@ -23,9 +24,13 @@ type SFTPStorage struct {
     StorageBase

     client          *sftp.Client
+    clientLock      sync.Mutex
     minimumNesting  int // The minimum level of directories to dive into before searching for the chunk file.
     storageDir      string
     numberOfThreads int
+    numberOfTries   int
+    serverAddress   string
+    sftpConfig      *ssh.ClientConfig
 }

 func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
@@ -38,10 +43,10 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
         return nil
     }

-    return CreateSFTPStorage(server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
+    return CreateSFTPStorage(false, server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
 }

-func CreateSFTPStorage(server string, port int, username string, storageDir string, minimumNesting int,
+func CreateSFTPStorage(compatibilityMode bool, server string, port int, username string, storageDir string, minimumNesting int,
     authMethods []ssh.AuthMethod,
     hostKeyCallback func(hostname string, remote net.Addr,
         key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {
@@ -52,8 +57,21 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
         HostKeyCallback: hostKeyCallback,
     }

-    if server == "sftp.hidrive.strato.com" {
-        sftpConfig.Ciphers = []string{"aes128-ctr", "aes256-ctr"}
+    if compatibilityMode {
+        sftpConfig.Ciphers = []string{
+            "aes128-ctr", "aes192-ctr", "aes256-ctr",
+            "aes128-gcm@openssh.com",
+            "chacha20-poly1305@openssh.com",
+            "arcfour256", "arcfour128", "arcfour",
+            "aes128-cbc",
+            "3des-cbc",
+        }
+        sftpConfig.KeyExchanges = []string{
+            "curve25519-sha256@libssh.org",
+            "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
+            "diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
+            "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
+        }
     }

     serverAddress := fmt.Sprintf("%s:%d", server, port)
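Callers now opt into the wider cipher and key-exchange lists explicitly instead of relying on the old hard-coded hostname check. A usage sketch; every argument other than the leading true is a placeholder:

    storage, err := CreateSFTPStorage(true, "sftp.example.com", 22, "user",
        "backups", 2, authMethods, hostKeyCallback, threads)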
@@ -86,6 +104,9 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
         storageDir:      storageDir,
         minimumNesting:  minimumNesting,
         numberOfThreads: threads,
+        numberOfTries:   8,
+        serverAddress:   serverAddress,
+        sftpConfig:      sftpConfig,
     }

     // Random number for generating the temporary chunk file suffix.
@@ -99,13 +120,57 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
 }

 func CloseSFTPStorage(storage *SFTPStorage) {
+    if storage.client != nil {
         storage.client.Close()
+        storage.client = nil
+    }
 }

+func (storage *SFTPStorage) getSFTPClient() *sftp.Client {
+    storage.clientLock.Lock()
+    defer storage.clientLock.Unlock()
+    return storage.client
+}
+
+func (storage *SFTPStorage) retry(f func() error) error {
+    delay := time.Second
+    for i := 0; ; i++ {
+        err := f()
+        if err != nil && strings.Contains(err.Error(), "EOF") && i < storage.numberOfTries {
+            LOG_WARN("SFTP_RETRY", "Encountered an error (%v); retry after %d second(s)", err, delay/time.Second)
+            time.Sleep(delay)
+            delay *= 2
+
+            storage.clientLock.Lock()
+            connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
+            if err != nil {
+                LOG_WARN("SFTP_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
+                storage.clientLock.Unlock()
+                continue
+            }
+
+            client, err := sftp.NewClient(connection)
+            if err != nil {
+                LOG_WARN("SFTP_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
+                connection.Close()
+                storage.clientLock.Unlock()
+                continue
+            }
+            storage.client = client
+            storage.clientLock.Unlock()
+            continue
+        }
+        return err
+    }
+}
 // ListFiles return the list of files and subdirectories under 'file' (non-recursively)
 func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) {

-    entries, err := storage.client.ReadDir(path.Join(storage.storageDir, dirPath))
+    var entries []os.FileInfo
+    err = storage.retry(func() error {
+        entries, err = storage.getSFTPClient().ReadDir(path.Join(storage.storageDir, dirPath))
+        return err
+    })
     if err != nil {
         return nil, nil, err
     }
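The wrapper turns any single SFTP call into a reconnect-and-retry call: the closure re-reads getSFTPClient() on every attempt, so after the retry loop re-establishes the connection the next attempt picks up the fresh client. A minimal sketch for a stat (the remote path is illustrative):

    var info os.FileInfo
    err = storage.retry(func() error {
        var err error
        info, err = storage.getSFTPClient().Stat(remotePath)
        return err
    })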
@@ -126,7 +191,11 @@ func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []
 // DeleteFile deletes the file or directory at 'filePath'.
 func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err error) {
     fullPath := path.Join(storage.storageDir, filePath)
-    fileInfo, err := storage.client.Stat(fullPath)
+    var fileInfo os.FileInfo
+    err = storage.retry(func() error {
+        fileInfo, err = storage.getSFTPClient().Stat(fullPath)
+        return err
+    })
     if err != nil {
         if os.IsNotExist(err) {
             LOG_TRACE("SFTP_STORAGE", "File %s has disappeared before deletion", filePath)
@@ -137,33 +206,47 @@ func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err er
     if fileInfo == nil {
         return nil
     }
-    return storage.client.Remove(path.Join(storage.storageDir, filePath))
+    return storage.retry(func() error { return storage.getSFTPClient().Remove(path.Join(storage.storageDir, filePath)) })
 }

 // MoveFile renames the file.
 func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (err error) {
     toPath := path.Join(storage.storageDir, to)
-    fileInfo, err := storage.client.Stat(toPath)
+    var fileInfo os.FileInfo
+    err = storage.retry(func() error {
+        fileInfo, err = storage.getSFTPClient().Stat(toPath)
+        return err
+    })
     if fileInfo != nil {
         return fmt.Errorf("The destination file %s already exists", toPath)
     }
-    return storage.client.Rename(path.Join(storage.storageDir, from),
-        path.Join(storage.storageDir, to))
+    err = storage.retry(func() error { return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from),
+        path.Join(storage.storageDir, to)) })
+    return err
 }

 // CreateDirectory creates a new directory.
 func (storage *SFTPStorage) CreateDirectory(threadIndex int, dirPath string) (err error) {
     fullPath := path.Join(storage.storageDir, dirPath)
-    fileInfo, err := storage.client.Stat(fullPath)
+    var fileInfo os.FileInfo
+    err = storage.retry(func() error {
+        fileInfo, err = storage.getSFTPClient().Stat(fullPath)
+        return err
+    })
     if fileInfo != nil && fileInfo.IsDir() {
         return nil
     }
-    return storage.client.Mkdir(path.Join(storage.storageDir, dirPath))
+    return storage.retry(func() error { return storage.getSFTPClient().Mkdir(path.Join(storage.storageDir, dirPath)) })
 }

 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
-    fileInfo, err := storage.client.Stat(path.Join(storage.storageDir, filePath))
+    var fileInfo os.FileInfo
+    err = storage.retry(func() error {
+        fileInfo, err = storage.getSFTPClient().Stat(path.Join(storage.storageDir, filePath))
+        return err
+    })
+
     if err != nil {
         if os.IsNotExist(err) {
             return false, false, 0, nil
@@ -181,7 +264,8 @@ func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist

 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
-    file, err := storage.client.Open(path.Join(storage.storageDir, filePath))
+    return storage.retry(func() error {
+        file, err := storage.getSFTPClient().Open(path.Join(storage.storageDir, filePath))

         if err != nil {
             return err
@@ -191,8 +275,8 @@ func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk
         if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
             return err
         }

         return nil
+    })
 }

 // UploadFile writes 'content' to the file at 'filePath'.
@@ -201,25 +285,16 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
     fullPath := path.Join(storage.storageDir, filePath)

     dirs := strings.Split(filePath, "/")
-    if len(dirs) > 1 {
     fullDir := path.Dir(fullPath)
-    _, err := storage.client.Stat(fullDir)
-    if err != nil {
-        // The error may be caused by a non-existent fullDir, or a broken connection. In either case,
-        // we just assume it is the former because there isn't a way to tell which is the case.
-        for i, _ := range dirs[1 : len(dirs)-1] {
-            subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
-            // We don't check the error; just keep going blindly but always store the last err
-            err = storage.client.Mkdir(subDir)
-        }
-
-        // If there is an error creating the dirs, we check fullDir one more time, because another thread
-        // may happen to create the same fullDir ahead of this thread
-        _, err := storage.client.Stat(fullDir)
-        if err != nil {
-            return err
-        }
+    return storage.retry(func() error {
+        if len(dirs) > 1 {
+            _, err := storage.getSFTPClient().Stat(fullDir)
+            if os.IsNotExist(err) {
+                for i := range dirs[1 : len(dirs)-1] {
+                    subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
+                    // We don't check the error; just keep going blindly
+                    storage.getSFTPClient().Mkdir(subDir)
+                }
             }
         }
     }
@@ -232,7 +307,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content

         temporaryFile := fullPath + "." + string(suffix) + ".tmp"

-        file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
+        file, err := storage.getSFTPClient().OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
         if err != nil {
             return err
         }
@@ -243,13 +318,16 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
             file.Close()
             return err
         }
-        file.Close()

-        err = storage.client.Rename(temporaryFile, fullPath)
-        if err != nil {
-            if _, err = storage.client.Stat(fullPath); err == nil {
-                storage.client.Remove(temporaryFile)
+        err = file.Close()
+        if err != nil {
+            return err
+        }
+
+        err = storage.getSFTPClient().Rename(temporaryFile, fullPath)
+        if err != nil {
+            if _, err = storage.getSFTPClient().Stat(fullPath); err == nil {
+                storage.getSFTPClient().Remove(temporaryFile)
                 return nil
             } else {
                 return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
@@ -257,6 +335,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
         }

         return nil
+    })
 }

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
@@ -13,6 +13,7 @@ import (
     "io/ioutil"
     "os"
     "os/exec"
+    "regexp"
     "strings"
     "syscall"
     "time"
@@ -123,11 +124,11 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
     }
     deviceIdRepository, err := GetPathDeviceId(top)
     if err != nil {
-        LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
+        LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
         return top
     }
     if deviceIdLocal != deviceIdRepository {
-        LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
+        LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
         return top
     }

@@ -145,22 +146,37 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
     // Use tmutil to create snapshot
     tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
     if err != nil {
-        LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
+        LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
         return top
     }

     colonPos := strings.IndexByte(tmutilOutput, ':')
     if colonPos < 0 {
-        LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
+        LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
         return top
     }
     snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])

+    tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
+    if err != nil {
+        LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
+        return top
+    }
+    snapshotName := "com.apple.TimeMachine." + snapshotDate
+
+    r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
+    snapshotNames := r.FindStringSubmatch(tmutilOutput)
+    if len(snapshotNames) > 0 {
+        snapshotName = snapshotNames[0]
+    } else {
+        LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fall back to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
+    }
+
     // Mount snapshot as readonly and hide from GUI i.e. Finder
     _, err = CommandWithTimeout(timeoutInSeconds,
-        "/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
+        "/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
     if err != nil {
-        LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
+        LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
         return top
     }

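The regex looks up the snapshot's full name in tmutil's listing instead of assuming the com.apple.TimeMachine. prefix, which newer macOS versions extend with a suffix. A sketch with illustrative tmutil output:

    // The full listing line containing the snapshot date becomes the -s= mount argument.
    output := "Snapshots for volume group containing disk /:\n" +
        "com.apple.TimeMachine.2019-05-01-101010.local"
    r := regexp.MustCompile(`(?m)^(.+` + "2019-05-01-101010" + `.*)$`)
    fmt.Println(r.FindStringSubmatch(output)[0]) // com.apple.TimeMachine.2019-05-01-101010.local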
@@ -11,6 +11,7 @@ import (
     "io/ioutil"
     "os"
     "path"
+    "path/filepath"
     "strconv"
     "strings"
     "time"
@@ -57,7 +58,7 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {

 // CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
 // will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
-func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, excludeByAttribute bool) (snapshot *Snapshot, skippedDirectories []string,
+func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string, excludeByAttribute bool) (snapshot *Snapshot, skippedDirectories []string,
     skippedFiles []string, err error) {

     snapshot = &Snapshot{
@@ -68,14 +69,127 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, exc

     var patterns []string

-    patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
+    if filtersFile == "" {
+        filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
+    }
+    patterns = ProcessFilters(filtersFile)
+
+    directories := make([]*Entry, 0, 256)
+    directories = append(directories, CreateEntry("", 0, 0, 0))
+
+    snapshot.Files = make([]*Entry, 0, 256)
+
+    attributeThreshold := 1024 * 1024
+    if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
+        attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
+    }
+
+    for len(directories) > 0 {
+
+        directory := directories[len(directories)-1]
+        directories = directories[:len(directories)-1]
+        snapshot.Files = append(snapshot.Files, directory)
+        subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes, excludeByAttribute)
+        if err != nil {
+            if directory.Path == "" {
+                LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
+                return nil, nil, nil, err
+            }
+            LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
+            skippedDirectories = append(skippedDirectories, directory.Path)
+            continue
+        }
+
+        directories = append(directories, subdirectories...)
+        skippedFiles = append(skippedFiles, skipped...)
+
+        if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
+            LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
+            snapshot.discardAttributes = true
+            for _, file := range snapshot.Files {
+                file.Attributes = nil
+            }
+        }
+    }
+
+    // Remove the root entry
+    snapshot.Files = snapshot.Files[1:]
+
+    return snapshot, skippedDirectories, skippedFiles, nil
+}
+
+func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
+    for _, pattern := range patterns {
+        if pattern == new_pattern {
+            LOG_INFO("SNAPSHOT_FILTER", "Ignoring duplicate pattern: %s ...", new_pattern)
+            return patterns
+        }
+    }
+    new_patterns = append(patterns, new_pattern)
+    return new_patterns
+}
+
+func ProcessFilters(filtersFile string) (patterns []string) {
+    patterns = ProcessFilterFile(filtersFile, make([]string, 0))
+
+    LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
+
+    LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
+
+    if IsTracing() {
+        for _, pattern := range patterns {
+            LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
+        }
+    }
+
+    return patterns
+}
+
+func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []string) {
+    for _, file := range includedFiles {
+        if file == patternFile {
+            // cycle in include mechanism discovered.
+            LOG_ERROR("SNAPSHOT_FILTER", "The filter file %s has already been included", patternFile)
+            return patterns
+        }
+    }
+    includedFiles = append(includedFiles, patternFile)
+    LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile)
+    patternFileContent, err := ioutil.ReadFile(patternFile)
     if err == nil {
-        for _, pattern := range strings.Split(string(patternFile), "\n") {
+        patternFileLines := strings.Split(string(patternFileContent), "\n")
+        patterns = ProcessFilterLines(patternFileLines, includedFiles)
+    }
+    return patterns
+}
+
+func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patterns []string) {
+    for _, pattern := range patternFileLines {
         pattern = strings.TrimSpace(pattern)
         if len(pattern) == 0 {
             continue
         }

+        if strings.HasPrefix(pattern, "@") {
+            patternIncludeFile := strings.TrimSpace(pattern[1:])
+            if patternIncludeFile == "" {
+                continue
+            }
+            if !filepath.IsAbs(patternIncludeFile) {
+                basePath := ""
+                if len(includedFiles) == 0 {
+                    basePath, _ = os.Getwd()
+                } else {
+                    basePath = filepath.Dir(includedFiles[len(includedFiles)-1])
+                }
+                patternIncludeFile = joinPath(basePath, patternIncludeFile)
+            }
+            for _, pattern := range ProcessFilterFile(patternIncludeFile, includedFiles) {
+                patterns = AppendPattern(patterns, pattern)
+            }
+            continue
+        }
+
         if pattern[0] == '#' {
             continue
         }
@@ -95,59 +209,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, exc
             }
         }

-        patterns = append(patterns, pattern)
+        patterns = AppendPattern(patterns, pattern)
     }

-    LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
-
-    LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
-
-    if IsTracing() {
-        for _, pattern := range patterns {
-            LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
-        }
-    }
-
-    }
-
-    directories := make([]*Entry, 0, 256)
-    directories = append(directories, CreateEntry("", 0, 0, 0))
-
-    snapshot.Files = make([]*Entry, 0, 256)
-
-    attributeThreshold := 1024 * 1024
-    if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
-        attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
-    }
-
-    for len(directories) > 0 {
-
-        directory := directories[len(directories)-1]
-        directories = directories[:len(directories)-1]
-        snapshot.Files = append(snapshot.Files, directory)
-        subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes, excludeByAttribute)
-        if err != nil {
-            LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
-            skippedDirectories = append(skippedDirectories, directory.Path)
-            continue
-        }
-
-        directories = append(directories, subdirectories...)
-        skippedFiles = append(skippedFiles, skipped...)
-
-        if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
-            LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
-            snapshot.discardAttributes = true
-            for _, file := range snapshot.Files {
-                file.Attributes = nil
-            }
-        }
-    }
-
-    // Remove the root entry
-    snapshot.Files = snapshot.Files[1:]
-
-    return snapshot, skippedDirectories, skippedFiles, nil
+    return patterns
 }

 // This is the struct used to save/load incomplete snapshots
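With ProcessFilterLines in place, a filters file can now pull in other filter files with the new '@' directive; relative include paths resolve against the including file's directory. A hedged sample filters file (the include/exclude patterns themselves are illustrative):

    # '#' starts a comment; '@' includes another filter file
    @common-excludes.txt
    -*.tmp
    +src/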
@@ -270,7 +270,7 @@ func (reader *sequenceReader) Read(data []byte) (n int, err error) {

 func (manager *SnapshotManager) CreateChunkDownloader() {
     if manager.chunkDownloader == nil {
-        manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, 1)
+        manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, 1, false)
     }
 }
@@ -381,12 +381,19 @@ func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, pat
     return true
 }

+// ClearSnapshotContents removes contents loaded by DownloadSnapshotContents
+func (manager *SnapshotManager) ClearSnapshotContents(snapshot *Snapshot) {
+    snapshot.ChunkHashes = nil
+    snapshot.ChunkLengths = nil
+    snapshot.Files = nil
+}
+
 // CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
 func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string][]*Snapshot) bool {

     if allSnapshots == nil {
         // If the 'fossils' directory exists then don't clean the cache as all snapshots will be needed later
-        // during the fossil collection phase. The deletion procedure creates this direcotry.
+        // during the fossil collection phase. The deletion procedure creates this directory.
         // We only check this condition when allSnapshots is nil because
         // in this case it is the deletion procedure that is trying to clean the snapshot cache.
         exist, _, _, err := manager.snapshotCache.GetFileInfo(0, "fossils")
@@ -653,6 +660,51 @@ func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot, keepChunkH
     return chunks
 }

+// GetSnapshotChunkHashes has an option to retrieve chunk hashes in addition to chunk ids.
+func (manager *SnapshotManager) GetSnapshotChunkHashes(snapshot *Snapshot, chunkHashes *map[string]bool, chunkIDs map[string]bool) {
+
+    for _, chunkHash := range snapshot.FileSequence {
+        if chunkHashes != nil {
+            (*chunkHashes)[chunkHash] = true
+        }
+        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
+    }
+
+    for _, chunkHash := range snapshot.ChunkSequence {
+        if chunkHashes != nil {
+            (*chunkHashes)[chunkHash] = true
+        }
+        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
+    }
+
+    for _, chunkHash := range snapshot.LengthSequence {
+        if chunkHashes != nil {
+            (*chunkHashes)[chunkHash] = true
+        }
+        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
+    }
+
+    if len(snapshot.ChunkHashes) == 0 {
+
+        description := manager.DownloadSequence(snapshot.ChunkSequence)
+        err := snapshot.LoadChunks(description)
+        if err != nil {
+            LOG_ERROR("SNAPSHOT_CHUNK", "Failed to load chunks for snapshot %s at revision %d: %v",
+                snapshot.ID, snapshot.Revision, err)
+            return
+        }
+    }
+
+    for _, chunkHash := range snapshot.ChunkHashes {
+        if chunkHashes != nil {
+            (*chunkHashes)[chunkHash] = true
+        }
+        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
+    }
+
+    snapshot.ClearChunks()
+}

 // ListSnapshots shows the information about a snapshot.
 func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList []int, tag string,
     showFiles bool, showChunks bool) int {
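GetSnapshotChunkHashes collects chunk hashes (optionally) and chunk IDs (always) from all three metadata sequences plus the file-content chunks. The sketch below illustrates the two-set bookkeeping; the keyed-hash derivation merely stands in for config.GetChunkIDFromHash, whose exact construction is not shown in this diff, so treat the key and the HMAC choice as placeholders:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// chunkIDFromHash is a stand-in for config.GetChunkIDFromHash: duplicacy
// derives the on-storage chunk ID from the chunk hash with a keyed hash,
// so the key below is a placeholder, not the real derivation.
func chunkIDFromHash(idKey []byte, chunkHash string) string {
    mac := hmac.New(sha256.New, idKey)
    mac.Write([]byte(chunkHash))
    return hex.EncodeToString(mac.Sum(nil))
}

func main() {
    // Collect both sets the way GetSnapshotChunkHashes does: hashes are
    // optional (needed only when chunks will be downloaded and verified),
    // IDs are always needed to match against the storage listing.
    sequences := [][]string{
        {"hashA", "hashB"}, // FileSequence
        {"hashC"},          // ChunkSequence
        {"hashD"},          // LengthSequence
    }
    chunkHashes := make(map[string]bool)
    chunkIDs := make(map[string]bool)
    for _, seq := range sequences {
        for _, h := range seq {
            chunkHashes[h] = true
            chunkIDs[chunkIDFromHash([]byte("id key"), h)] = true
        }
    }
    fmt.Println(len(chunkHashes), "hashes,", len(chunkIDs), "ids")
}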
@@ -689,6 +741,9 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
     for _, revision := range revisions {

         snapshot := manager.DownloadSnapshot(snapshotID, revision)
+        if tag != "" && snapshot.Tag != tag {
+            continue
+        }
         creationTime := time.Unix(snapshot.StartTime, 0).Format("2006-01-02 15:04")
         tagWithSpace := ""
         if len(snapshot.Tag) > 0 {
@@ -697,15 +752,16 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
         LOG_INFO("SNAPSHOT_INFO", "Snapshot %s revision %d created at %s %s%s",
             snapshotID, revision, creationTime, tagWithSpace, snapshot.Options)

-        if tag != "" && snapshot.Tag != tag {
-            continue
-        }
-
         if showFiles {
             manager.DownloadSnapshotFileSequence(snapshot, nil, false)
         }

         if showFiles {

+            if snapshot.NumberOfFiles > 0 {
+                LOG_INFO("SNAPSHOT_STATS", "Files: %d", snapshot.NumberOfFiles)
+            }

             maxSize := int64(9)
             maxSizeDigits := 1
             totalFiles := 0
@@ -753,10 +809,12 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList

 // CheckSnapshots checks the integrity of the specified snapshots.
 func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
-    checkFiles bool, searchFossils bool, resurrect bool) bool {
+    checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int, allowFailures bool) bool {

-    LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
-        snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
+    manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads, allowFailures)
+    LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
+        snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)

     snapshotMap := make(map[string][]*Snapshot)
     var err error
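CheckSnapshots now also receives checkChunks, threads and allowFailures, matching the updated test calls further down in this diff. A stub with the same parameter list, only to make the argument order concrete; the body is a placeholder, not the real implementation:

package main

import "fmt"

// checkSnapshots mirrors the new parameter list of
// SnapshotManager.CheckSnapshots in this diff; only the argument order
// is the point of this stub.
func checkSnapshots(snapshotID string, revisions []int, tag string,
    showStatistics bool, showTabular bool, checkFiles bool, checkChunks bool,
    searchFossils bool, resurrect bool, threads int, allowFailures bool) bool {
    fmt.Printf("check id=%s revisions=%v chunks=%t threads=%d allowFailures=%t\n",
        snapshotID, revisions, checkChunks, threads, allowFailures)
    return true
}

func main() {
    // Verify chunk data with 4 download threads and keep going past
    // individual broken chunks.
    checkSnapshots("vm1@host1", []int{2, 3}, "", false, false, false,
        true /*checkChunks*/, false, false, 4, true /*allowFailures*/)
}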
@@ -770,6 +828,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
     // Store the index of the snapshot that references each chunk; if the chunk is shared by multiple snapshots, the index is -1
     chunkSnapshotMap := make(map[string]int)

+    emptyChunks := 0
+
     LOG_INFO("SNAPSHOT_CHECK", "Listing all chunks")
     allChunks, allSizes := manager.ListAllFiles(manager.storage, chunkDir)

@@ -784,9 +844,14 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

         chunk = strings.Replace(chunk, "/", "", -1)
         chunkSizeMap[chunk] = allSizes[i]

+        if allSizes[i] == 0 && !strings.HasSuffix(chunk, ".tmp") {
+            LOG_WARN("SNAPSHOT_CHECK", "Chunk %s has a size of 0", chunk)
+            emptyChunks++
+        }
     }

-    if snapshotID == "" || showStatistics {
+    if snapshotID == "" || showStatistics || showTabular {
         snapshotIDs, err := manager.ListSnapshotIDs()
         if err != nil {
             LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
@@ -803,10 +868,10 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

     snapshotIDIndex := 0
     totalMissingChunks := 0
-    for snapshotID, _ = range snapshotMap {
+    for snapshotID = range snapshotMap {

         revisions := revisionsToCheck
-        if len(revisions) == 0 || showStatistics {
+        if len(revisions) == 0 || showStatistics || showTabular {
             revisions, err = manager.ListSnapshotRevisions(snapshotID)
             if err != nil {
                 LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
@@ -816,40 +881,76 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

         for _, revision := range revisions {
             snapshot := manager.DownloadSnapshot(snapshotID, revision)
-            snapshotMap[snapshotID] = append(snapshotMap[snapshotID], snapshot)

             if tag != "" && snapshot.Tag != tag {
                 continue
             }
+            snapshotMap[snapshotID] = append(snapshotMap[snapshotID], snapshot)
+        }
+    }
+
+    totalRevisions := 0
+    for _, snapshotList := range snapshotMap {
+        totalRevisions += len(snapshotList)
+    }
+    LOG_INFO("SNAPSHOT_CHECK", "%d snapshots and %d revisions", len(snapshotMap), totalRevisions)
+
+    var totalChunkSize int64
+    for _, size := range chunkSizeMap {
+        totalChunkSize += size
+    }
+    LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))
+
+    var allChunkHashes *map[string]bool
+    if checkChunks && !checkFiles {
+        m := make(map[string]bool)
+        allChunkHashes = &m
+    }
+
+    for snapshotID = range snapshotMap {
+
+        for _, snapshot := range snapshotMap[snapshotID] {

             if checkFiles {
                 manager.DownloadSnapshotContents(snapshot, nil, false)
                 manager.VerifySnapshot(snapshot)
+                manager.ClearSnapshotContents(snapshot)
                 continue
             }

             chunks := make(map[string]bool)
-            for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
-                chunks[chunkID] = true
-            }
+            manager.GetSnapshotChunkHashes(snapshot, allChunkHashes, chunks)

             missingChunks := 0
-            for chunkID, _ := range chunks {
+            for chunkID := range chunks {

                 _, found := chunkSizeMap[chunkID]

                 if !found {

+                    // Look up the chunk again in case it actually exists, but only if there aren't
+                    // too many missing chunks.
+                    if missingChunks < 100 {
+                        _, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
+                        if err != nil {
+                            LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
+                                chunkID, err)
+                        } else if exist {
+                            LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID)
+                            continue
+                        }
+                    }
+
                     if !searchFossils {
                         missingChunks += 1
                         LOG_WARN("SNAPSHOT_VALIDATE",
                             "Chunk %s referenced by snapshot %s at revision %d does not exist",
-                            chunkID, snapshotID, revision)
+                            chunkID, snapshotID, snapshot.Revision)
                         continue
                     }

                     chunkPath, exist, size, err := manager.storage.FindChunk(0, chunkID, true)
                     if err != nil {
-                        LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
+                        LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of fossil %s: %v",
                             chunkID, err)
                         return false
                     }
@@ -858,7 +959,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
                         missingChunks += 1
                         LOG_WARN("SNAPSHOT_VALIDATE",
                             "Chunk %s referenced by snapshot %s at revision %d does not exist",
-                            chunkID, snapshotID, revision)
+                            chunkID, snapshotID, snapshot.Revision)
                         continue
                     }

@@ -866,7 +967,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
                         manager.resurrectChunk(chunkPath, chunkID)
                     } else {
                         LOG_WARN("SNAPSHOT_FOSSIL", "Chunk %s referenced by snapshot %s at revision %d "+
-                            "has been marked as a fossil", chunkID, snapshotID, revision)
+                            "has been marked as a fossil", chunkID, snapshotID, snapshot.Revision)
                     }

                     chunkSizeMap[chunkID] = size
@@ -889,11 +990,11 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

             if missingChunks > 0 {
                 LOG_WARN("SNAPSHOT_CHECK", "Some chunks referenced by snapshot %s at revision %d are missing",
-                    snapshotID, revision)
+                    snapshotID, snapshot.Revision)
                 totalMissingChunks += missingChunks
             } else {
                 LOG_INFO("SNAPSHOT_CHECK", "All chunks referenced by snapshot %s at revision %d exist",
-                    snapshotID, revision)
+                    snapshotID, snapshot.Revision)
             }
         }

@@ -905,12 +1006,61 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
         return false
     }

+    if emptyChunks > 0 {
+        LOG_ERROR("SNAPSHOT_CHECK", "%d chunks have a size of 0", emptyChunks)
+        return false
+    }
+
     if showTabular {
         manager.ShowStatisticsTabular(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
     } else if showStatistics {
         manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
     }

+    if checkChunks && !checkFiles {
+        manager.chunkDownloader.snapshotCache = nil
+        LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
+
+        startTime := time.Now()
+        var chunkHashes []string
+
+        // The index of the first chunk to add to the downloader, which may have already downloaded
+        // some metadata chunks so the index doesn't start with 0.
+        chunkIndex := -1
+
+        for chunkHash := range *allChunkHashes {
+            chunkHashes = append(chunkHashes, chunkHash)
+            if chunkIndex == -1 {
+                chunkIndex = manager.chunkDownloader.AddChunk(chunkHash)
+            } else {
+                manager.chunkDownloader.AddChunk(chunkHash)
+            }
+        }
+
+        var downloadedChunkSize int64
+        totalChunks := len(*allChunkHashes)
+        for i := 0; i < totalChunks; i++ {
+            chunk := manager.chunkDownloader.WaitForChunk(i + chunkIndex)
+            if chunk.isBroken {
+                continue
+            }
+            downloadedChunkSize += int64(chunk.GetLength())
+
+            elapsedTime := time.Now().Sub(startTime).Seconds()
+            speed := int64(float64(downloadedChunkSize) / elapsedTime)
+            remainingTime := int64(float64(totalChunks - i - 1) / float64(i + 1) * elapsedTime)
+            percentage := float64(i + 1) / float64(totalChunks) * 100.0
+            LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
+                manager.config.GetChunkIDFromHash(chunkHashes[i]), i + 1, totalChunks,
+                PrettySize(speed), PrettyTime(remainingTime), percentage)
+        }
+
+        if manager.chunkDownloader.NumberOfFailedChunks > 0 {
+            LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkDownloader.NumberOfFailedChunks, totalChunks)
+        } else {
+            LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", totalChunks)
+        }
+    }
     return true
 }

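The progress line above computes throughput from the total downloaded bytes, an ETA that scales the elapsed time by the ratio of remaining to completed chunks, and a completion percentage. A worked example with made-up numbers:

package main

import "fmt"

// Progress math from the verification loop above: speed is bytes per
// elapsed second, the ETA scales elapsed time by remaining/completed
// chunks, and the figures below are fabricated for illustration.
func main() {
    var (
        downloadedChunkSize = int64(512 * 1024 * 1024) // 512 MB so far
        elapsedTime         = 100.0                    // seconds
        totalChunks         = 1000
        i                   = 249 // zero-based index of the chunk just verified
    )
    speed := int64(float64(downloadedChunkSize) / elapsedTime)
    remainingTime := int64(float64(totalChunks-i-1) / float64(i+1) * elapsedTime)
    percentage := float64(i+1) / float64(totalChunks) * 100.0
    // 250 of 1000 chunks in 100s -> 750 remaining -> ETA 300s, 25.0%
    fmt.Printf("%d B/s, ETA %ds, %.1f%%\n", speed, remainingTime, percentage)
}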
@@ -932,7 +1082,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
             var totalChunkSize int64
             var uniqueChunkSize int64

-            for chunkID, _ := range chunks {
+            for chunkID := range chunks {
                 chunkSize := chunkSizeMap[chunkID]
                 totalChunkSize += chunkSize
                 if chunkUniqueMap[chunkID] {
@@ -950,7 +1100,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho

         var totalChunkSize int64
         var uniqueChunkSize int64
-        for chunkID, _ := range snapshotChunks {
+        for chunkID := range snapshotChunks {
             chunkSize := chunkSizeMap[chunkID]
             totalChunkSize += chunkSize

@@ -977,18 +1127,20 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
         earliestSeenChunks := make(map[string]int)

         for _, snapshot := range snapshotList {
-            for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
+            for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
                 if earliestSeenChunks[chunkID] == 0 {
                     earliestSeenChunks[chunkID] = math.MaxInt32
                 }
-                earliestSeenChunks[chunkID] = MinInt(earliestSeenChunks[chunkID], snapshot.Revision)
+                if earliestSeenChunks[chunkID] > snapshot.Revision {
+                    earliestSeenChunks[chunkID] = snapshot.Revision
+                }
             }
         }

         for _, snapshot := range snapshotList {

             chunks := make(map[string]bool)
-            for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
+            for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
                 chunks[chunkID] = true
                 snapshotChunks[chunkID] = true
             }
@@ -1000,7 +1152,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
             var newChunkCount int64
             var newChunkSize int64

-            for chunkID, _ := range chunks {
+            for chunkID := range chunks {
                 chunkSize := chunkSizeMap[chunkID]
                 totalChunkSize += chunkSize
                 totalChunkCount += 1
@@ -1028,7 +1180,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
         var uniqueChunkSize int64
         var totalChunkCount int64
         var uniqueChunkCount int64
-        for chunkID, _ := range snapshotChunks {
+        for chunkID := range snapshotChunks {
             chunkSize := chunkSizeMap[chunkID]
             totalChunkSize += chunkSize
             totalChunkCount += 1
@@ -1133,7 +1285,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
     }
 }

-// RetrieveFile retrieve the file in the specifed snapshot.
+// RetrieveFile retrieves the file in the specified snapshot.
 func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {

     if file.Size == 0 {
@@ -1157,7 +1309,6 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
     }

     var chunk *Chunk
-    currentHash := ""

     for i := file.StartChunk; i <= file.EndChunk; i++ {
         start := 0
@@ -1170,10 +1321,12 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
         }

         hash := snapshot.ChunkHashes[i]
-        if currentHash != hash {
+        lastChunk, lastChunkHash := manager.chunkDownloader.GetLastDownloadedChunk()
+        if lastChunkHash != hash {
             i := manager.chunkDownloader.AddChunk(hash)
             chunk = manager.chunkDownloader.WaitForChunk(i)
-            currentHash = hash
+        } else {
+            chunk = lastChunk
         }

         output(chunk.GetBytes()[start:end])
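RetrieveFile now asks the downloader for its last completed chunk instead of tracking currentHash locally, so consecutive file segments stored in the same chunk reuse it without a second download, and the cache also survives across calls. A toy version of the caching pattern; the downloader type here is invented for illustration and returns strings rather than decrypted chunks:

package main

import "fmt"

// downloader is a stand-in for the real ChunkDownloader.
type downloader struct {
    lastHash string
    lastData string
}

func (d *downloader) GetLastDownloadedChunk() (data string, hash string) {
    return d.lastData, d.lastHash
}

func (d *downloader) fetch(hash string) string {
    fmt.Println("downloading", hash) // only reached on a cache miss
    d.lastHash, d.lastData = hash, "contents of "+hash
    return d.lastData
}

func main() {
    d := &downloader{}
    // Three consecutive file segments, two of which share a chunk: the
    // shared chunk is downloaded once and reused.
    for _, hash := range []string{"chunk1", "chunk1", "chunk2"} {
        data, last := d.GetLastDownloadedChunk()
        if last != hash {
            data = d.fetch(hash)
        }
        _ = data
    }
}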
@@ -1248,21 +1401,20 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
     }

     file := manager.FindFile(snapshot, path, false)
-    var content []byte
-    if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
+    if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
+        fmt.Printf("%s", chunk)
+    }) {
         LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
             path, snapshot.ID, snapshot.Revision)
         return false
     }

-    fmt.Printf("%s", string(content))

     return true
 }

 // Diff compares two snapshots, or two revisions of a file if the file argument is given.
 func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
-    filePath string, compareByHash bool, nobackupFile string, excludeByAttribute bool) bool {
+    filePath string, compareByHash bool, nobackupFile string, filtersFile string, excludeByAttribute bool) bool {

     LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
         top, snapshotID, revisions, filePath, compareByHash)
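PrintFile switches from buffering the whole file to printing each decrypted piece as the output callback receives it, so memory use stays bounded by a single chunk regardless of file size. A minimal sketch of the streaming callback style, with a stand-in for RetrieveFile:

package main

import (
    "fmt"
    "os"
)

// retrieve is a stand-in for RetrieveFile: it hands the caller one
// decrypted piece at a time through the output callback.
func retrieve(pieces [][]byte, output func([]byte)) bool {
    for _, p := range pieces {
        output(p)
    }
    return true
}

func main() {
    pieces := [][]byte{[]byte("hello "), []byte("world\n")}
    // Streaming: each piece goes straight to stdout instead of being
    // appended to a growing buffer first.
    if !retrieve(pieces, func(chunk []byte) { fmt.Fprintf(os.Stdout, "%s", chunk) }) {
        os.Exit(1)
    }
}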
@@ -1275,7 +1427,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
     if len(revisions) <= 1 {
         // Only scan the repository if filePath is not provided
         if len(filePath) == 0 {
-            rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, excludeByAttribute)
+            rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile, excludeByAttribute)
             if err != nil {
                 LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
                 return false
@@ -1445,9 +1597,13 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
                 } else {
                     same = right.IsSameAs(left)
                 }
+            } else {
+                if left.Size == 0 && right.Size == 0 {
+                    same = true
                 } else {
                     same = left.Hash == right.Hash
                 }
+            }

             if !same {
                 LOG_INFO("SNAPSHOT_DIFF", " %s", left.String(maxSizeDigits))
@@ -1817,7 +1973,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
             if _, found := newChunks[chunk]; found {
                 // The fossil is referenced so it can't be deleted.
                 if dryRun {
-                    LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected: %v", chunk)
+                    LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected", chunk)
                     continue
                 }

@@ -2226,7 +2382,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
         chunk := strings.Replace(file, "/", "", -1)

         if !chunkRegex.MatchString(chunk) {
-            LOG_WARN("CHUNK_UNKONWN_FILE", "File %s is not a chunk", file)
+            LOG_WARN("CHUNK_UNKNOWN_FILE", "File %s is not a chunk", file)
             continue
         }

@@ -2425,7 +2581,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
         derivationKey = derivationKey[len(derivationKey)-64:]
     }

-    err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)
+    err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey, true)
     if err != nil {
         LOG_ERROR("UPLOAD_File", "Failed to encrypt the file %s: %v", path, err)
         return false
@@ -9,12 +9,12 @@ import (
     "encoding/hex"
     "encoding/json"
     "fmt"
+    "io/ioutil"
     "os"
     "path"
     "strings"
     "testing"
     "time"
-    "io/ioutil"
 )

 func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
@@ -620,7 +620,7 @@ func TestPruneNewSnapshots(t *testing.T) {
     // Now chunkHash1 will be resurrected
     snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
     checkTestSnapshots(snapshotManager, 4, 0)
-    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false);
+    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1, false)
 }

 // A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -669,7 +669,7 @@ func TestPruneGhostSnapshots(t *testing.T) {
     // Run the prune again but the fossil collection should be ignored, since revision 1 still exists
     snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
     checkTestSnapshots(snapshotManager, 3, 2)
-    snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false);
+    snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1, false)

     // Prune snapshot 1 again
     snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -683,5 +683,5 @@ func TestPruneGhostSnapshots(t *testing.T) {
     // Run the prune again and this time the fossil collection will be processed and the fossils removed
     snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
     checkTestSnapshots(snapshotManager, 3, 0)
-    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false);
+    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1, false)
 }
@@ -89,7 +89,7 @@ func (storage *StorageBase) SetRateLimits(downloadRateLimit int, uploadRateLimit
 }

 // SetDefaultNestingLevels sets the default read and write levels. This is usually called by
-// derived storages to set the levels with old values so that storages initialied by ealier versions
+// derived storages to set the levels with old values so that storages initialized by earlier versions
 // will continue to work.
 func (storage *StorageBase) SetDefaultNestingLevels(readLevels []int, writeLevel int) {
     storage.readLevels = readLevels
@@ -268,7 +268,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
     if matched == nil {
         LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
         return nil
-    } else if matched[1] == "sftp" {
+    } else if matched[1] == "sftp" || matched[1] == "sftpc" {
         server := matched[3]
         username := matched[2]
         storageDir := matched[5]
@@ -291,6 +291,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor

         // If ssh_key_file is set, skip password-based login
         keyFile := GetPasswordFromPreference(preference, "ssh_key_file")
+        passphrase := ""

         password := ""
         passwordCallback := func() (string, error) {
@@ -335,7 +336,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
             keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
                 true, resetPassword)

-            var key ssh.Signer
+            var keySigner ssh.Signer
             var err error

             if keyFile == "" {
@@ -346,15 +347,52 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
                 if err != nil {
                     LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
                 } else {
-                    key, err = ssh.ParsePrivateKey(content)
+                    keySigner, err = ssh.ParsePrivateKey(content)
                     if err != nil {
+                        if _, ok := err.(*ssh.PassphraseMissingError); ok {
+                            LOG_TRACE("SSH_PUBLICKEY", "The private key file is encrypted")
+                            passphrase = GetPassword(preference, "ssh_passphrase", "Enter the passphrase to decrypt the private key file:", false, resetPassword)
+                            if len(passphrase) == 0 {
+                                LOG_INFO("SSH_PUBLICKEY", "No passphrase to decrypt the private key file %s", keyFile)
+                            } else {
+                                keySigner, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
+                                if err != nil {
+                                    LOG_INFO("SSH_PUBLICKEY", "Failed to parse the encrypted private key file %s: %v", keyFile, err)
+                                }
+                            }
+                        } else {
                             LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
                         }
                     }

+                    if keySigner != nil {
+                        certFile := keyFile + "-cert.pub"
+                        if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() {
+                            LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile)
+                            var content []byte
+                            content, err = ioutil.ReadFile(certFile)
+                            if err != nil {
+                                LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err)
+                            } else {
+                                pubKey, _, _, _, err := ssh.ParseAuthorizedKey(content)
+                                if err != nil {
+                                    LOG_INFO("SSH_CERTIFICATE", "Failed to parse ssh certificate file %s: %v", certFile, err)
+                                } else {
+                                    certSigner, err := ssh.NewCertSigner(pubKey.(*ssh.Certificate), keySigner)
+                                    if err != nil {
+                                        LOG_INFO("SSH_CERTIFICATE", "Failed to create certificate signer: %v", err)
+                                    } else {
+                                        keySigner = certSigner
+                                    }
+                                }
+                            }
+                        }
+                    }
+                }
                 }

-                if key != nil {
-                    signers = append(signers, key)
+                if keySigner != nil {
+                    signers = append(signers, keySigner)
                 }

                 if len(signers) > 0 {
@@ -402,7 +440,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
             return checkHostKey(hostname, remote, key)
         }

-        sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
+        sftpStorage, err := CreateSFTPStorage(matched[1] == "sftpc", server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
         if err != nil {
             LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
             return nil
@@ -410,6 +448,9 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor

         if keyFile != "" {
             SavePassword(preference, "ssh_key_file", keyFile)
+            if passphrase != "" {
+                SavePassword(preference, "ssh_passphrase", passphrase)
+            }
         } else if password != "" {
             SavePassword(preference, "ssh_password", password)
         }
@@ -509,11 +550,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         return dropboxStorage
     } else if matched[1] == "b2" {
         bucket := matched[3]
+        storageDir := matched[5]

-        accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
-        applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)
+        accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
+        applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)

-        b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, threads)
+        b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
+        if err != nil {
+            LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
+            return nil
+        }
+        SavePassword(preference, "b2_id", accountID)
+        SavePassword(preference, "b2_key", applicationKey)
+        return b2Storage
+    } else if matched[1] == "b2-custom" {
+        b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
+        matched := b2customUrlRegex.FindStringSubmatch(storageURL)
+        downloadURL := "https://" + matched[1]
+        bucket := matched[2]
+        storageDir := matched[4]
+
+        accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
+        applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
+
+        b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
         if err != nil {
             LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
             return nil
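The new b2-custom scheme lets a bucket be reached through a custom download host. The snippet below runs the exact regex from the branch above on a fabricated URL to show which capture groups become the download URL, the bucket, and the storage directory:

package main

import (
    "fmt"
    "regexp"
)

// Same regex as the b2-custom branch; the host and bucket in the example
// URL are made up.
func main() {
    re := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
    m := re.FindStringSubmatch("b2-custom://b2.example.com/my-bucket/backups/laptop")
    fmt.Println("downloadURL:", "https://"+m[1]) // https://b2.example.com
    fmt.Println("bucket:     ", m[2])            // my-bucket
    fmt.Println("storageDir: ", m[4])            // backups/laptop
}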
@@ -564,26 +624,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         SavePassword(preference, "gcs_token", tokenFile)
         return gcsStorage
     } else if matched[1] == "gcd" {
+        // Handle writing directly to the root of the drive
+        // For gcd://driveid@/, driveid@ is match[3] not match[2]
+        if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
+            matched[2], matched[3] = matched[3], matched[2]
+        }
+        driveID := matched[2]
+        if driveID != "" {
+            driveID = driveID[:len(driveID)-1]
+        }
         storagePath := matched[3] + matched[4]
         prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
         tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
-        gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
+        gcdStorage, err := CreateGCDStorage(tokenFile, driveID, storagePath, threads)
         if err != nil {
             LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
             return nil
         }
         SavePassword(preference, "gcd_token", tokenFile)
         return gcdStorage
-    } else if matched[1] == "one" {
+    } else if matched[1] == "one" || matched[1] == "odb" {
         storagePath := matched[3] + matched[4]
         prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
-        tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
-        oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
+        tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
+        oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
         if err != nil {
             LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
             return nil
         }
-        SavePassword(preference, "one_token", tokenFile)
+        SavePassword(preference, matched[1] + "_token", tokenFile)
         return oneDriveStorage
     } else if matched[1] == "hubic" {
         storagePath := matched[3] + matched[4]
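For gcd://driveid@/ URLs that write to the root of a shared drive, the drive id lands in a different capture group than usual, hence the swap above. A runnable illustration of just that swap logic; the matched slice mimics what the storage URL regex would produce, and the drive id is fabricated:

package main

import (
    "fmt"
    "strings"
)

// The matched slice below is a hand-written stand-in for the regex
// output when writing to the root of a shared drive.
func main() {
    matched := []string{"gcd://0ABCdrive@/", "gcd", "", "0ABCdrive@", ""}
    if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
        matched[2], matched[3] = matched[3], matched[2]
    }
    driveID := matched[2]
    if driveID != "" {
        driveID = driveID[:len(driveID)-1] // strip the trailing '@'
    }
    storagePath := matched[3] + matched[4]
    fmt.Printf("driveID=%q storagePath=%q\n", driveID, storagePath) // "0ABCdrive", ""
}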
@@ -609,6 +678,10 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
     } else if matched[1] == "webdav" || matched[1] == "webdav-http" {
         server := matched[3]
         username := matched[2]
+        if username == "" {
+            LOG_ERROR("STORAGE_CREATE", "No username is provided to access the WebDAV storage")
+            return nil
+        }
         username = username[:len(username)-1]
         storageDir := matched[5]
         port := 0
@@ -629,6 +702,18 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         }
         SavePassword(preference, "webdav_password", password)
         return webDAVStorage
+    } else if matched[1] == "fabric" {
+        endpoint := matched[3]
+        storageDir := matched[5]
+        prompt := fmt.Sprintf("Enter the token for accessing the Storage Made Easy File Fabric storage:")
+        token := GetPassword(preference, "fabric_token", prompt, true, resetPassword)
+        smeStorage, err := CreateFileFabricStorage(endpoint, token, storageDir, threads)
+        if err != nil {
+            LOG_ERROR("STORAGE_CREATE", "Failed to load the File Fabric storage at %s: %v", storageURL, err)
+            return nil
+        }
+        SavePassword(preference, "fabric_token", token)
+        return smeStorage
     } else {
         LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
         return nil
@@ -27,6 +27,8 @@ var testRateLimit int
 var testQuickMode bool
 var testThreads int
 var testFixedChunkSize bool
+var testRSAEncryption bool
+var testErasureCoding bool

 func init() {
     flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -34,6 +36,8 @@ func init() {
     flag.BoolVar(&testQuickMode, "quick", false, "quick test")
     flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
     flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
+    flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
+    flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding")
     flag.Parse()
 }

@@ -80,12 +84,12 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
         return storage, err
     } else if testStorageName == "s3" {
         storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
-        return storage, err
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+        return storage, err
     } else if testStorageName == "wasabi" {
         storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
-        return storage, err
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+        return storage, err
     } else if testStorageName == "s3c" {
         storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
@@ -107,7 +111,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
         return storage, err
     } else if testStorageName == "b2" {
-        storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], threads)
+        storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
         return storage, err
     } else if testStorageName == "gcs-s3" {
@@ -131,11 +135,23 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
         return storage, err
     } else if testStorageName == "gcd" {
-        storage, err := CreateGCDStorage(config["token_file"], config["storage_path"], threads)
+        storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
+        storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+        return storage, err
+    } else if testStorageName == "gcd-shared" {
+        storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
         return storage, err
     } else if testStorageName == "one" {
-        storage, err := CreateOneDriveStorage(config["token_file"], config["storage_path"], threads)
+        storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
+        storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+        return storage, err
+    } else if testStorageName == "odb" {
+        storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
+        storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+        return storage, err
+    } else if testStorageName == "one" {
+        storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
         return storage, err
     } else if testStorageName == "hubic" {
@@ -153,10 +169,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
         }
         storage.SetDefaultNestingLevels([]int{2, 3}, 2)
         return storage, err
-    } else {
-        return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
     }

     return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
 }

@@ -299,7 +312,8 @@ func TestStorage(t *testing.T) {

     LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

-    storage, err := loadStorage(testDir, 1)
+    threads := 8
+    storage, err := loadStorage(testDir, threads)
     if err != nil {
         t.Errorf("Failed to create storage: %v", err)
         return
@@ -329,16 +343,16 @@ func TestStorage(t *testing.T) {
     storage.CreateDirectory(0, "shared")

     // Upload to the same directory by multiple goroutines
-    count := 8
+    count := threads
     finished := make(chan int, count)
     for i := 0; i < count; i++ {
-        go func(name string) {
-            err := storage.UploadFile(0, name, []byte("this is a test file"))
+        go func(threadIndex int, name string) {
+            err := storage.UploadFile(threadIndex, name, []byte("this is a test file"))
             if err != nil {
                 t.Errorf("Failed to upload '%s': %v", name, err)
             }
             finished <- 0
-        }(fmt.Sprintf("shared/a/b/c/%d", i))
+        }(i, fmt.Sprintf("shared/a/b/c/%d", i))
     }

     for i := 0; i < count; i++ {
@@ -387,7 +401,6 @@ func TestStorage(t *testing.T) {

     snapshotIDs := []string{}
     for _, snapshotDir := range snapshotDirs {
-        LOG_INFO("debug", "snapshot dir: %s", snapshotDir)
         if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
             snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
         }
@@ -129,6 +129,11 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
         TrustId: arguments["trust_id"],
     }

+    err = connection.Authenticate()
+    if err != nil {
+        return nil, err
+    }
+
     _, _, err = connection.Container(container)
     if err != nil {
         return nil, err
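Authenticating explicitly before touching the container makes credential problems surface immediately rather than from the first container request. Connection.Authenticate and Connection.Container belong to the ncw/swift package this storage already depends on; the endpoint and credentials below are placeholders:

package main

import (
    "fmt"

    "github.com/ncw/swift"
)

// A minimal sketch of the fail-fast pattern: authenticate first, then
// touch the container.
func main() {
    c := swift.Connection{
        UserName: "demo",
        ApiKey:   "secret",
        AuthUrl:  "https://auth.example.com/v1.0",
    }
    if err := c.Authenticate(); err != nil {
        fmt.Println("authentication failed:", err) // reported before any container call
        return
    }
    if _, _, err := c.Container("backups"); err != nil {
        fmt.Println("container check failed:", err)
    }
}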
@@ -10,10 +10,7 @@ import (
     "fmt"
     "io"
     "os"
-    "path"
-    "path/filepath"
     "regexp"
-    "runtime"
     "strconv"
     "strings"
     "time"
@@ -58,7 +55,7 @@ func IsEmptyFilter(pattern string) bool {
 }

 func IsUnspecifiedFilter(pattern string) bool {
-    if pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e' {
+    if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
         return true
     } else {
         return false
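The old single-byte test treated any pattern beginning with 'i' or 'e' as carrying a mode prefix, so an ordinary pattern like "ia" was misread as a regex include filter; the fixed check requires the full "i:" or "e:" prefix, and the tests added later in this diff enumerate exactly these cases. A small standalone reproduction:

package main

import (
    "fmt"
    "strings"
)

// isUnspecifiedFilter mirrors the fixed check: only '+', '-', "i:" and
// "e:" count as explicit include/exclude prefixes.
func isUnspecifiedFilter(pattern string) bool {
    return pattern[0] != '+' && pattern[0] != '-' &&
        !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:")
}

func main() {
    // "ia" starts with 'i' but is a plain pattern; the old single-byte
    // check misclassified it as a regex include filter.
    for _, p := range []string{"+a", "i:a", "ia", "a"} {
        fmt.Printf("%-4s unspecified=%t\n", p, isUnspecifiedFilter(p))
    }
}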
@@ -176,6 +173,15 @@ func GetPasswordFromPreference(preference Preference, passwordType string) strin
         if password, found := os.LookupEnv(name); found && password != "" {
             return password
         }
+
+        re := regexp.MustCompile(`[^a-zA-Z0-9_]`)
+        namePlain := re.ReplaceAllString(name, "_")
+        if namePlain != name {
+            LOG_DEBUG("PASSWORD_ENV_VAR", "Reading the environment variable %s", namePlain)
+            if password, found := os.LookupEnv(namePlain); found && password != "" {
+                return password
+            }
+        }
     }

     // If the password is stored in the preference, there is no need to include the storage name
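Shells cannot export variable names containing characters such as '-' or '@', so the lookup now retries with every character outside [a-zA-Z0-9_] replaced by '_'. A worked example of the substitution; the variable name follows the usual DUPLICACY_* convention but is otherwise illustrative:

package main

import (
    "fmt"
    "regexp"
)

// Same sanitization the new lookup applies before retrying os.LookupEnv.
func main() {
    re := regexp.MustCompile(`[^a-zA-Z0-9_]`)
    name := "DUPLICACY_my-backup_SSH_PASSWORD"
    namePlain := re.ReplaceAllString(name, "_")
    fmt.Println(namePlain) // DUPLICACY_my_backup_SSH_PASSWORD
}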
@@ -390,19 +396,6 @@ func MatchPath(filePath string, patterns []string) (included bool) {
     }
 }

-func joinPath(components ...string) string {
-
-    combinedPath := path.Join(components...)
-    if len(combinedPath) > 257 && runtime.GOOS == "windows" {
-        combinedPath = `\\?\` + filepath.Join(components...)
-        // If the path is on a samba drive we must use the UNC format
-        if strings.HasPrefix(combinedPath, `\\?\\\`) {
-            combinedPath = `\\?\UNC\` + combinedPath[6:]
-        }
-    }
-    return combinedPath
-}
-
 func PrettyNumber(number int64) string {

     G := int64(1024 * 1024 * 1024)
@@ -441,7 +434,7 @@ func PrettyTime(seconds int64) string {
             seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
     } else if seconds > day {
         return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
-    } else if seconds > 0 {
+    } else if seconds >= 0 {
         return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
     } else {
         return "n/a"
@@ -467,10 +460,3 @@ func AtoSize(sizeString string) int {

     return size
 }
-
-func MinInt(x, y int) int {
-    if x < y {
-        return x
-    }
-    return y
-}
@@ -9,6 +9,7 @@ package duplicacy
 import (
     "bytes"
     "os"
+    "path"
     "path/filepath"
     "syscall"
 
@@ -83,3 +84,11 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
     }
 
 }
+
+func joinPath(components ...string) string {
+    return path.Join(components...)
+}
+
+func SplitDir(fullPath string) (dir string, file string) {
+    return path.Split(fullPath)
+}
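On POSIX platforms the new helpers are thin wrappers over the standard path package; the Windows file further down supplies `\\?\`-aware versions of the same two functions, and the build picks one pair per platform. For reference, `path.Split` keeps the trailing slash on the directory part, which is the contract `SplitDir` inherits:

package main

import (
    "fmt"
    "path"
)

func main() {
    dir, file := path.Split("snapshots/repo/1")
    fmt.Printf("dir=%q file=%q\n", dir, file) // dir="snapshots/repo/" file="1"
}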
@@ -92,6 +92,17 @@ func TestMatchPattern(t *testing.T) {
         }
     }
 
+    for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
+        if IsUnspecifiedFilter(pattern) {
+            t.Errorf("pattern %s has a specified filter", pattern)
+        }
+    }
+
+    for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} {
+        if !IsUnspecifiedFilter(pattern) {
+            t.Errorf("pattern %s does not have a specified filter", pattern)
+        }
+    }
 }
 
 func TestRateLimit(t *testing.T) {
@@ -7,6 +7,8 @@ package duplicacy
 import (
     "fmt"
     "os"
+    "path/filepath"
+    "strings"
     "syscall"
     "unsafe"
 )
@@ -114,3 +116,18 @@ func (entry *Entry) ReadAttributes(top string) {
 func (entry *Entry) SetAttributesToFile(fullPath string) {
 
 }
+
+func joinPath(components ...string) string {
+
+    combinedPath := `\\?\` + filepath.Join(components...)
+    // If the path is on a samba drive we must use the UNC format
+    if strings.HasPrefix(combinedPath, `\\?\\\`) {
+        combinedPath = `\\?\UNC\` + combinedPath[6:]
+    }
+    return combinedPath
+}
+
+func SplitDir(fullPath string) (dir string, file string) {
+    i := strings.LastIndex(fullPath, "\\")
+    return fullPath[:i+1], fullPath[i+1:]
+}
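The Windows variant always applies the `\\?\` extended-length prefix, which lifts the roughly 260-character MAX_PATH limit, and then repairs UNC paths: a share path such as `\\server\share` would otherwise become the invalid `\\?\\\server\share`, so it is rewritten to the `\\?\UNC\server\share` form. The rewrite in isolation (compiles anywhere, though the separators only make sense on Windows):

package main

import (
    "fmt"
    "path/filepath"
    "strings"
)

func joinLongPath(components ...string) string {
    // \\?\ bypasses MAX_PATH; it also disables normalization, so the
    // joined path must already be clean (filepath.Join cleans it).
    combined := `\\?\` + filepath.Join(components...)
    // \\?\ followed by a UNC path must use the \\?\UNC\ spelling instead.
    if strings.HasPrefix(combined, `\\?\\\`) {
        combined = `\\?\UNC\` + combined[6:]
    }
    return combined
}

func main() {
    fmt.Println(joinLongPath(`\\server\share`, `backup`))
}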
@@ -93,49 +93,49 @@ func (storage *WasabiStorage) DeleteFile(
 // rename. It's designed to get the job done with as few dependencies
 // on other packages as possible rather than being somethng
 // general-purpose and reusable.
-func (storage *WasabiStorage) MoveFile(
-    threadIndex int, from string, to string,
-) (err error) {
+func (storage *WasabiStorage) MoveFile(threadIndex int, from string, to string) (err error) {
 
-    var from_path string
+    var fromPath string
     // The from path includes the bucket. Take care not to include an empty storageDir
     // string as Wasabi's backend will return 404 on URLs with double slashes.
-    if (storage.storageDir == "") {
-        from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
+    if storage.storageDir == "" {
+        fromPath = fmt.Sprintf("/%s/%s", storage.bucket, from)
     } else {
-        from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
+        fromPath = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
     }
 
-    object := fmt.Sprintf("https://%s@%s%s",
-        storage.region, storage.endpoint, from_path)
+    object := fmt.Sprintf("https://%s@%s%s", storage.region, storage.endpoint, fromPath)
 
+    toPath := to
     // The object's new name is relative to the top of the bucket.
-    new_name := fmt.Sprintf("%s/%s", storage.storageDir, to)
+    if storage.storageDir != "" {
+        toPath = fmt.Sprintf("%s/%s", storage.storageDir, to)
+    }
 
     timestamp := time.Now().Format(time.RFC1123Z)
 
-    signing_string := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, from_path)
+    signingString := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)
 
     signer := hmac.New(sha1.New, []byte(storage.secret))
-    signer.Write([]byte(signing_string))
+    signer.Write([]byte(signingString))
 
     signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))
 
     authorization := fmt.Sprintf("AWS %s:%s", storage.key, signature)
 
-    request, error := http.NewRequest("MOVE", object, nil)
-    if error != nil {
-        return error
+    request, err := http.NewRequest("MOVE", object, nil)
+    if err != nil {
+        return err
     }
     request.Header.Add("Authorization", authorization)
     request.Header.Add("Date", timestamp)
-    request.Header.Add("Destination", new_name)
+    request.Header.Add("Destination", toPath)
     request.Header.Add("Host", storage.endpoint)
     request.Header.Add("Overwrite", "true")
 
-    response, error := storage.client.Do(request)
-    if error != nil {
-        return error
+    response, err := storage.client.Do(request)
+    if err != nil {
+        return err
     }
     defer response.Body.Close()
 
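Besides converting the snake_case names to Go-style camelCase and folding the error variable into the declared `err` return, the rewrite fixes two empty-storageDir bugs: the source path no longer contains a double slash (which Wasabi answers with 404), and the Destination header no longer gains a spurious leading "/". The signing itself is the AWS signature-v2 scheme, HMAC-SHA1 over a fixed string-to-sign, applied to the non-standard MOVE verb; here it is in isolation (key, secret and path are dummies):

package main

import (
    "crypto/hmac"
    "crypto/sha1"
    "encoding/base64"
    "fmt"
    "time"
)

func main() {
    key := "dummy-access-key"
    secret := "dummy-secret"
    fromPath := "/bucket/chunks/0a1b2c"

    timestamp := time.Now().Format(time.RFC1123Z)
    // v2 string-to-sign: VERB\nContent-MD5\nContent-Type\nDate\nResource,
    // with MD5 and Content-Type left empty for a MOVE.
    stringToSign := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)

    mac := hmac.New(sha1.New, []byte(secret))
    mac.Write([]byte(stringToSign))
    signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))

    fmt.Printf("Authorization: AWS %s:%s\n", key, signature)
    fmt.Printf("Date: %s\n", timestamp)
}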
@@ -14,14 +14,14 @@ import (
     "errors"
     "fmt"
     "io"
-    "io/ioutil"
     "math/rand"
     "net/http"
     //"net/http/httputil"
     "strconv"
+    "strings"
     "sync"
     "time"
-    "strings"
+    "io/ioutil"
 )
 
 type WebDAVStorage struct {
@@ -49,7 +49,7 @@ var (
 )
 
 func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
-    if storageDir[len(storageDir)-1] != '/' {
+    if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
         storageDir += "/"
     }
 
@@ -59,7 +59,7 @@ func CreateWebDAVStorage(host string, port int, username string, password string
         username:   username,
         password:   password,
         storageDir: "",
-        useHTTP:    false,
+        useHTTP:    useHTTP,
 
         client:  http.DefaultClient,
         threads: threads,
@@ -128,7 +128,12 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
         dataReader = bytes.NewReader(data)
     } else if method == "PUT" {
         headers["Content-Type"] = "application/octet-stream"
+        headers["Content-Length"] = fmt.Sprintf("%d", len(data))
+        if storage.UploadRateLimit <= 0 {
+            dataReader = bytes.NewReader(data)
+        } else {
             dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
+        }
     } else if method == "MOVE" {
         headers["Destination"] = storage.createConnectionString(string(data))
         headers["Content-Type"] = "application/octet-stream"
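The PUT branch now advertises the payload size up front and only pays for throttling when a limit is actually configured, with the per-storage budget divided evenly among upload threads. `CreateRateLimitedReader` is Duplicacy's own helper; the sketch below shows one simple way such a throttled reader can be built (it is not the real implementation):

package main

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "time"
)

// rateLimitedReader throttles Read calls to roughly rateKB KiB per second.
type rateLimitedReader struct {
    r      io.Reader
    rateKB int
}

func (l *rateLimitedReader) Read(p []byte) (int, error) {
    // Cap each Read at one second's worth of budget.
    if limit := l.rateKB * 1024; len(p) > limit {
        p = p[:limit]
    }
    start := time.Now()
    n, err := l.r.Read(p)
    // Stretch the call so n bytes take n/(rateKB*1024) seconds in total.
    want := time.Duration(float64(n) / float64(l.rateKB*1024) * float64(time.Second))
    if elapsed := time.Since(start); elapsed < want {
        time.Sleep(want - elapsed)
    }
    return n, err
}

func main() {
    data := bytes.Repeat([]byte("x"), 8*1024)
    start := time.Now()
    io.Copy(ioutil.Discard, &rateLimitedReader{r: bytes.NewReader(data), rateKB: 16})
    fmt.Printf("8 KiB at ~16 KiB/s took %v\n", time.Since(start).Round(time.Millisecond))
}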
@@ -151,12 +156,16 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
         request.Header.Set(key, value)
     }
 
+    if method == "PUT" {
+        request.ContentLength = int64(len(data))
+    }
+
     //requestDump, err := httputil.DumpRequest(request, true)
     //LOG_INFO("debug", "Request: %s", requestDump)
 
     response, err := storage.client.Do(request)
     if err != nil {
-        LOG_TRACE("WEBDAV_RETRY", "URL request '%s %s' returned an error (%v)", method, uri, err)
+        LOG_TRACE("WEBDAV_ERROR", "URL request '%s %s' returned an error (%v)", method, uri, err)
         backoff = storage.retry(backoff)
         continue
     }
@@ -165,11 +174,13 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
         return response.Body, response.Header, nil
     }
 
+    io.Copy(ioutil.Discard, response.Body)
+    response.Body.Close()
+
     if response.StatusCode == 301 {
         return nil, nil, errWebDAVMovedPermanently
     }
 
-    response.Body.Close()
     if response.StatusCode == 404 {
         // Retry if it is UPLOAD, otherwise return immediately
         if method != "PUT" {
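Every non-2xx path now drains the response body before closing it. This is the standard net/http idiom: the transport only returns a connection to the keep-alive pool once the body has been read to EOF, so skipping the drain forces a fresh TCP (and TLS) handshake on the next request, which is costly for a backend issuing thousands of small PROPFIND/PUT calls. The pattern on its own (the URL is a placeholder):

package main

import (
    "io"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    resp, err := http.Get("https://example.com/")
    if err != nil {
        log.Fatal(err)
    }
    // Drain to EOF, then close: this lets the transport reuse the
    // keep-alive connection instead of discarding it.
    io.Copy(ioutil.Discard, resp.Body)
    resp.Body.Close()
}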
@@ -210,6 +221,8 @@ type WebDAVMultiStatus struct {
 
 func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {
 
+    maxTries := 3
+    for tries := 0; ; tries++ {
     propfind := "<prop>"
     for _, p := range properties {
         propfind += fmt.Sprintf("<%s/>", p)
@@ -223,14 +236,15 @@ func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ..
         return nil, err
     }
     defer readCloser.Close()
-    content, err := ioutil.ReadAll(readCloser)
-    if err != nil {
-        return nil, err
-    }
+    defer io.Copy(ioutil.Discard, readCloser)
 
     object := WebDAVMultiStatus{}
-    err = xml.Unmarshal(content, &object)
+    err = xml.NewDecoder(readCloser).Decode(&object)
     if err != nil {
+        if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
+            LOG_WARN("WEBDAV_RETRY", "Retrying on %v", err)
+            continue
+        }
         return nil, err
     }
 
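Two things change in this hunk: the PROPFIND response is decoded directly from the stream with `xml.NewDecoder(...).Decode` instead of being buffered whole through `ioutil.ReadAll`, and a truncated response, which surfaces as an "unexpected EOF" decode error, is retried up to `maxTries` times via the loop introduced above. The retry shape, stripped to its essentials (the type and the fetch stub are stand-ins for the real request):

package main

import (
    "bytes"
    "encoding/xml"
    "fmt"
    "io"
    "strings"
)

type multiStatus struct {
    Responses []string `xml:"response"`
}

// fetch stands in for sendRequest; the real code re-issues the PROPFIND.
func fetch() io.Reader {
    return bytes.NewReader([]byte(`<multistatus><response>a</response></multistatus>`))
}

func main() {
    maxTries := 3
    for tries := 0; ; tries++ {
        var object multiStatus
        if err := xml.NewDecoder(fetch()).Decode(&object); err != nil {
            // A body cut short mid-document decodes to "unexpected EOF".
            if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
                continue
            }
            fmt.Println("giving up:", err)
            return
        }
        fmt.Println("decoded", len(object.Responses), "responses")
        return
    }
}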
@@ -258,6 +272,7 @@ func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ..
 
         return responses, nil
     }
+    }
 
 // ListFiles return the list of files and subdirectories under 'dir'. A subdirectories returned must have a trailing '/', with
 // a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
@@ -305,6 +320,12 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st
             }
             files = append(files, file)
             sizes = append(sizes, int64(0))
+
+            // Add the directory to the directory cache
+            storage.directoryCacheLock.Lock()
+            storage.directoryCache[dir + file] = 1
+            storage.directoryCacheLock.Unlock()
+
         }
     }
 
@@ -313,6 +334,7 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st
 
 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
+
     properties, err := storage.getProperties(filePath, 0, "getcontentlength", "resourcetype")
     if err != nil {
         if err == errWebDAVNotExist {
@@ -325,7 +347,14 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
         return false, false, 0, err
     }
 
-    if m, exist := properties["/" + storage.storageDir + filePath]; !exist {
+    m, exist := properties["/"+storage.storageDir+filePath]
+
+    // If no properties exist for the given filePath, remove the trailing / from filePath and search again
+    if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' {
+        m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]]
+    }
+
+    if !exist {
         return false, false, 0, nil
     } else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
         return true, true, 0, nil
@@ -343,6 +372,7 @@ func (storage *WebDAVStorage) DeleteFile(threadIndex int, filePath string) (err
     if err != nil {
         return err
     }
+    io.Copy(ioutil.Discard, readCloser)
     readCloser.Close()
     return nil
 }
@@ -353,6 +383,7 @@ func (storage *WebDAVStorage) MoveFile(threadIndex int, from string, to string)
     if err != nil {
         return err
     }
+    io.Copy(ioutil.Discard, readCloser)
     readCloser.Close()
     return nil
 }
@@ -366,21 +397,7 @@ func (storage *WebDAVStorage) createParentDirectory(threadIndex int, dir string)
     }
     parent := dir[:found]
 
-    storage.directoryCacheLock.Lock()
-    _, exist := storage.directoryCache[parent]
-    storage.directoryCacheLock.Unlock()
-
-    if exist {
-        return nil
-    }
-
-    err = storage.CreateDirectory(threadIndex, parent)
-    if err == nil {
-        storage.directoryCacheLock.Lock()
-        storage.directoryCache[parent] = 1
-        storage.directoryCacheLock.Unlock()
-    }
-    return err
+    return storage.CreateDirectory(threadIndex, parent)
 }
 
 // CreateDirectory creates a new directory.
@@ -393,18 +410,35 @@ func (storage *WebDAVStorage) CreateDirectory(threadIndex int, dir string) (err
         return nil
     }
 
+    storage.directoryCacheLock.Lock()
+    _, exist := storage.directoryCache[dir]
+    storage.directoryCacheLock.Unlock()
+
+    if exist {
+        return nil
+    }
+
     // If there is an error in creating the parent directory, proceed anyway
     storage.createParentDirectory(threadIndex, dir)
 
     readCloser, _, err := storage.sendRequest("MKCOL", dir, 0, []byte(""))
     if err != nil {
-        if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently {
+        if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently || err == io.EOF {
             // We simply ignore these errors and assume that the directory already exists
+            LOG_TRACE("WEBDAV_MKDIR", "Can't create directory %s: %v; error ignored", dir, err)
+            storage.directoryCacheLock.Lock()
+            storage.directoryCache[dir] = 1
+            storage.directoryCacheLock.Unlock()
             return nil
         }
         return err
     }
+    io.Copy(ioutil.Discard, readCloser)
     readCloser.Close()
 
+    storage.directoryCacheLock.Lock()
+    storage.directoryCache[dir] = 1
+    storage.directoryCacheLock.Unlock()
     return nil
 }
 
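With this hunk the cache bookkeeping that used to live in `createParentDirectory` is centralized in `CreateDirectory`: a directory is recorded once it has been created, or once MKCOL fails in a way that indicates it already exists (errWebDAVMethodNotAllowed, errWebDAVMovedPermanently, or now a bare io.EOF), and later calls return without a network round trip. The map-plus-mutex shape in isolation (a sketch; the real code keys on storage-relative paths):

package main

import (
    "fmt"
    "sync"
)

type dirCache struct {
    lock  sync.Mutex
    known map[string]int
}

// createDirectory performs the (stubbed) MKCOL only once per directory.
func (c *dirCache) createDirectory(dir string) {
    c.lock.Lock()
    _, exist := c.known[dir]
    c.lock.Unlock()
    if exist {
        return // seen before; skip the round trip
    }
    fmt.Println("MKCOL", dir) // the real code sends the request here
    c.lock.Lock()
    c.known[dir] = 1
    c.lock.Unlock()
}

func main() {
    cache := &dirCache{known: map[string]int{}}
    cache.createDirectory("chunks/")
    cache.createDirectory("chunks/") // no output; cached
}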
@@ -429,6 +463,7 @@ func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, conte
     if err != nil {
         return err
     }
+    io.Copy(ioutil.Discard, readCloser)
     readCloser.Close()
     return nil
 }