mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits

51 Commits

Author SHA1 Message Date
Gilbert Chen
4c3557eb80 Bump version to 2.5.0 2020-04-09 23:22:32 -04:00
Gilbert Chen
eebcece9e0 Update github.com/aws/aws-sdk-go and google.golang.org/api to the latest 2020-04-09 23:21:55 -04:00
gilbertchen
8c80470c29 Merge pull request #593 from freaksdotcom/readall
Call ReadAll() on the body io.ReadCloser to allow the http keepalive connection to be reused.
2020-04-09 21:12:41 -04:00
Gilbert Chen
bcb889272d Add test for Google Shared Drive 2020-04-09 00:04:30 -04:00
Gilbert Chen
79d8654a12 Allow the name of Google Shared Drive to be used in the storage url
The previous PR only accepted the id of the shared drive, which is not very
memorable.  This commit makes it possible to specify the drive by id or by name.
2020-04-08 23:59:15 -04:00
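For illustration (drive name and id hypothetical), both of these forms would now be accepted:

gcd://TeamBackups@path/to/storage
gcd://0AAbCdEfGhIjKlUk9PVA@path/to/storage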
Brandon High
6bf0d2265c Call ReadAll() on the body io.ReadCloser to allow the http keepalive connection to be reused. 2020-04-07 22:07:41 -07:00
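A minimal standalone Go sketch of this pattern (not the project's actual code): the response body must be read to EOF before it is closed, or the underlying connection cannot be returned to the keepalive pool.

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

// fetch returns the response body, draining it fully so that the HTTP
// transport can reuse the connection for subsequent requests.
func fetch(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return ioutil.ReadAll(resp.Body) // read to EOF before Close
}

func main() {
	body, err := fetch("https://example.com/")
	fmt.Println(len(body), err)
}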
Gilbert Chen
749db78a1f Implemented a global option to suppress logs by ids
You can now use -suppress LOGID or -s LOGID to skip printing logs with the given
ids.  This is a global option, which means it applies to all commands.
It can be specified more than once.
2020-04-07 23:22:17 -04:00
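A hypothetical invocation (log ids illustrative):

duplicacy -suppress SNAPSHOT_FILTER backup
duplicacy -s SNAPSHOT_FILTER -s BACKUP_STATS backup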
Gilbert Chen
0a51bd8d1a Make log.Printf print to Duplicacy's logging system 2020-04-07 13:52:54 -04:00
Gilbert Chen
7208adbce2 Access Google Drive via service account.
Our GCS backend already supports service accounts.  This just copies the relevant
code from there.
2020-04-06 23:25:47 -04:00
gilbertchen
e827662869 Merge pull request #579 from rsanger/master
Add support for Shared Google Drives
2020-04-06 22:55:46 -04:00
gilbertchen
57dd5ba927 Merge pull request #590 from fbarthez/macos_sync_error
Fix "Failed to upload the chunk ... sync ...: operation not supported"
2020-04-06 22:54:44 -04:00
Gilbert Chen
01a37b7828 Fixed a typo in command line arguments 2020-04-06 22:13:08 -04:00
Gilbert Chen
57cd20bb84 Fixed the condition to show 'chunks are encrypted' messages
The 'File/Metadata chunks are encrypted' messages were always shown even if the
storage wasn't encrypted.
2020-04-06 12:22:47 -04:00
Gilbert Chen
0e970da222 Fixed build errors in tests caused by snapshotManager.CheckSnapshots 2020-04-06 12:19:42 -04:00
Gilbert Chen
e880636502 Fixed test build errors caused by the prototype change in CheckSnapshots() 2020-03-30 17:49:26 -04:00
Gilbert Chen
810303ce25 Fail the backup if the repository can't be accessed or there are no files
This is mainly to avoid creating an empty snapshot when a drive/share is
not mounted which causes the subsequent backup to scan all files again.
2020-03-30 17:44:53 -04:00
Gilbert Chen
ffac83dd80 Assume the signed certificate of an ssh key file has the suffix '-cert.pub'.
So if the ssh key file is 'mykey' then Duplicacy will check if the signed
certificate can be loaded from the file 'mykey-cert.pub'.  This avoids the
use of another preference variable 'ssh_cert_file'.
2020-03-25 23:50:35 -04:00
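This mirrors OpenSSH's own convention; for example, signing a public key with a CA
(file names illustrative) writes the certificate next to the key:

ssh-keygen -s ca_key -I my-identity mykey.pub   # creates mykey-cert.pub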
gilbertchen
05674871fe Merge pull request #547 from philband/ssh_signed_certificate
Add option to use a ssh key signed with a certificate to authenticate
2020-03-25 23:14:30 -04:00
Gilbert Chen
22d6f3abfc Add a -chunks option to the check command to verify the integrity of chunks
This option will download and verify every chunk.  Unlike the -files option,
this option only downloads each chunk once.  There is also a new -threads
option to use multiple threads to download chunks.
2020-03-24 20:58:45 -04:00
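A hypothetical invocation combining the two new options:

duplicacy check -chunks -threads 4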
Gilbert Chen
d26ffe2cff Add support for OneDrive for Business
The new storage prefix for OneDrive for Business is odb://

The token file can be downloaded from https://duplicacy.com/odb_start

OneDrive for Business requires basically the same set of API calls, just with
different endpoints.  However, one major difference is that for files larger
than 4MB, an upload session must be created first, which is then used to upload
the file content.  Other than that, there are a few minor differences, such as
the handling of creating an existing directory or moving files to a non-existent
directory.
2020-03-19 14:59:26 -04:00
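An illustrative init against the new backend (snapshot id and storage path hypothetical):

duplicacy init work-docs odb://backups/work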
Fabian Peters
a35f6c27be Fix "Failed to upload the chunk ... sync ...: operation not supported" issue when using SMB on MacOS. This is done by inspecting the error type and returning the error only if its operation is "sync" and the type is "operation not supported".
Note: this change is my first ever foray into go and based simply on the information provided in: https://forum.duplicacy.com/t/failed-to-upload-the-chunk-operation-not-supported/2875/11
2020-03-16 15:56:31 +01:00
Gilbert Chen
808ae4eb75 Bump version to 2.4.1 2020-03-13 20:44:00 -04:00
Gilbert Chen
6699e2f440 Fixed a bug that disabled RSA when copying from a non RSA-encrypted storage.
When copying to an RSA-encrypted storage, we relied on the RSA encryption
version to determine if a chunk is a snapshot chunk or a file chunk.  This is
wrong when the source storage is not encrypted or not RSA-encrypted.  There
is a more reliable way to determine whether a chunk is a snapshot chunk or not.
2020-03-13 20:13:27 -04:00
Gilbert Chen
733b68be2c Do not take an RSA private key if the storage wasn't RSA encrypted. 2020-03-11 23:14:01 -04:00
Gilbert Chen
b61906c99e Bump version to 2.4.0 2020-03-05 22:06:24 -05:00
gilbertchen
a0a07d18cc Merge pull request #589 from fracai/b2_download_url
support downloading from a custom URL pointed at B2
2020-03-05 22:00:53 -05:00
Gilbert Chen
a6ce64e715 Fixed handling of repository ids with spaces in the b2 backend
Usually a repository id should not contain spaces or other non-alphanumeric
characters, but if it does, we should be able to handle it correctly.  This
commit fixes the b2 backend to convert file names in a proper way.
2020-03-05 14:45:09 -05:00
Arno Hautala
499b612a0d moving download url config from a key to the storage url pattern 2020-02-25 20:53:19 -05:00
Arno Hautala
46ce0ba1fb support downloading from a custom URL pointed at B2 2020-02-22 22:12:36 -05:00
Gilbert Chen
cc88abd547 Fixed a bug that caused all copied chunks to be RSA encrypted
The field encryptionVersion in the Chunk struct is supposed to pass the status
of RSA encryption from a source chunk to a destination chunk in a copy command.
This field needs to be a 3-state boolean in order to pass the status correctly.
2020-02-13 14:03:07 -05:00
Gilbert Chen
e888b6d7e5 Fix bugs in sftp retrying
* Fixed a bug caused by a nil sftp client during retry
* Simplified the retry logic in UploadFile
* Changed the number of tries from 6 to 8
2020-01-13 16:26:43 -05:00
Richard Sanger
aa07feeac0 Fix bug in gcd: init fails to create directories
init would not create directories in the root of a drive as
it did not know the root drive's ID.
2020-01-11 17:25:21 +13:00
Gilbert Chen
d43fe1a282 Release the list of chunk hashes after processing each snapshot.
The chunk hash list isn't needed any more after being consolidated.
Releasing it immediately after use helps reduce the memory usage.
2019-12-09 22:45:16 -05:00
Richard Sanger
7719bb9f29 Fix: backup to shared drive root
Allows writing to the drive root using:
gcd://driveid@ or gcd://driveid@/

To write to the root of the default user's drive use the special
shared drive named 'root':
gcd://root@/
2019-11-27 00:00:40 +13:00
Gilbert Chen
504d07bd51 Bump version to 2.3.0 2019-11-25 15:45:41 -05:00
Gilbert Chen
0abb4099f6 Fixed test errors -- parse test flags in one place 2019-11-25 15:44:03 -05:00
Gilbert Chen
694494ea54 Throw an error, instead of a warning, if pre/post script fails 2019-11-24 22:38:29 -05:00
Gilbert Chen
165152493c For the check command, -tabular should imply -all just like -stats 2019-11-24 20:45:05 -05:00
Gilbert Chen
e02041f4ed Increase the number of retries for the b2 backend from 10 to 15
Retrying 10 times means a retry window of about 5 minutes, which might be too
short.  15 corresponds to about 10 minutes.
2019-11-23 15:28:03 -05:00
Gilbert Chen
a99f059b52 Allow a custom location for the filters file
You can now add a key 'filters' in the preferences file that points to the
path of the filters file.  If this key is not found in the preferences,
the default location '.duplicacy/filters' is used.

There is a new option '-filters' for the set command that sets this key in
the preferences, but you can also edit the file directly.
2019-11-23 15:23:26 -05:00
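For example (path illustrative):

duplicacy set -filters /path/to/my/filters

after which the include/exclude patterns are read from that file instead of
'.duplicacy/filters'.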
Gilbert Chen
f022a6f684 Fixed build errors in tests 2019-11-22 21:17:17 -05:00
Gilbert Chen
791c61eecb Fixed missing format parameters 2019-11-22 20:32:19 -05:00
gilbertchen
6ad27adaea Merge pull request #578 from gboudreau/vss-catalina
Bugfix: allow -vss usage on Mac OS Catalina
2019-11-22 16:46:31 -05:00
Gilbert Chen
9abfbe1ee0 Update pkg/sftp to 1.10.1
The old version has a bug where a connection closed by the server may cause
a deadlock due to a full channel buffer.
2019-11-21 23:36:17 -05:00
Gilbert Chen
b32c3b2cd5 If a symlink is a directory, match it against the patterns as a directory 2019-11-21 23:10:54 -05:00
Gilbert Chen
9baafdafa2 Remove a log message meant for debugging only 2019-11-21 21:23:31 -05:00
Gilbert Chen
ca7d927840 Use joinPath instead of filepath.Join to generate UNC paths
This fix probably isn't necessary, since filepath.Join can now produce UNC
paths too with the latest versions of Go.  However, we still want to keep
it for consistency.
2019-11-21 14:56:31 -05:00
Richard Sanger
426110e961 Adds support for GDrive Shared Drives
A shared drive can be accessed via
gcd://sharedDriveId@path/to/storage

sharedDriveId is optional; if omitted, duplicacy stores to the user's drive.
This remains backwards compatible with existing drives. E.g.
gcd://path/to/storage

Note: Shared Drives were previously named Team Drives.
2019-11-06 00:51:12 +13:00
Guillaume Boudreau
0ca9cd476e Bugfix: allow -vss usage on Mac OS Catalina
Using `tmutil listlocalsnapshots` to find the snapshot name we need to use; falling back to `com.apple.TimeMachine.SNAPSHOT_DATE` (same as before) if we can't find it.
2019-10-28 11:55:15 -04:00
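For illustration, the snapshot names reported by tmutil look like this (date
illustrative; the exact format varies by macOS version, which is why the name is
now looked up rather than constructed):

$ tmutil listlocalsnapshots /
com.apple.TimeMachine.2019-10-28-115515.local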
gilbertchen
abf9a94fc9 Merge pull request #575 from gilbertchen/rsa_encryption
Implement RSA encryption
2019-10-12 11:14:29 -04:00
Philipp Bandow
a55ac1b7ad Add option to use a ssh key signed with a certificate to authenticate 2019-02-28 01:37:14 +01:00
28 changed files with 719 additions and 256 deletions

Gopkg.lock generated
@@ -7,17 +7,11 @@
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["version"]
revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
version = "v14.2.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
version = "v10.2.0"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
version = "v10.15.5"
[[projects]]
branch = "master"
@@ -27,9 +21,9 @@
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
revision = "a32b1dcd091264b5dee7b386149b6cc3823395c9"
version = "v1.12.31"
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
version = "v1.30.7"
[[projects]]
name = "github.com/bkaradzic/go-lz4"
@@ -40,14 +34,14 @@
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
version = "v3.1.0"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage"]
revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"
packages = ["storage","version"]
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
[[projects]]
branch = "master"
@@ -59,7 +53,7 @@
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
packages = ["."]
revision = "90711b603312b1f973f3a5da3793ac4f1e5c2f2a"
revision = "994e692c5061cefa14e4296600a773de7119aa15"
[[projects]]
name = "github.com/gilbertchen/go-ole"
@@ -98,33 +92,33 @@
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
version = "v1.32.0"
branch = "master"
name = "github.com/golang/groupcache"
packages = ["lru"]
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
packages = ["proto","protoc-gen-go","protoc-gen-go/descriptor","protoc-gen-go/generator","protoc-gen-go/generator/internal/remap","protoc-gen-go/grpc","protoc-gen-go/plugin","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "84668698ea25b64748563aa20726db66a6b8d299"
version = "v1.3.5"
[[projects]]
name = "github.com/googleapis/gax-go"
packages = ["."]
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
version = "v2.0.0"
packages = [".","v2"]
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
version = "v2.0.3"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
revision = "c2b33e84"
[[projects]]
branch = "master"
name = "github.com/kr/fs"
packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
@@ -139,22 +133,22 @@
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
version = "v1.0.50"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
revision = "614d223910a179a466c1767a985424175c39b465"
version = "v0.9.1"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
version = "1.0.0"
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
version = "v1.11.0"
[[projects]]
name = "github.com/satori/go.uuid"
@@ -168,63 +162,92 @@
packages = ["."]
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
[[projects]]
name = "go.opencensus.io"
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
revision = "d835ff86be02193d324330acdb7d65546b05f814"
version = "v0.22.3"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","ssh","ssh/agent","ssh/terminal"]
revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94"
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
[[projects]]
branch = "master"
name = "golang.org/x/exp"
packages = ["apidiff","cmd/apidiff"]
revision = "e8c3332aa8e5b8e6acb4707c3a7e5979052b20aa"
[[projects]]
name = "golang.org/x/mod"
packages = ["module","semver"]
revision = "ed3ec21bb8e252814c380df79a80f366440ddb2d"
version = "v0.2.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "9dfe39835686865bff950a07b394c12a98ddc811"
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "82aafbf43bf885069dc71b7e7c2f9d7a614d47da"
packages = ["cpu","unix","windows"]
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "88f656faf3f37f690df1a32515b479415e1a6769"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = ["cmd/goimports","go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/fastwalk","internal/gocommand","internal/gopathwalk","internal/imports","internal/packagesinternal","internal/telemetry/event"]
revision = "700752c244080ed7ef6a61c3cfd73382cd334e57"
[[projects]]
branch = "master"
name = "golang.org/x/xerrors"
packages = [".","internal"]
revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544"
[[projects]]
name = "google.golang.org/api"
packages = ["drive/v3","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
revision = "17b5f22a248d6d3913171c1a557552ace0d9c806"
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
version = "v0.21.0"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
version = "v1.6.5"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
revision = "891aceb7c239e72692819142dfca057bdcbfcb96"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
version = "v1.8.0"
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
version = "v1.28.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
inputs-digest = "e124cf64f7f8770e51ae52ac89030d512da946e3fdc2666ebd3a604a624dd679"
solver-name = "gps-cdcl"
solver-version = 1

@@ -31,7 +31,7 @@
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.12.31"
version = "1.30.7"
[[constraint]]
name = "github.com/bkaradzic/go-lz4"
@@ -75,7 +75,7 @@
[[constraint]]
name = "github.com/pkg/sftp"
version = "1.0.0"
version = "1.10.1"
[[constraint]]
branch = "master"
@@ -86,9 +86,13 @@
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
version = "0.21.0"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.28.0"

@@ -159,6 +159,10 @@ func setGlobalOptions(context *cli.Context) {
}()
}
for _, logID := range context.GlobalStringSlice("suppress") {
duplicacy.SuppressLog(logID)
}
duplicacy.RunInBackground = context.GlobalBool("background")
}
@@ -201,7 +205,7 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
}
if err != nil {
duplicacy.LOG_WARN("SCRIPT_ERROR", "Failed to run script: %v", err)
duplicacy.LOG_ERROR("SCRIPT_ERROR", "Failed to run %s script: %v", script, err)
return false
}
@@ -548,7 +552,13 @@ func setPreference(context *cli.Context) {
newPreference.DoNotSavePassword = triBool.IsTrue()
}
newPreference.NobackupFile = context.String("nobackup-file")
if context.String("nobackup-file") != "" {
newPreference.NobackupFile = context.String("nobackup-file")
}
if context.String("filters") != "" {
newPreference.FiltersFile = context.String("filters")
}
key := context.String("key")
value := context.String("value")
@@ -731,7 +741,7 @@ func backupRepository(context *cli.Context) {
uploadRateLimit := context.Int("limit-rate")
enumOnly := context.Bool("enum-only")
storage.SetRateLimits(0, uploadRateLimit)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -808,7 +818,7 @@ func restoreRepository(context *cli.Context) {
duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
storage.SetRateLimits(context.Int("limit-rate"), 0)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
@@ -850,7 +860,7 @@ func listSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
id := preference.SnapshotID
@@ -888,7 +898,12 @@ func checkSnapshots(context *cli.Context) {
runScript(context, preference.Name, "pre")
storage := duplicacy.CreateStorage(*preference, false, 1)
threads := context.Int("threads")
if threads < 1 {
threads = 1
}
storage := duplicacy.CreateStorage(*preference, false, threads)
if storage == nil {
return
}
@@ -901,7 +916,7 @@ func checkSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
@@ -916,11 +931,12 @@ func checkSnapshots(context *cli.Context) {
showStatistics := context.Bool("stats")
showTabular := context.Bool("tabular")
checkFiles := context.Bool("files")
checkChunks := context.Bool("chunks")
searchFossils := context.Bool("fossils")
resurrect := context.Bool("resurrect")
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads)
runScript(context, preference.Name, "post")
}
@@ -958,7 +974,7 @@ func printFile(context *cli.Context) {
snapshotID = context.String("id")
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
@@ -1016,13 +1032,13 @@ func diff(context *cli.Context) {
}
compareByHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
runScript(context, preference.Name, "post")
}
@@ -1061,7 +1077,7 @@ func showHistory(context *cli.Context) {
revisions := getRevisions(context)
showLocalHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1124,7 +1140,7 @@ func pruneSnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1164,7 +1180,7 @@ func copySnapshots(context *cli.Context) {
sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
}
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile)
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "")
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
@@ -1199,7 +1215,7 @@ func copySnapshots(context *cli.Context) {
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword, destination.NobackupFile)
destinationPassword, "", "")
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(destination.Name)
@@ -1583,6 +1599,10 @@ func main() {
Name: "files",
Usage: "verify the integrity of every file",
},
cli.BoolFlag{
Name: "chunks",
Usage: "verify the integrity of every chunk",
},
cli.BoolFlag{
Name: "stats",
Usage: "show deduplication statistics (imply -all and all revisions)",
@@ -1601,6 +1621,12 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.IntFlag{
Name: "threads",
Value: 1,
Usage: "number of threads used to verify chunks",
Argument: "<n>",
},
},
Usage: "Check the integrity of snapshots",
ArgsUsage: " ",
@@ -1883,6 +1909,11 @@ func main() {
Usage: "use the specified storage instead of the default one",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "filters",
Usage: "specify the path of the filters file containing include/exclude patterns",
Argument: "<file path>",
},
},
Usage: "Change the options for the default or specified storage",
ArgsUsage: " ",
@@ -1932,7 +1963,7 @@ func main() {
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks from the source storage",
Argument: "<public key>",
Argument: "<private key>",
},
},
Usage: "Copy snapshots between compatible storages",
@@ -2042,13 +2073,18 @@ func main() {
Name: "comment",
Usage: "add a comment to identify the process",
},
cli.StringSliceFlag{
Name: "suppress, s",
Usage: "suppress logs with the specified id",
Argument: "<id>",
},
}
app.HideVersion = true
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.2.3" + " (" + GitCommit + ")"
app.Version = "2.5.0" + " (" + GitCommit + ")"
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

@@ -75,7 +75,7 @@ func B2Escape(path string) string {
return strings.Join(components, "/")
}
func NewB2Client(applicationKeyID string, applicationKey string, storageDir string, threads int) *B2Client {
func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
for storageDir != "" && storageDir[0] == '/' {
storageDir = storageDir[1:]
@@ -85,7 +85,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, storageDir stri
storageDir += "/"
}
maximumRetries := 10
maximumRetries := 15
if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
maximumRetries, _ = strconv.Atoi(value)
LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
@@ -95,6 +95,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, storageDir stri
HTTPClient: http.DefaultClient,
ApplicationKeyID: applicationKeyID,
ApplicationKey: applicationKey,
DownloadURL: downloadURL,
StorageDir: storageDir,
UploadURLs: make([]string, threads),
UploadTokens: make([]string, threads),
@@ -325,7 +326,10 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
client.AuthorizationToken = output.AuthorizationToken
client.APIURL = output.APIURL
client.DownloadURL = output.DownloadURL
if client.DownloadURL == "" {
client.DownloadURL = output.DownloadURL
}
LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
client.IsAuthorized = true
client.LastAuthorizationTime = time.Now().Unix()
@@ -413,16 +417,16 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
input["prefix"] = client.StorageDir
for {
url := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
requestHeaders := map[string]string{}
requestMethod := http.MethodPost
var requestInput interface{}
requestInput = input
if includeVersions {
url = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
} else if singleFile {
// handle a single file with no versions as a special case to download the last byte of the file
url = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
// requesting byte -1 works for empty files where 0-0 fails with a 416 error
requestHeaders["Range"] = "bytes=-1"
// HEAD request
@@ -432,7 +436,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
var readCloser io.ReadCloser
var responseHeader http.Header
var err error
readCloser, responseHeader, _, err = client.call(threadIndex, url, requestMethod, requestHeaders, requestInput)
readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
if err != nil {
return nil, err
}
@@ -445,7 +449,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
if singleFile && !includeVersions {
if responseHeader == nil {
LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", url)
LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
return []*B2Entry{}, nil
}
requiredHeaders := []string{
@@ -459,11 +463,17 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
}
}
if len(missingKeys) > 0 {
return nil, fmt.Errorf("%s missing headers: %s", url, missingKeys)
return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
}
// construct the B2Entry from the response headers of the download request
fileID := responseHeader.Get("x-bz-file-id")
fileName := responseHeader.Get("x-bz-file-name")
unescapedFileName, err := url.QueryUnescape(fileName)
if err == nil {
fileName = unescapedFileName
} else {
LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
}
fileAction := "upload"
// byte range that is returned: "bytes #-#/#"
rangeString := responseHeader.Get("Content-Range")
@@ -476,10 +486,10 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
if fileSize != 0 {
return nil, fmt.Errorf("%s returned non-zero file length", url)
return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
}
} else {
return nil, fmt.Errorf("could not parse headers returned by %s", url)
return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
}
fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)

@@ -37,7 +37,7 @@ func createB2ClientForTest(t *testing.T) (*B2Client, string) {
return nil, ""
}
return NewB2Client(b2["account"], b2["key"], b2["directory"], 1), b2["bucket"]
return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
}

@@ -15,9 +15,9 @@ type B2Storage struct {
}
// CreateB2Storage creates a B2 storage object.
func CreateB2Storage(accountID string, applicationKey string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
client := NewB2Client(accountID, applicationKey, storageDir, threads)
client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
err, _ = client.AuthorizeAccount(0)
if err != nil {
@@ -204,7 +204,6 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
if err != nil {
return err
@@ -218,7 +217,6 @@ func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
}

@@ -35,6 +35,7 @@ type BackupManager struct {
config *Config // contains a number of options
nobackupFile string // don't backup directory when this file name is found
filtersFile string // the path to the filters file
}
func (manager *BackupManager) SetDryRun(dryRun bool) {
@@ -44,7 +45,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
// master key which can be nil if encryption is not enabled.
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string) *BackupManager {
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, filtersFile string) *BackupManager {
config, _, err := DownloadConfig(storage, password)
if err != nil {
@@ -67,6 +68,7 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
config: config,
nobackupFile: nobackupFile,
filtersFile: filtersFile,
}
if IsDebugging() {
@@ -198,7 +200,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
defer DeleteShadowCopy()
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
manager.nobackupFile, manager.filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
@@ -208,6 +211,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
return true
}
if len(localSnapshot.Files) == 0 {
LOG_ERROR("SNAPSHOT_EMPTY", "No files under the repository to be backed up")
return false
}
// This cache contains all chunks referenced by the last snapshot. Any other chunks will lead to a call to
// UploadChunk.
chunkCache := make(map[string]bool)
@@ -770,7 +778,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
manager.filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
return false
@@ -1622,6 +1631,9 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
return true
}
// These two maps store hashes of chunks in the source and destination storages, respectively. Note that
// the value of 'chunks' is used to indicate if the chunk is a snapshot chunk, while the value of 'otherChunks'
// is not used.
chunks := make(map[string]bool)
otherChunks := make(map[string]bool)
@@ -1634,21 +1646,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
for _, chunkHash := range snapshot.FileSequence {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
}
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
for _, chunkHash := range snapshot.ChunkSequence {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
}
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
for _, chunkHash := range snapshot.LengthSequence {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
}
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1661,9 +1667,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
for _, chunkHash := range snapshot.ChunkHashes {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
chunks[chunkHash] = false // The chunk is a file chunk
}
}
snapshot.ChunkHashes = nil
}
otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
@@ -1715,7 +1723,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
totalSkipped := 0
chunkIndex := 0
for chunkHash := range chunks {
for chunkHash, isSnapshot := range chunks {
chunkIndex++
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
@@ -1726,7 +1734,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
newChunk := otherManager.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(chunk.GetBytes())
newChunk.encryptionVersion = chunk.encryptionVersion
newChunk.isSnapshot = isSnapshot
chunkUploader.StartChunk(newChunk, chunkIndex)
totalCopied++
} else {

@@ -227,11 +227,11 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
if testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
t.Errorf("Failed to initialize the storage")
}
} else {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
t.Errorf("Failed to initialize the storage")
}
}
@@ -239,7 +239,7 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password, "")
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "")
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
@@ -341,7 +341,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -349,7 +349,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
@@ -358,7 +358,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
/*buf := make([]byte, 1<<16)
runtime.Stack(buf, true)

@@ -63,12 +63,14 @@ type Chunk struct {
config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
// by the config
encryptionVersion byte // The version type in the encryption header
isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
// encryption, where a snapshot chunk is not encrypted by RSA
}
// Magic word to identify a duplicacy format encrypted file, plus a version number.
var ENCRYPTION_HEADER = "duplicacy\000"
// RSA encrypted chunks start with "duplicacy\002"
var ENCRYPTION_VERSION_RSA byte = 2
// CreateChunk creates a new chunk.
@@ -119,6 +121,7 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
chunk.hash = nil
chunk.id = ""
chunk.size = 0
chunk.isSnapshot = false
}
// Write implements the Writer interface.
@@ -193,8 +196,8 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
key := encryptionKey
usingRSA := false
if chunk.config.rsaPublicKey != nil && (!isSnapshot || chunk.encryptionVersion == ENCRYPTION_VERSION_RSA) {
// If the chunk is not a snapshot chunk, we attempt to encrypt it with the RSA public key if there is one
// Enable RSA encryption only when the chunk is not a snapshot chunk
if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
randomKey := make([]byte, 32)
_, err := rand.Read(randomKey)
if err != nil {
@@ -321,8 +324,6 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
headerLength := len(ENCRYPTION_HEADER)
chunk.encryptionVersion = 0
if len(encryptionKey) > 0 {
key := encryptionKey
@@ -347,12 +348,12 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
return fmt.Errorf("The storage doesn't seem to be encrypted")
}
chunk.encryptionVersion = encryptedBuffer.Bytes()[headerLength-1]
if chunk.encryptionVersion != 0 && chunk.encryptionVersion != ENCRYPTION_VERSION_RSA {
return fmt.Errorf("Unsupported encryption version %d", chunk.encryptionVersion)
encryptionVersion := encryptedBuffer.Bytes()[headerLength-1]
if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
}
if chunk.encryptionVersion == ENCRYPTION_VERSION_RSA {
if encryptionVersion == ENCRYPTION_VERSION_RSA {
if chunk.config.rsaPrivateKey == nil {
LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
return fmt.Errorf("An RSA private key is required to decrypt the chunk")

@@ -5,7 +5,6 @@
package duplicacy
import (
"flag"
"bytes"
crypto_rand "crypto/rand"
"crypto/rsa"
@@ -13,14 +12,6 @@ import (
"testing"
)
var testRSAEncryption bool
func init() {
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.Parse()
}
func TestChunk(t *testing.T) {
key := []byte("duplicacydefault")

@@ -126,6 +126,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
// AddChunk adds a single chunk to the download list.
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
task := ChunkDownloadTask{
chunkIndex: len(downloader.taskList),
chunkHash: chunkHash,
@@ -253,6 +254,47 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
return downloader.taskList[chunkIndex].chunk
}
// WaitForCompletion waits until all chunks have been downloaded
func (downloader *ChunkDownloader) WaitForCompletion() {
// Tasks in completedTasks have not been counted by numberOfActiveChunks
downloader.numberOfActiveChunks -= len(downloader.completedTasks)
// find the completed task with the largest index; we'll start from the next index
for index := range downloader.completedTasks {
if downloader.lastChunkIndex < index {
downloader.lastChunkIndex = index
}
}
// Loop until there is no download task in progress
for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex + 1 < len(downloader.taskList) {
// Wait for a completion event first
if downloader.numberOfActiveChunks > 0 {
completion := <-downloader.completionChannel
downloader.config.PutChunk(completion.chunk)
downloader.numberOfActiveChunks--
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
// Pass the tasks one by one to the download queue
if downloader.lastChunkIndex + 1 < len(downloader.taskList) {
task := &downloader.taskList[downloader.lastChunkIndex + 1]
if task.isDownloading {
downloader.lastChunkIndex++
continue
}
downloader.taskQueue <- *task
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.lastChunkIndex++
}
}
}
// Stop terminates all downloading goroutines
func (downloader *ChunkDownloader) Stop() {
for downloader.numberOfDownloadingChunks > 0 {

@@ -172,11 +172,11 @@ func (config *Config) Print() {
LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)
if len(config.ChunkKey) >= 0 {
if len(config.ChunkKey) > 0 {
LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
}
if len(config.FileKey) >= 0 {
if len(config.FileKey) > 0 {
LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
}
@@ -588,6 +588,11 @@ func (config *Config) loadRSAPublicKey(keyFile string) {
// loadRSAPrivateKey loads the specified private key file for decrypting file chunks
func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {
if config.rsaPublicKey == nil {
LOG_ERROR("RSA_PUBLIC", "The storage was not encrypted by an RSA key")
return
}
encodedKey, err := ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)

@@ -6,6 +6,7 @@ package duplicacy
import (
"fmt"
"io/ioutil"
"strings"
"github.com/gilbertchen/go-dropbox"
@@ -199,6 +200,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
}
defer output.Body.Close()
defer ioutil.ReadAll(output.Body)
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
return err

@@ -490,7 +490,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
}
if entry.IsLink() {
isRegular := false
isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
skippedFiles = append(skippedFiles, entry.Path)
@@ -500,7 +500,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
if isRegular {
entry.Mode ^= uint32(os.ModeSymlink)
} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
stat, err := os.Stat(filepath.Join(top, entry.Path))
stat, err := os.Stat(joinPath(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
skippedFiles = append(skippedFiles, entry.Path)
@@ -513,6 +513,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
// path from f.Name(); note that a "/" is appended, assuming a symbolic link is always a directory
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
}
if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
continue
}
entry = newEntry
}
}

@@ -12,6 +12,7 @@ import (
"os"
"path"
"strings"
"syscall"
"time"
)
@@ -190,10 +191,13 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
return err
}
err = file.Sync()
if err != nil {
file.Close()
return err
if err = file.Sync(); err != nil {
pathErr, ok := err.(*os.PathError)
isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
if !isNotSupported {
_ = file.Close()
return err
}
}
err = file.Close()

@@ -20,13 +20,16 @@ import (
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)
var (
GCDFileMimeType = "application/octet-stream"
GCDDirectoryMimeType = "application/vnd.google-apps.folder"
GCDUserDrive = "root"
)
type GCDStorage struct {
@@ -37,6 +40,7 @@ type GCDStorage struct {
idCacheLock sync.Mutex
backoffs []int // desired backoff time in seconds for each thread
attempts []int // number of failed attempts since last success for each thread
driveID string // the ID of the shared drive, or 'root' (GCDUserDrive) for the user's drive
createDirectoryLock sync.Mutex
isConnected bool
@@ -191,7 +195,11 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
var err error
for {
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
fileList, err = q.Do()
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
break
} else if retry {
@@ -219,7 +227,11 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
for {
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
fileList, err = q.Do()
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
break
@@ -248,7 +260,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
return fileID, nil
}
fileID := "root"
fileID := storage.driveID
if rootID, ok := storage.findPathID(""); ok {
fileID = rootID
@@ -303,37 +315,85 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
}
// CreateGCDStorage creates a GCD storage object.
func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storage *GCDStorage, err error) {
func CreateGCDStorage(tokenFile string, driveID string, storagePath string, threads int) (storage *GCDStorage, err error) {
ctx := context.Background()
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
return nil, err
}
gcdConfig := &GCDConfig{}
if err := json.Unmarshal(description, gcdConfig); err != nil {
return nil, err
}
var object map[string]interface{}
oauth2Config := oauth2.Config{
ClientID: gcdConfig.ClientID,
ClientSecret: gcdConfig.ClientSecret,
Endpoint: gcdConfig.Endpoint,
}
authClient := oauth2Config.Client(context.Background(), &gcdConfig.Token)
service, err := drive.New(authClient)
err = json.Unmarshal(description, &object)
if err != nil {
return nil, err
}
isServiceAccount := false
if value, ok := object["type"]; ok {
if authType, ok := value.(string); ok && authType == "service_account" {
isServiceAccount = true
}
}
var tokenSource oauth2.TokenSource
if isServiceAccount {
config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
if err != nil {
return nil, err
}
tokenSource = config.TokenSource(ctx)
} else {
gcdConfig := &GCDConfig{}
if err := json.Unmarshal(description, gcdConfig); err != nil {
return nil, err
}
config := oauth2.Config{
ClientID: gcdConfig.ClientID,
ClientSecret: gcdConfig.ClientSecret,
Endpoint: gcdConfig.Endpoint,
}
tokenSource = config.TokenSource(ctx, &gcdConfig.Token)
}
service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
if err != nil {
return nil, err
}
if len(driveID) == 0 {
driveID = GCDUserDrive
} else {
driveList, err := drive.NewTeamdrivesService(service).List().Do()
if err != nil {
return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
}
found := false
for _, teamDrive := range driveList.TeamDrives {
if teamDrive.Id == driveID || teamDrive.Name == driveID {
driveID = teamDrive.Id
found = true
break
}
}
if !found {
return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
}
}
storage = &GCDStorage{
service: service,
numberOfThreads: threads,
idCache: make(map[string]string),
backoffs: make([]int, threads),
attempts: make([]int, threads),
driveID: driveID,
}
for i := range storage.backoffs {
@@ -341,6 +401,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
storage.attempts[i] = 0
}
storage.savePathID("", driveID)
storagePathID, err := storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
@@ -462,7 +523,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
}
for {
err = storage.service.Files.Delete(fileID).Fields("id").Do()
err = storage.service.Files.Delete(fileID).SupportsAllDrives(true).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
storage.deletePathID(filePath)
return nil
@@ -508,7 +569,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
}
for {
_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
_, err = storage.service.Files.Update(fileID, nil).SupportsAllDrives(true).AddParents(toParentID).RemoveParents(fromParentID).Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else if retry {
@@ -559,7 +620,7 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
Parents: []string{parentID},
}
file, err = storage.service.Files.Create(file).Fields("id").Do()
file, err = storage.service.Files.Create(file).SupportsAllDrives(true).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else {
@@ -630,7 +691,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
for {
// AcknowledgeAbuse(true) lets the download proceed even if GCD thinks that it contains malware.
// TODO: Should this prompt the user or log a warning?
req := storage.service.Files.Get(fileID)
req := storage.service.Files.Get(fileID).SupportsAllDrives(true)
if e, ok := err.(*googleapi.Error); ok {
if strings.Contains(err.Error(), "cannotDownloadAbusiveFile") || len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
LOG_WARN("GCD_STORAGE", "%s is marked as abusive, will download anyway.", filePath)
@@ -676,7 +737,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
for {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
_, err = storage.service.Files.Create(file).SupportsAllDrives(true).Media(reader).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else if retry {
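
As a usage aside, the id/name pairs that the lookup in CreateGCDStorage accepts can be discovered with the same Teamdrives service; a minimal sketch, assuming the authenticated *drive.Service built above:

// List the shared drives visible to the token; either d.Id or d.Name
// can be passed to CreateGCDStorage as driveID.
drives, err := drive.NewTeamdrivesService(service).List().Do()
if err == nil {
	for _, d := range drives.TeamDrives {
		fmt.Printf("%s\t%s\n", d.Id, d.Name)
	}
}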

View File

@@ -7,10 +7,12 @@ package duplicacy
import (
"fmt"
"os"
"log"
"runtime/debug"
"sync"
"testing"
"time"
"regexp"
)
const (
@@ -43,6 +45,13 @@ func setTestingT(t *testing.T) {
testingT = t
}
// Contains the ids of logs that won't be displayed
var suppressedLogs map[string]bool = map[string]bool{}
func SuppressLog(id string) {
suppressedLogs[id] = true
}
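
A small sketch of how the map is consulted: once an id is registered, logf below drops any message at ERROR level or lower that carries it (the suppressed id here is just an example).

SuppressLog("ONEDRIVE_RETRY")
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d", 503) // silently dropped
LOG_INFO("BACKUP_STATS", "Backup completed")         // still printed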
func getLevelName(level int) string {
switch level {
case DEBUG:
@@ -143,6 +152,12 @@ func logf(level int, logID string, format string, v ...interface{}) {
defer logMutex.Unlock()
if level >= loggingLevel {
if level <= ERROR && len(suppressedLogs) > 0 {
if _, found := suppressedLogs[logID]; found {
return
}
}
if printLogHeader {
fmt.Printf("%s %s %s %s\n",
now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
@@ -161,6 +176,32 @@ func logf(level int, logID string, format string, v ...interface{}) {
}
}
// Set up logging for libraries that Duplicacy depends on. They can call 'log.Printf("[ID] message")'
// to produce logs in Duplicacy's format
type Logger struct {
formatRegex *regexp.Regexp
}
func (logger *Logger) Write(line []byte) (n int, err error) {
n = len(line)
for len(line) > 0 && line[len(line) - 1] == '\n' {
line = line[:len(line) - 1]
}
matched := logger.formatRegex.FindStringSubmatch(string(line))
if matched != nil {
LOG_INFO(matched[1], "%s", matched[2])
} else {
LOG_INFO("LOG_DEFAULT", "%s", line)
}
return
}
func init() {
log.SetFlags(0)
log.SetOutput(&Logger{ formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`) })
}
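
With this writer installed, standard-library logging from dependencies is rewritten on the fly; for example (messages are hypothetical):

log.Printf("[SFTP_CLIENT] reconnecting") // emitted as:  INFO SFTP_CLIENT reconnecting
log.Printf("no bracketed id")            // emitted as:  INFO LOG_DEFAULT no bracketed id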
const (
duplicacyExitCode = 100
otherExitCode = 101

View File

@@ -15,6 +15,7 @@ import (
"strings"
"sync"
"time"
"path/filepath"
"golang.org/x/oauth2"
)
@@ -32,9 +33,6 @@ type OneDriveErrorResponse struct {
Error OneDriveError `json:"error"`
}
var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
var OneDriveAPIURL = "https://api.onedrive.com/v1.0"
type OneDriveClient struct {
HTTPClient *http.Client
@@ -44,9 +42,13 @@ type OneDriveClient struct {
IsConnected bool
TestMode bool
IsBusiness bool
RefreshTokenURL string
APIURL string
}
func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
@@ -63,6 +65,15 @@ func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
TokenFile: tokenFile,
Token: token,
TokenLock: &sync.Mutex{},
IsBusiness: isBusiness,
}
if isBusiness {
client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
client.APIURL = "https://graph.microsoft.com/v1.0/me"
} else {
client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
client.APIURL = "https://api.onedrive.com/v1.0"
}
client.RefreshToken(false)
@@ -106,9 +117,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
if reader, ok := inputReader.(*RateLimitedReader); ok {
request.ContentLength = reader.Length()
request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length() - 1, reader.Length()))
}
if url != OneDriveRefreshTokenURL {
if url != client.RefreshTokenURL {
client.TokenLock.Lock()
request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
client.TokenLock.Unlock()
@@ -152,7 +164,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
if response.StatusCode == 401 {
if url == OneDriveRefreshTokenURL {
if url == client.RefreshTokenURL {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
}
@@ -161,6 +173,8 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
return nil, 0, err
}
continue
} else if response.StatusCode == 409 {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
} else if response.StatusCode > 401 && response.StatusCode != 404 {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
@@ -188,7 +202,7 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) {
return nil
}
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
@@ -228,9 +242,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
entries := []OneDriveEntry{}
url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
url := client.APIURL + "/drive/root:/" + path + ":/children"
if path == "" {
url = OneDriveAPIURL + "/drive/root/children"
url = client.APIURL + "/drive/root/children"
}
if client.TestMode {
url += "?top=8"
@@ -266,7 +280,7 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
url := OneDriveAPIURL + "/drive/root:/" + path
url := client.APIURL + "/drive/root:/" + path
url += "?select=id,name,size,folder"
readCloser, _, err := client.call(url, "GET", 0, "")
@@ -291,28 +305,95 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"
url := client.APIURL + "/drive/items/root:/" + path + ":/content"
return client.call(url, "GET", 0, "")
}
func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"
// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
// is smaller than 4MB for OneDrive Business
if !client.IsBusiness || len(content) < 4 * 1024 * 1024 || (client.TestMode && rand.Int() % 2 == 0) {
url := client.APIURL + "/drive/root:/" + path + ":/content"
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
if err != nil {
return err
}
readCloser.Close()
return nil
}
// For large files, create an upload session first
uploadURL, err := client.CreateUploadSession(path)
if err != nil {
return err
}
return client.UploadFileSession(uploadURL, content, rateLimit)
}
func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string, err error) {
type CreateUploadSessionItem struct {
ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"`
Name string `json:"name"`
}
input := map[string]interface{} {
"item": CreateUploadSessionItem {
ConflictBehavior: "replace",
Name: filepath.Base(path),
},
}
readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
if err != nil {
return "", err
}
type CreateUploadSessionOutput struct {
UploadURL string `json:"uploadUrl"`
}
output := &CreateUploadSessionOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return "", err
}
readCloser.Close()
return output.UploadURL, nil
}
func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte, rateLimit int) (err error) {
readCloser, _, err := client.call(uploadURL, "PUT", CreateRateLimitedReader(content, rateLimit), "")
if err != nil {
return err
}
type UploadFileSessionOutput struct {
Size int `json:"size"`
}
output := &UploadFileSessionOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return fmt.Errorf("Failed to complete the file upload session: %v", err)
}
if output.Size != len(content) {
return fmt.Errorf("Uploaded %d bytes out of %d bytes", output.Size, len(content))
}
readCloser.Close()
return nil
}
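
Note that call() (shown earlier) derives the Content-Range header from the rate-limited reader, so the session body goes up in a single request; a worked example for an assumed 5 MiB chunk:

n := int64(5 * 1024 * 1024)
header := fmt.Sprintf("bytes 0-%d/%d", n-1, n)
// header == "bytes 0-5242879/5242880"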
func (client *OneDriveClient) DeleteFile(path string) error {
url := OneDriveAPIURL + "/drive/root:/" + path
url := client.APIURL + "/drive/root:/" + path
readCloser, _, err := client.call(url, "DELETE", 0, "")
if err != nil {
@@ -325,7 +406,7 @@ func (client *OneDriveClient) DeleteFile(path string) error {
func (client *OneDriveClient) MoveFile(path string, parent string) error {
url := OneDriveAPIURL + "/drive/root:/" + path
url := client.APIURL + "/drive/root:/" + path
parentReference := make(map[string]string)
parentReference["path"] = "/drive/root:/" + parent
@@ -335,6 +416,20 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
if err != nil {
if e, ok := err.(OneDriveError); ok && e.Status == 400 {
// The destination directory doesn't exist; trying to create it...
dir := filepath.Dir(parent)
if dir == "." {
dir = ""
}
client.CreateDirectory(dir, filepath.Base(parent))
readCloser, _, err = client.call(url, "PATCH", parameters, "application/json")
if err != nil {
return err
}
}
return err
}
@@ -344,24 +439,29 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
func (client *OneDriveClient) CreateDirectory(path string, name string) error {
url := OneDriveAPIURL + "/root/children"
url := client.APIURL + "/root/children"
if path != "" {
parentID, isDir, _, err := client.GetFileInfo(path)
pathID, isDir, _, err := client.GetFileInfo(path)
if err != nil {
return err
}
if parentID == "" {
return fmt.Errorf("The path '%s' does not exist", path)
if pathID == "" {
dir := filepath.Dir(path)
if dir != "." {
// The parent directory doesn't exist; trying to create it...
client.CreateDirectory(dir, filepath.Base(path))
isDir = true
}
}
if !isDir {
return fmt.Errorf("The path '%s' is not a directory", path)
}
url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
url = client.APIURL + "/drive/root:/" + path + ":/children"
}
parameters := make(map[string]interface{})
@@ -370,6 +470,11 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {
readCloser, _, err := client.call(url, "POST", parameters, "application/json")
if err != nil {
if e, ok := err.(OneDriveError); ok && e.Status == 409 {
// This error usually means the directory already exists
LOG_TRACE("ONEDRIVE_MKDIR", "The directory '%s/%s' already exists", path, name)
return nil
}
return err
}

View File

@@ -17,7 +17,7 @@ import (
func TestOneDriveClient(t *testing.T) {
oneDriveClient, err := NewOneDriveClient("one-token.json")
oneDriveClient, err := NewOneDriveClient("one-token.json", false)
if err != nil {
t.Errorf("Failed to create the OneDrive client: %v", err)
return

View File

@@ -19,13 +19,13 @@ type OneDriveStorage struct {
}
// CreateOneDriveStorage creates an OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {
for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
storagePath = storagePath[:len(storagePath)-1]
}
client, err := NewOneDriveClient(tokenFile)
client, err := NewOneDriveClient(tokenFile, isBusiness)
if err != nil {
return nil, err
}
@@ -80,6 +80,7 @@ func (storage *OneDriveStorage) convertFilePath(filePath string) string {
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}

View File

@@ -25,6 +25,7 @@ type Preference struct {
DoNotSavePassword bool `json:"no_save_password"`
NobackupFile string `json:"nobackup_file"`
Keys map[string]string `json:"keys"`
FiltersFile string `json:"filters"`
}
var preferencePath string

View File

@@ -91,7 +91,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
storageDir: storageDir,
minimumNesting: minimumNesting,
numberOfThreads: threads,
numberOfTries: 6,
numberOfTries: 8,
serverAddress: serverAddress,
sftpConfig: sftpConfig,
}
@@ -129,22 +129,19 @@ func (storage *SFTPStorage) retry(f func () error) error {
delay *= 2
storage.clientLock.Lock()
if storage.client != nil {
storage.client.Close()
storage.client = nil
}
connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
storage.clientLock.Unlock()
return err
continue
}
client, err := sftp.NewClient(connection)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
connection.Close()
storage.clientLock.Unlock()
return err
continue
}
storage.client = client
storage.clientLock.Unlock()
@@ -275,36 +272,19 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
fullPath := path.Join(storage.storageDir, filePath)
dirs := strings.Split(filePath, "/")
if len(dirs) > 1 {
fullDir := path.Dir(fullPath)
err = storage.retry(func() error {
_, err := storage.getSFTPClient().Stat(fullDir)
return err
})
if err != nil {
// The error may be caused by a non-existent fullDir, or a broken connection. In either case,
// we just assume it is the former because there isn't a way to tell which is the case.
for i := range dirs[1 : len(dirs)-1] {
subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
// We don't check the error; just keep going blindly but always store the last err
err = storage.getSFTPClient().Mkdir(subDir)
}
fullDir := path.Dir(fullPath)
return storage.retry(func() error {
// If there is an error creating the dirs, we check fullDir one more time, because another thread
// may happen to create the same fullDir ahead of this thread
if err != nil {
err = storage.retry(func() error {
_, err := storage.getSFTPClient().Stat(fullDir)
return err
})
if err != nil {
return err
if len(dirs) > 1 {
_, err := storage.getSFTPClient().Stat(fullDir)
if os.IsNotExist(err) {
for i := range dirs[1 : len(dirs)-1] {
subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
// We don't check the error; just keep going blindly
storage.getSFTPClient().Mkdir(subDir)
}
}
}
}
return storage.retry(func() error {
letters := "abcdefghijklmnopqrstuvwxyz"
suffix := make([]byte, 8)

View File

@@ -13,6 +13,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"time"
@@ -123,11 +124,11 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
}
deviceIdRepository, err := GetPathDeviceId(top)
if err != nil {
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
return top
}
if deviceIdLocal != deviceIdRepository {
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
return top
}
@@ -145,22 +146,37 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
// Use tmutil to create snapshot
tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
return top
}
colonPos := strings.IndexByte(tmutilOutput, ':')
if colonPos < 0 {
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
return top
}
snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
return top
}
snapshotName := "com.apple.TimeMachine." + snapshotDate
r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
snapshotNames := r.FindStringSubmatch(tmutilOutput)
if len(snapshotNames) > 0 {
snapshotName = snapshotNames[0]
} else {
LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
}
// Mount snapshot as readonly and hide from GUI i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
return top
}
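
For illustration, the snapshot-name lookup above applied to an assumed 'tmutil listlocalsnapshots .' output (newer macOS releases append a suffix such as '.local', which is why the fixed 'com.apple.TimeMachine.' prefix alone is no longer enough):

date := "2020-04-06-221342" // as parsed from 'tmutil snapshot' above; value is hypothetical
output := "Snapshots for disk /:\ncom.apple.TimeMachine.2020-04-06-221342.local"
r := regexp.MustCompile(`(?m)^(.+` + date + `.*)$`)
fmt.Println(r.FindStringSubmatch(output)[0])
// com.apple.TimeMachine.2020-04-06-221342.local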

View File

@@ -58,7 +58,7 @@ func CreateEmptySnapshot(id string) (snapshot *Snapshot) {
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (snapshot *Snapshot, skippedDirectories []string,
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
snapshot = &Snapshot{
@@ -69,7 +69,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
var patterns []string
patterns = ProcessFilters()
if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
}
patterns = ProcessFilters(filtersFile)
directories := make([]*Entry, 0, 256)
directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -88,6 +91,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
snapshot.Files = append(snapshot.Files, directory)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes)
if err != nil {
if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
return nil, nil, nil, err
}
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
skippedDirectories = append(skippedDirectories, directory.Path)
continue
@@ -121,8 +128,8 @@ func AppendPattern(patterns []string, new_pattern string) (new_patterns []string
new_patterns = append(patterns, new_pattern)
return new_patterns
}
func ProcessFilters() (patterns []string) {
patterns = ProcessFilterFile(joinPath(GetDuplicacyPreferencePath(), "filters"), make([]string, 0))
func ProcessFilters(filtersFile string) (patterns []string) {
patterns = ProcessFilterFile(filtersFile, make([]string, 0))
LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))

View File

@@ -653,6 +653,51 @@ func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot, keepChunkH
return chunks
}
// GetSnapshotChunkHashes has an option to retrieve chunk hashes in addition to chunk ids.
func (manager *SnapshotManager) GetSnapshotChunkHashes(snapshot *Snapshot, chunkHashes *map[string]bool, chunkIDs map[string]bool) {
for _, chunkHash := range snapshot.FileSequence {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
for _, chunkHash := range snapshot.ChunkSequence {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
for _, chunkHash := range snapshot.LengthSequence {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
if len(snapshot.ChunkHashes) == 0 {
description := manager.DownloadSequence(snapshot.ChunkSequence)
err := snapshot.LoadChunks(description)
if err != nil {
LOG_ERROR("SNAPSHOT_CHUNK", "Failed to load chunks for snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
}
for _, chunkHash := range snapshot.ChunkHashes {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
snapshot.ClearChunks()
}
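
A sketch of the two calling modes (manager and snapshot are assumed to exist): pass nil to collect only chunk ids, or a set to also accumulate the hashes needed for -chunks verification.

chunkIDs := make(map[string]bool)
manager.GetSnapshotChunkHashes(snapshot, nil, chunkIDs) // ids only

allChunkHashes := make(map[string]bool)
manager.GetSnapshotChunkHashes(snapshot, &allChunkHashes, chunkIDs) // ids and hashes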
// ListSnapshots shows the information about a snapshot.
func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList []int, tag string,
showFiles bool, showChunks bool) int {
@@ -757,10 +802,12 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
// CheckSnapshots checks the integrity of the specified snapshots.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
checkFiles bool, searchFossils bool, resurrect bool) bool {
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int) bool {
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads)
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
snapshotMap := make(map[string][]*Snapshot)
var err error
@@ -790,7 +837,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
chunkSizeMap[chunk] = allSizes[i]
}
if snapshotID == "" || showStatistics {
if snapshotID == "" || showStatistics || showTabular {
snapshotIDs, err := manager.ListSnapshotIDs()
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
@@ -810,7 +857,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
for snapshotID = range snapshotMap {
revisions := revisionsToCheck
if len(revisions) == 0 || showStatistics {
if len(revisions) == 0 || showStatistics || showTabular {
revisions, err = manager.ListSnapshotRevisions(snapshotID)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
@@ -839,6 +886,12 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
}
LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))
var allChunkHashes *map[string]bool
if checkChunks && !checkFiles {
m := make(map[string]bool)
allChunkHashes = &m
}
for snapshotID = range snapshotMap {
for _, snapshot := range snapshotMap[snapshotID] {
@@ -850,9 +903,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
}
chunks := make(map[string]bool)
for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
chunks[chunkID] = true
}
manager.GetSnapshotChunkHashes(snapshot, allChunkHashes, chunks)
missingChunks := 0
for chunkID := range chunks {
@@ -946,6 +997,14 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
}
if checkChunks && !checkFiles {
LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
for chunkHash := range *allChunkHashes {
manager.chunkDownloader.AddChunk(chunkHash)
}
manager.chunkDownloader.WaitForCompletion()
LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
}
return true
}
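
Under the new prototype, a verify-everything pass might be driven as below (a sketch; the argument values are assumptions):

// id "" selects all snapshot ids; checkFiles=false with checkChunks=true takes the
// download-and-verify path added above, here with 4 download threads.
manager.CheckSnapshots("", nil, "", false /*showStatistics*/, false /*showTabular*/,
	false /*checkFiles*/, true /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/,
	4 /*threads*/)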
@@ -1299,7 +1358,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
// Diff compares two snapshots, or two revision of a file if the file argument is given.
func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
filePath string, compareByHash bool, nobackupFile string) bool {
filePath string, compareByHash bool, nobackupFile string, filtersFile string) bool {
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
top, snapshotID, revisions, filePath, compareByHash)
@@ -1312,7 +1371,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
if len(revisions) <= 1 {
// Only scan the repository if filePath is not provided
if len(filePath) == 0 {
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile)
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false

View File

@@ -620,7 +620,7 @@ func TestPruneNewSnapshots(t *testing.T) {
// Now chunkHash1 will be resurrected
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 4, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1)
}
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -669,7 +669,7 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Run the prune again but the fossil collection should be ignored, since revision 1 still exists
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 2)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1)
// Prune snapshot 1 again
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -683,5 +683,5 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Run the prune again and this time the fossil collection will be processed and the fossils removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1)
}

View File

@@ -336,7 +336,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
true, resetPassword)
var key ssh.Signer
var keySigner ssh.Signer
var err error
if keyFile == "" {
@@ -347,7 +347,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
} else {
key, err = ssh.ParsePrivateKey(content)
keySigner, err = ssh.ParsePrivateKey(content)
if err != nil {
if strings.Contains(err.Error(), "cannot decode encrypted private keys") {
LOG_TRACE("SSH_PUBLICKEY", "The private key file is encrypted")
@@ -355,7 +355,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
if len(passphrase) == 0 {
LOG_INFO("SSH_PUBLICKEY", "No passphrase to descrypt the private key file %s", keyFile)
} else {
key, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
keySigner, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the encrypted private key file %s: %v", keyFile, err)
}
@@ -364,11 +364,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
}
}
if keySigner != nil {
certFile := keyFile + "-cert.pub"
if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() {
LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile)
var content []byte
content, err = ioutil.ReadFile(certFile)
if err != nil {
LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err)
} else {
pubKey, _, _, _, err := ssh.ParseAuthorizedKey(content)
if err != nil {
LOG_INFO("SSH_CERTIFICATE", "Failed parse ssh certificate file %s: %v", certFile, err)
} else {
certSigner, err := ssh.NewCertSigner(pubKey.(*ssh.Certificate), keySigner)
if err != nil {
LOG_INFO("SSH_CERTIFICATE", "Failed to create certificate signer: %v", err)
} else {
keySigner = certSigner
}
}
}
}
}
}
}
if key != nil {
signers = append(signers, key)
if keySigner != nil {
signers = append(signers, keySigner)
}
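
Spelled out with a hypothetical path, the naming convention implemented above:

keyFile := "/home/user/.ssh/id_ed25519" // hypothetical ssh_key_file value
certFile := keyFile + "-cert.pub"       // looked up at /home/user/.ssh/id_ed25519-cert.pub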
if len(signers) > 0 {
@@ -531,7 +555,25 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, storageDir, threads)
b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "b2_id", accountID)
SavePassword(preference, "b2_key", applicationKey)
return b2Storage
} else if matched[1] == "b2-custom" {
b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
matched := b2customUrlRegex.FindStringSubmatch(storageURL)
downloadURL := "https://" + matched[1]
bucket := matched[2]
storageDir := matched[4]
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
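
A hypothetical URL run through the b2-custom pattern above, showing how the capture groups map (host and bucket names are made up):

re := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
m := re.FindStringSubmatch("b2-custom://f002.backblazeb2.com/my-bucket/backups/laptop")
// m[1] == "f002.backblazeb2.com"  -> downloadURL = "https://f002.backblazeb2.com"
// m[2] == "my-bucket"             -> bucket
// m[4] == "backups/laptop"        -> storageDir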
@@ -582,21 +624,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
SavePassword(preference, "gcs_token", tokenFile)
return gcsStorage
} else if matched[1] == "gcd" {
// Handle writing directly to the root of the drive
// For gcd://driveid@/, driveid@ is matched[3], not matched[2]
if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
matched[2], matched[3] = matched[3], matched[2]
}
driveID := matched[2]
if driveID != "" {
driveID = driveID[:len(driveID)-1]
}
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
gcdStorage, err := CreateGCDStorage(tokenFile, driveID, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "gcd_token", tokenFile)
return gcdStorage
} else if matched[1] == "one" {
} else if matched[1] == "one" || matched[1] == "odb" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
return nil
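
Taken together, a few hypothetical storage URLs and how the parsing above interprets them (drive, path, and folder names are assumptions):

gcd://backups                    user's own drive (driveID defaults to "root")
gcd://MyTeamDrive@work/backups   shared drive selected by name
gcd://0ABcdEfGhIjKlMn@/          shared drive selected by id, stored at the drive root
one://backups                    OneDrive Personal (token saved under "one_token")
odb://backups                    OneDrive Business via Microsoft Graph ("odb_token")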

View File

@@ -27,6 +27,7 @@ var testRateLimit int
var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -34,6 +35,7 @@ func init() {
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.Parse()
}
@@ -107,7 +109,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "b2" {
storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], config["directory"], threads)
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-s3" {
@@ -131,11 +133,23 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd" {
storage, err := CreateGCDStorage(config["token_file"], config["storage_path"], threads)
storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd-shared" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "hubic" {