mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits


115 Commits

Author SHA1 Message Date
Gilbert Chen
51cbf73caa Bump version to 2.5.1 2020-04-17 15:57:08 -04:00
Gilbert Chen
835af11334 Fixed a bug in ssh login with encrypted private key
Check the type of the returned error instead of the error message to
determine whether the private key file is protected by a passphrase.
2020-04-17 15:55:30 -04:00
Gilbert Chen
4c3557eb80 Bump version to 2.5.0 2020-04-09 23:22:32 -04:00
Gilbert Chen
eebcece9e0 Update github.com/aws/aws-sdk-go and google.golang.org/api to the latest 2020-04-09 23:21:55 -04:00
gilbertchen
8c80470c29 Merge pull request #593 from freaksdotcom/readall
Call ReadAll() on the body io.ReadCloser to allow the http keepalive connection to be reused.
2020-04-09 21:12:41 -04:00
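For context: Go's net/http returns a connection to the keep-alive pool only after the response body has been fully read and closed. A minimal sketch of the pattern behind this PR (function and variable names are illustrative, not duplicacy's actual code):

    package sketch

    import (
        "io/ioutil"
        "net/http"
    )

    // statusOf issues a GET and drains the body before closing it.
    // Without the ReadAll, the unread body forces net/http to drop the
    // TCP connection instead of reusing it for the next request.
    func statusOf(url string) (int, error) {
        resp, err := http.Get(url)
        if err != nil {
            return 0, err
        }
        defer resp.Body.Close()
        ioutil.ReadAll(resp.Body) // drain so the connection can be reused
        return resp.StatusCode, nil
    }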
Gilbert Chen
bcb889272d Add test for Google Shared Drive 2020-04-09 00:04:30 -04:00
Gilbert Chen
79d8654a12 Allow the name of Google Shared Drive to be used in the storage url
The previous PR only accepted the id of the shared drive, which is not very
memorable.  This commit makes it possible to specify the drive by id or by name.
2020-04-08 23:59:15 -04:00
Brandon High
6bf0d2265c Call ReadAll() on the body io.ReadCloser to allow the http keepalive connection to be reused. 2020-04-07 22:07:41 -07:00
Gilbert Chen
749db78a1f Implemented a global option to suppress logs by ids
You can now use -suppress LOGID or -s LOGID to skip printing logs with the given
ids.  This is a global option, which means it applies to all commands.
It can be specified more than once.
2020-04-07 23:22:17 -04:00
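A hypothetical invocation, using log ids that appear elsewhere in this changeset (as a global option, -suppress goes before the command):

    duplicacy -suppress SNAPSHOT_FILTER -s REGEX_DEBUG backup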
Gilbert Chen
0a51bd8d1a Make log.Printf print to Duplicacy's logging system 2020-04-07 13:52:54 -04:00
Gilbert Chen
7208adbce2 Access Google Drive via service account.
Our GCS backend already supports service accounts.  This just copies relevant
code from there.
2020-04-06 23:25:47 -04:00
gilbertchen
e827662869 Merge pull request #579 from rsanger/master
Add support for Shared Google Drives
2020-04-06 22:55:46 -04:00
gilbertchen
57dd5ba927 Merge pull request #590 from fbarthez/macos_sync_error
Fix "Failed to upload the chunk ... sync ...: operation not supported"
2020-04-06 22:54:44 -04:00
Gilbert Chen
01a37b7828 Fixed a typo in command line arguments 2020-04-06 22:13:08 -04:00
Gilbert Chen
57cd20bb84 Fixed the condition to show 'chunks are encrypted' messages
The 'File/Metadata chunks are encrypted' messages were always shown even if the
storage wasn't encrypted.
2020-04-06 12:22:47 -04:00
Gilbert Chen
0e970da222 Fixed build errors in tests caused by snapshotManager.CheckSnapshots 2020-04-06 12:19:42 -04:00
Gilbert Chen
e880636502 Fixed test build errors caused by the prototype change in CheckSnapshots() 2020-03-30 17:49:26 -04:00
Gilbert Chen
810303ce25 Fail the backup if the repository can't be accessed or there are no files
This is mainly to avoid creating an empty snapshot when a drive/share is
not mounted, which causes the subsequent backup to scan all files again.
2020-03-30 17:44:53 -04:00
Gilbert Chen
ffac83dd80 Assume the signed certificate of an ssh key file has the suffix '-cert.pub'.
So if the ssh key file is 'mykey' then Duplicacy will check if the signed
certificate can be loaded from the file 'mykey-cert.pub'.  This avoids the
use of another preference variable 'ssh_cert_file'.
2020-03-25 23:50:35 -04:00
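A minimal Go sketch of this convention using golang.org/x/crypto/ssh (the function name and the fallback behavior are assumptions, not duplicacy's exact code):

    package sketch

    import (
        "io/ioutil"

        "golang.org/x/crypto/ssh"
    )

    // loadCertSigner loads the private key at keyPath and, if a signed
    // certificate exists at keyPath + "-cert.pub", wraps the key with it.
    func loadCertSigner(keyPath string) (ssh.Signer, error) {
        keyBytes, err := ioutil.ReadFile(keyPath)
        if err != nil {
            return nil, err
        }
        signer, err := ssh.ParsePrivateKey(keyBytes)
        if err != nil {
            return nil, err
        }
        certBytes, err := ioutil.ReadFile(keyPath + "-cert.pub")
        if err != nil {
            return signer, nil // no certificate; use the plain key
        }
        pub, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
        if err != nil {
            return nil, err
        }
        cert, ok := pub.(*ssh.Certificate)
        if !ok {
            return signer, nil // not a certificate; use the plain key
        }
        return ssh.NewCertSigner(cert, signer)
    }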
gilbertchen
05674871fe Merge pull request #547 from philband/ssh_signed_certificate
Add option to use an ssh key signed with a certificate to authenticate
2020-03-25 23:14:30 -04:00
Gilbert Chen
22d6f3abfc Add a -chunks option to the check command to verify the integrity of chunks
This option will download and verify every chunk.  Unlike the -files option,
this option only downloads each chunk once.  There is also a new -threads
option to use multiple threads to download chunks.
2020-03-24 20:58:45 -04:00
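A hypothetical invocation combining the two new options:

    duplicacy check -chunks -threads 4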
Gilbert Chen
d26ffe2cff Add support for OneDrive for Business
The new storage prefix for OneDrive for Business is odb://

The token file can be downloaded from https://duplicacy.com/odb_start

OneDrive for Business requires basically the same set of API calls with
different endpoints.  However, one major difference is that for files larger
than 4MB, an upload session must be created first, which is then used to upload
the file content.  Other than that, there are a few minor differences, such as
the behavior when creating an existing directory or moving files to a non-existent directory.
2020-03-19 14:59:26 -04:00
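A rough Go sketch of step one of the large-file flow (the URL shape and the uploadUrl field follow Microsoft's public createUploadSession API; duplicacy's odb backend internals may differ):

    package sketch

    import (
        "encoding/json"
        "net/http"
    )

    // createUploadSession asks the server for an upload session; the
    // returned uploadUrl then accepts the file content via PUT requests.
    func createUploadSession(client *http.Client, token string, path string) (string, error) {
        url := "https://graph.microsoft.com/v1.0/me/drive/root:/" + path + ":/createUploadSession"
        request, err := http.NewRequest("POST", url, nil)
        if err != nil {
            return "", err
        }
        request.Header.Set("Authorization", "Bearer "+token)
        response, err := client.Do(request)
        if err != nil {
            return "", err
        }
        defer response.Body.Close()
        var session struct {
            UploadURL string `json:"uploadUrl"`
        }
        if err := json.NewDecoder(response.Body).Decode(&session); err != nil {
            return "", err
        }
        return session.UploadURL, nil
    }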
Fabian Peters
a35f6c27be Fix "Failed to upload the chunk ... sync ...: operation not supported" issue when using SMB on macOS. This is done by inspecting the error type and ignoring the error when its operation is "sync" and the underlying error is "operation not supported".
Note: this change is my first ever foray into Go and is based simply on the information provided in: https://forum.duplicacy.com/t/failed-to-upload-the-chunk-operation-not-supported/2875/11
2020-03-16 15:56:31 +01:00
Gilbert Chen
808ae4eb75 Bump version to 2.4.1 2020-03-13 20:44:00 -04:00
Gilbert Chen
6699e2f440 Fixed a bug that disabled RSA when copying from a non RSA-encrypted storage.
When copying to an RSA-encrypted storage, we relied on the RSA encryption
version to determine if a chunk is a snapshot chunk or a file chunk.  This is
wrong when the source storage is not encrypted or not RSA-encrypted.  There
is a more reliable way to determine if a chunk is a snapshot chunk or not.
2020-03-13 20:13:27 -04:00
Gilbert Chen
733b68be2c Do not take an RSA private key if the storage wasn't RSA encrypted. 2020-03-11 23:14:01 -04:00
Gilbert Chen
b61906c99e Bump version to 2.4.0 2020-03-05 22:06:24 -05:00
gilbertchen
a0a07d18cc Merge pull request #589 from fracai/b2_download_url
support downloading from a custom URL pointed at B2
2020-03-05 22:00:53 -05:00
Gilbert Chen
a6ce64e715 Fixed handling of repository ids with spaces in the b2 backend
Usually a repository id should not contain spaces or other non-alphanumeric
characters, but if it does we should be able to handle it correctly.  This
commit fixes the b2 backend to convert file names in a proper way.
2020-03-05 14:45:09 -05:00
Arno Hautala
499b612a0d moving download url config from a key to the storage url pattern 2020-02-25 20:53:19 -05:00
Arno Hautala
46ce0ba1fb support downloading from a custom URL pointed at B2 2020-02-22 22:12:36 -05:00
Gilbert Chen
cc88abd547 Fixed a bug that caused all copied chunks to be RSA encrypted
The field encryptionVersion in the Chunk struct is supposed to pass the status
of RSA encryption from a source chunk to a destination chunk in a copy command.
This field needs to be a 3-state boolean in order to pass the status correctly.
2020-02-13 14:03:07 -05:00
Gilbert Chen
e888b6d7e5 Fix bugs in sftp retrying
* Fixed a bug caused by nil sftp client during retry
* Simplify the retry logic in UploadFile
* Change the number of tries from 6 to 8
2020-01-13 16:26:43 -05:00
Richard Sanger
aa07feeac0 Fix bug in gcd: init fails to create directories
init would not create directories in the root of a drive as
it did not know the root drive's ID.
2020-01-11 17:25:21 +13:00
Gilbert Chen
d43fe1a282 Release the list of chunk hashes after processing each snapshot.
The chunk hash list isn't needed any more after being consolidated.
Releasing it immediately after use helps reduce the memory usage.
2019-12-09 22:45:16 -05:00
Richard Sanger
7719bb9f29 Fix: backup to shared drive root
Allows writing to the drive root using:
gcd://driveid@ or gcd://driveid@/

To write to the root of the default user's drive use the special
shared drive named 'root':
gcd://root@/
2019-11-27 00:00:40 +13:00
Gilbert Chen
504d07bd51 Bump version to 2.3.0 2019-11-25 15:45:41 -05:00
Gilbert Chen
0abb4099f6 Fixed test errors -- parse test flags in one place 2019-11-25 15:44:03 -05:00
Gilbert Chen
694494ea54 Throw an error, instead of a warning, if pre/post script fails 2019-11-24 22:38:29 -05:00
Gilbert Chen
165152493c For the check command, -tabular should imply -all just like -stats 2019-11-24 20:45:05 -05:00
Gilbert Chen
e02041f4ed Increase the number of retries for the b2 backend from 10 to 15
Retrying 10 times means a retry window of about 5 minutes, which might be too
short.  15 corresponds to about 10 minutes.
2019-11-23 15:28:03 -05:00
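Rough arithmetic, assuming the capped exponential backoff shown later in this diff (delays of 2, 4, 8, ... seconds, capped at 64 and jittered to 50-100%): ten retries wait on average about 0.75 x (2+4+8+16+32+64x5) = 287 seconds, roughly 5 minutes; fifteen retries add five more 64-second delays, about 527 seconds, in the ballpark of the 10 minutes cited here.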
Gilbert Chen
a99f059b52 Allow a custom location for the filters file
You can now add a key 'filters' in the preferences file that points to the
path of the filters file.  If this key is not found in the preferences,
the default location '.duplicacy/filters' is used.

There is a new option '-filters' for the set command that sets this key in
the preferences, but you can also edit the file directly.
2019-11-23 15:23:26 -05:00
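A hypothetical invocation of the new option (the path is an example); it records the 'filters' key in the preferences file:

    duplicacy set -filters /path/to/common-filters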
Gilbert Chen
f022a6f684 Fixed build errors in tests 2019-11-22 21:17:17 -05:00
Gilbert Chen
791c61eecb Fixed missing format parameters 2019-11-22 20:32:19 -05:00
gilbertchen
6ad27adaea Merge pull request #578 from gboudreau/vss-catalina
Bugfix: allow -vss usage on macOS Catalina
2019-11-22 16:46:31 -05:00
Gilbert Chen
9abfbe1ee0 Update pkg/sftp to 1.10.1
The old version has a bug where a connection closed by the server may cause
a deadlock due to a full channel buffer.
2019-11-21 23:36:17 -05:00
Gilbert Chen
b32c3b2cd5 If a symlink is a directory, match it against the patterns as a directory 2019-11-21 23:10:54 -05:00
Gilbert Chen
9baafdafa2 Remove a log message meant for debugging only 2019-11-21 21:23:31 -05:00
Gilbert Chen
ca7d927840 Use joinPath instead of filepath.Join to generate UNC paths
This fix probably isn't necessary, since filepath.Join can now produce UNC
paths too with the latest versions of Go.  However, we still want to keep
it for consistency.
2019-11-21 14:56:31 -05:00
Richard Sanger
426110e961 Adds support for GDrive Shared Drives
A shared drive can be accessed via
gcd://sharedDriveId@path/to/storage

sharedDriveId is optional; if omitted, duplicacy stores to the user's drive.
This remains backwards compatible with existing drives. E.g.
gcd://path/to/storage

Note: Shared Drives were previously named Team Drives.
2019-11-06 00:51:12 +13:00
Guillaume Boudreau
0ca9cd476e Bugfix: allow -vss usage on macOS Catalina
Use `tmutil listlocalsnapshots` to find the snapshot name we need; fall back to `com.apple.TimeMachine.SNAPSHOT_DATE` (same as before) if we can't find it.
2019-10-28 11:55:15 -04:00
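A small Go sketch of the lookup step (the parsing is an assumption; tmutil prints snapshot names such as com.apple.TimeMachine.2019-10-28-115515):

    package sketch

    import (
        "os/exec"
        "strings"
    )

    // localSnapshots runs `tmutil listlocalsnapshots` and returns the
    // APFS snapshot names found for the given volume.
    func localSnapshots(volume string) ([]string, error) {
        output, err := exec.Command("tmutil", "listlocalsnapshots", volume).Output()
        if err != nil {
            return nil, err
        }
        var names []string
        for _, line := range strings.Split(string(output), "\n") {
            line = strings.TrimSpace(line)
            if strings.HasPrefix(line, "com.apple.TimeMachine.") {
                names = append(names, line)
            }
        }
        return names, nil
    }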
gilbertchen
abf9a94fc9 Merge pull request #575 from gilbertchen/rsa_encryption
Implement RSA encryption
2019-10-12 11:14:29 -04:00
Gilbert Chen
9a0d60ca84 Store the public key in the config to ensure a one-key policy.
Also make sure that RSA encryption works with the copy command.
2019-09-23 12:53:43 -04:00
Gilbert Chen
90833f9d86 Implement RSA encryption
This is to support public key encryption in the backup operation.  You can use
the -key option to supply the public key to the backup command, and then the
same option to supply the private key when restoring a previous revision.

The storage must be encrypted for this to work.
2019-09-20 14:19:18 -04:00
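Hypothetical invocations based on the -key flags added in this changeset (the snapshot id, storage url, and key file names are examples):

    duplicacy init -e -key public.pem mywork b2://my-bucket
    duplicacy backup
    duplicacy restore -r 1 -key private.pem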
Gilbert Chen
58387c0951 Bump version to 2.2.3 2019-06-28 10:06:55 -04:00
gilbertchen
81bb188211 Merge pull request #570 from philband/fix-b2_findbucket_401
Bugfix [B2]: Add BucketName to API call in FindBucket function
2019-06-28 09:53:36 -04:00
Philipp Bandow
5821cad8c5 Add BucketName to API call in FindBucket function 2019-06-28 12:15:45 +02:00
Gilbert Chen
662805fbbd Update ACKNOWLEDGEMENTS.md 2019-06-25 22:59:22 -04:00
Gilbert Chen
fc35ddf7d1 Bump version to 2.2.2 2019-06-20 22:22:41 -04:00
gilbertchen
6efcd37c5c Merge pull request #562 from gilbertchen/azure_retry
Retry on broken pipe in Azure backend
2019-06-20 12:14:48 -04:00
gilbertchen
58558b8a2f Merge pull request #566 from TheBestPessimist/patch-1
Update the issue template
2019-06-20 12:14:08 -04:00
Gilbert Chen
045be3905b Better handling of B2 authorization failures
This commit fixes two issues with Backblaze B2 authorization:
* every thread may call b2_authorize_account at the same time when there
are 401 errors
* if B2 has a login outage, then all threads will call b2_authorize_account
repeatedly without delay

A simple solution is to limit b2_authorize_account to one call every
30 seconds regardless of how many threads there are.  If the call to
b2_authorize_account is not allowed, the random exponential backoff is
performed instead.
2019-06-13 22:43:07 -04:00
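A minimal sketch of that throttle, assuming a shared mutex and timestamp on the client (names are illustrative):

    package sketch

    import (
        "sync"
        "time"
    )

    // authThrottle allows at most one b2_authorize_account attempt per
    // 30-second window; other callers are told to back off instead.
    type authThrottle struct {
        lock     sync.Mutex
        lastAuth int64 // unix time of the last authorization attempt
    }

    func (t *authThrottle) mayAuthorize() bool {
        t.lock.Lock()
        defer t.lock.Unlock()
        now := time.Now().Unix()
        if now-t.lastAuth < 30 {
            return false // too soon: do the random exponential backoff
        }
        t.lastAuth = now
        return true
    }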
Gilbert Chen
4da7f7b6f9 Check -files may download a chunk multiple times
This commit fixed a bug that caused 'check -files' to download the same chunk
multiple times if shared by multiple small files.
2019-06-13 14:47:21 -04:00
Gilbert Chen
41668d4bbd Update dependency github.com/gilbertchen/go.dbus 2019-06-07 15:17:46 -04:00
Gilbert Chen
9d4ac34f4b Don't compare hashes of empty files in the diff command
Empty files may or may not have a hash depending on whether the -hash option is used
during backup.
2019-06-06 12:35:34 -04:00
Gilbert Chen
eba5aa6eea Bump version to 2.2.1 2019-06-04 22:28:04 -04:00
Gilbert Chen
47c4c25d8b Fixed a bug where restoring files didn't work due to a missing parent directory
The root cause was that path.Dir can't handle Windows paths that use \ as the
separator.
2019-06-04 21:57:10 -04:00
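The distinction, in a runnable Go sketch:

    package main

    import (
        "fmt"
        "path"
        "path/filepath"
    )

    func main() {
        // path.Dir only understands '/' separators, so a backslash path
        // has no directory component as far as it is concerned.
        fmt.Println(path.Dir(`C:\backup\file.txt`)) // "." on every platform
        // filepath.Dir uses the OS separator: "C:\backup" on Windows.
        fmt.Println(filepath.Dir(`C:\backup\file.txt`))
    }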
Gilbert Chen
37781f9540 Switch CLI licensing to per-computer 2019-05-30 13:35:09 -04:00
TheBestPessimist
282fe4edd2 Update the issue template 2019-05-24 21:10:14 +03:00
TheBestPessimist
33c71ca5f8 Update the issue template
Use the new template format and ask people to use the forum **more thoroughly**.
2019-05-24 16:19:59 +03:00
Gilbert Chen
6e7d45caac Add a TRACE message when skipping a file to be restored 2019-05-22 12:03:21 -04:00
Gilbert Chen
8e9caea201 Retry on broken pipe in Azure backend
Azure sometimes disconnects randomly when uploading files.  The
returned error is 'broken pipe', but it is wrapped deep in multiple
levels of errors, so we have to check the error string instead.
2019-05-07 22:35:51 -04:00
Gilbert Chen
18ba415f56 Bump version to 2.2.0 2019-05-06 12:26:40 -04:00
Gilbert Chen
458687d543 The cat command doesn't need to load the entire file into memory
It can print out each chunk as soon as it is retrieved.  This avoids
reconstructing the file in memory, which can be an issue with large files.
2019-05-03 11:33:16 -04:00
Gilbert Chen
57a408a577 Rework the Backblaze B2 backend
* All API calls, including UploadFile, go through the call() function
* New retry mechanism limiting the maximum backoff each time to 1 minute
* Add an env var DUPLICACY_B2_RETRIES to specify the number of retries
* Handle special/unicode characters in repository ids
* Allow a directory in a bucket to be used as the storage destination
2019-04-30 23:31:57 -04:00
Gilbert Chen
a73ed462b6 Roll back the import path change 'import duplicacy/src'
Import paths are relative to $GOPATH and $GOROOT, so 'import duplicacy/src'
unfortunately doesn't work.
2019-04-27 22:01:17 -04:00
gilbertchen
e56efc1d3a Merge pull request #554 from arikorn/ask_b2_application_key
Request B2 "Backblaze Account or Application ID"
2019-04-27 10:55:12 -04:00
gilbertchen
bb58f42a37 Merge pull request #529 from turtleleo/patch-1
Retry on 408 error from Google Drive (Update to duplicacy_gcdstorage.go)
2019-04-27 10:53:26 -04:00
Thomas Tempelmann
22e8d9e60a Change the import from "github.com/gilbertchen/duplicacy/src" to "duplicacy/src" so that a forked project uses the forked "src" dir and not the original one. 2019-04-27 00:10:50 -04:00
Gilbert Chen
4eb174cec5 Remove a few util functions that aren't necessary 2019-04-26 23:47:25 -04:00
gilbertchen
6fd3fbd568 Merge pull request #514 from a-s-z-home/filter_extension
Filter extension: @ to include another file
2019-04-26 21:56:42 -04:00
Gilbert Chen
a6fe3d785e Fixed a MoveFile bug in Wasabi when the storage is at the root of a bucket
When the storage dir is empty, the destination path passed to the MOVE api starts
with a /, which causes Wasabi to fail silently.
2019-04-24 16:48:25 -04:00
Gilbert Chen
1da151f9d9 Add an additional lookup for a chunk that isn't in the chunk list
A chunk not in the chunk list may actually exist in two scenarios:
* the chunk may be a special snapshot chunk that contains the chunk sequence,
  so it may be resurrected by the chunk downloader if it had been turned into
  a fossil before
* if the API to list all chunks doesn't return the complete list due to some
  bug

This additional lookup avoids reporting the missing chunk prematurely.
2019-04-21 20:32:21 -04:00
Gilbert Chen
4b69c1162e Fix a memory issue where check -tabular uses too much memory with many revisions
The call to GetSnapshotChunks in ShowStatisticsTabular sets keepChunkHashes to
true -- this can cause too much memory consumption with hundreds of revisions.
2019-04-20 22:47:03 -04:00
Gilbert Chen
abcb4d75c1 Fixed a bug where filenames starting with i or e are mistakenly interpreted as regex 2019-04-07 22:43:36 -04:00
Ari Kornfeld
10d2058738 Request B2 "Backblaze Account or Application ID" (rather than "Account ID")
fixes #539 (Duplicacy init for B2 storage still ask for account ID)
2019-04-02 22:20:29 -07:00
Gilbert Chen
43a5ffe011 Fixed a bug where a wrong variable is used as the number of threads 2019-03-13 15:38:26 -04:00
Gilbert Chen
d16273fe2b Set the content length for upload 2019-03-04 15:34:32 -05:00
Philipp Bandow
a55ac1b7ad Add option to use an ssh key signed with a certificate to authenticate 2019-02-28 01:37:14 +01:00
Gilbert Chen
2b56d576c7 Fixed a webdav compatibility issue with rclone and other bugs 2019-02-26 14:00:02 -05:00
turtleleo
82c6c15f1c Update duplicacy_gcdstorage.go
Add automatic retry on receiving error 408 (request timeout) from Google Drive.
2019-01-16 13:12:46 -05:00
gilbertchen
bebd7c4b77 Merge pull request #495 from plasticrake/environment-variables
Replace special characters in environment variable name with underscores
2019-01-04 17:04:29 -05:00
gilbertchen
46376d82ed Merge pull request #489 from gilbertchen/sftp_retry
Retry on EOF errors in the SFTP backend
2019-01-04 13:53:44 -05:00
gilbertchen
c4a3dd1eeb Merge pull request #454 from mikecook/master
spelling fix, go fmt, go vet
2019-01-04 13:50:17 -05:00
gilbertchen
31c25e98f7 Merge branch 'master' into master 2019-01-04 13:48:44 -05:00
gilbertchen
242db8377e Merge pull request #447 from s4y/patch-1
Acknowledge malware/spam warnings from GCD
2019-01-04 13:33:11 -05:00
Gilbert Chen
e6d8b7d070 Use 1024*1024 as 1M as opposed to 10^6 2019-01-04 13:29:30 -05:00
Gilbert Chen
bb652d0a8c Add a Sync call before close when uploading a file to local storage 2019-01-03 12:44:50 -05:00
Gilbert Chen
a354d03bc9 Remove a binary file accidentally checked in 2019-01-02 21:36:04 -05:00
Michael Cook
4b9524bd43 go vet: unreachable code 2018-12-29 13:20:11 +01:00
Michael Cook
a782d42ad6 go vet: result of fmt.Errorf call not used 2018-12-29 13:20:10 +01:00
Michael Cook
0762c448c4 gofmt -s 2018-12-29 13:20:10 +01:00
Michael Cook
741644b575 spelling 2018-12-29 13:04:40 +01:00
a-s-z-home
df7487cc0b Merge remote-tracking branch 'origin/master' into filter_extension 2018-11-15 01:40:39 +01:00
Gilbert Chen
8aa67c8162 Support ssh private key files encrypted by passphrases 2018-11-09 14:17:56 -05:00
Gilbert Chen
53548a895f Add the \\?\ prefix to all paths on Windows 2018-11-08 21:29:02 -05:00
a-s-z-home
5e8baab4ec - Reverted changes to the exclude mechanism of the .duplicacy directory. 2018-11-05 22:39:11 +01:00
a-s-z-home
e1fa39008d Use new filter processing function for restore command.
- You can now include a filter file by using "@<filename>".
2018-11-05 00:59:39 +01:00
a-s-z-home
aaebf4510c - Replaced the static check for the .duplicacy directory with predefined filters.
Do not "misuse" the nobackupFile property to trigger this feature.
- Restructured the ProcessFilterFile function and split it into smaller parts.
- Prepared the new filter syntax for arguments of the restore command.
2018-11-05 00:32:12 +01:00
a-s-z-home
96dd28995b Added an include mechanism for the filters file.
- Using @<filename>, you can now include other files. Relative paths are supported.
  This is useful if you have several repositories with some different filters and a common filter base set.
2018-11-03 20:39:03 +01:00
a-s-z-home
166f6e6266 Added string array helper functions Contains and Find. 2018-11-03 20:20:00 +01:00
a-s-z-home
86c89f43a0 Automatically exclude the .duplicacy directory only if nobackup_file is not
set.
2018-11-03 20:13:43 +01:00
Patrick Seal
cce798ceac Replace special characters in environment variable name with underscores 2018-09-18 11:16:31 -07:00
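A plausible sketch of the substitution (the DUPLICACY_<NAME>_PASSWORD shape follows duplicacy's variable naming; the exact regex is an assumption):

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // Anything outside [A-Za-z0-9_] becomes '_', since shells cannot
    // export variables containing spaces or punctuation.
    var invalidEnvChars = regexp.MustCompile(`[^A-Za-z0-9_]`)

    func envName(storageName string) string {
        sanitized := invalidEnvChars.ReplaceAllString(storageName, "_")
        return "DUPLICACY_" + strings.ToUpper(sanitized) + "_PASSWORD"
    }

    func main() {
        fmt.Println(envName("my b2-storage")) // DUPLICACY_MY_B2_STORAGE_PASSWORD
    }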
Gilbert Chen
ab28115f95 Retry on EOF errors in the SFTP backend 2018-08-29 23:15:00 -04:00
Sidney San Martín
20172e07e6 Acknowledge malware/spam warnings from GCD
If Google thinks that a file is malware or spam (which can happen
spuriously to blobs of encrypted data), it will prevent the initial
download and return an error with reason "cannotDownloadAbusiveFile".
The API expects a program to prompt the user in this case and then,
optionally, let them bypass it.

Ideally duplicacy should prompt, but this patch just logs a warning.

When I printed `err.(*googleapi.Error)`, its `Errors` field was empty,
hence the sketchy string matching. It's possible that I did something
wrong, though.
2018-06-13 10:49:04 -07:00
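A sketch of the retry using google.golang.org/api/drive/v3 (the string match mirrors the "sketchy string matching" described above; error handling is simplified):

    package sketch

    import (
        "log"
        "net/http"
        "strings"

        drive "google.golang.org/api/drive/v3"
    )

    // downloadFlagged retries a download with acknowledgeAbuse=true after
    // a "cannotDownloadAbusiveFile" error, logging a warning instead of
    // prompting the user.
    func downloadFlagged(service *drive.Service, fileID string) (*http.Response, error) {
        response, err := service.Files.Get(fileID).Download()
        if err == nil || !strings.Contains(err.Error(), "cannotDownloadAbusiveFile") {
            return response, err
        }
        log.Printf("file %s was flagged as malware/spam; downloading anyway", fileID)
        return service.Files.Get(fileID).AcknowledgeAbuse(true).Download()
    }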
46 changed files with 1798 additions and 762 deletions

View File

@@ -1,5 +1,17 @@
-Please submit an issue for bug reports or feature requests. If you have any questions please post them on https://forum.duplicacy.com.
-When you're reporting a bug, please specify the OS, version, command line arguments, or any info that you think is helpful for the diagnosis. If Duplicacy reports an error, please post the program output here.
-Note that this repository hosts the CLI version of Duplicacy only. If you're reporting anything related to the GUI version, please visit https://forum.duplicacy.com.
+---
+name: Please use the official forum
+about: Please use the official forum instead of Github
+title: 'Please use the official forum'
+labels: ''
+assignees: ''
+---
+Please **use the [Duplicacy Forum](https://forum.duplicacy.com/)** when reporting bugs, making feature requests, asking for help or simply praising Duplicacy for its ease of use.
+We strongly encourage you to create an account on the forum and use that platform for discussion as there is a higher chance that someone there will talk to you.
+There is a handful of people watching the Github Issues and we are in the process of moving **all** of them to the forum as well. Most likely you will not receive an answer here or it will be very slow and you will be pointed to the forum.
+We have already created a comprehensive [Guide](https://forum.duplicacy.com/t/duplicacy-user-guide/1197), and a [How-To](https://forum.duplicacy.com/c/how-to) category which stores more wisdom than these issues on Github.

View File

@@ -14,3 +14,4 @@ Duplicacy is based on the following open source projects:
|https://github.com/pcwizz/xattr | BSD-2-Clause |
|https://github.com/minio/blake2b-simd | Apache-2.0 |
|https://github.com/go-ole/go-ole | MIT |
|https://github.com/ncw/swift | MIT |

Gopkg.lock (generated)
View File

@@ -7,17 +7,11 @@
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["version"]
revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
version = "v14.2.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
version = "v10.2.0"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
version = "v10.15.5"
[[projects]]
branch = "master"
@@ -27,9 +21,9 @@
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
revision = "a32b1dcd091264b5dee7b386149b6cc3823395c9"
version = "v1.12.31"
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
version = "v1.30.7"
[[projects]]
name = "github.com/bkaradzic/go-lz4"
@@ -40,14 +34,14 @@
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
version = "v3.1.0"
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage"]
revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"
packages = ["storage","version"]
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
[[projects]]
branch = "master"
@@ -59,7 +53,7 @@
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
packages = ["."]
revision = "90711b603312b1f973f3a5da3793ac4f1e5c2f2a"
revision = "994e692c5061cefa14e4296600a773de7119aa15"
[[projects]]
name = "github.com/gilbertchen/go-ole"
@@ -71,7 +65,7 @@
branch = "master"
name = "github.com/gilbertchen/go.dbus"
packages = ["."]
revision = "9e442e6378618c083fd3b85b703ffd202721fb17"
revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
[[projects]]
branch = "master"
@@ -98,33 +92,33 @@
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
[[projects]]
name = "github.com/go-ini/ini"
packages = ["."]
revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
version = "v1.32.0"
branch = "master"
name = "github.com/golang/groupcache"
packages = ["lru"]
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
packages = ["proto","protoc-gen-go","protoc-gen-go/descriptor","protoc-gen-go/generator","protoc-gen-go/generator/internal/remap","protoc-gen-go/grpc","protoc-gen-go/plugin","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "84668698ea25b64748563aa20726db66a6b8d299"
version = "v1.3.5"
[[projects]]
name = "github.com/googleapis/gax-go"
packages = ["."]
revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
version = "v2.0.0"
packages = [".","v2"]
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
version = "v2.0.3"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
revision = "c2b33e84"
[[projects]]
branch = "master"
name = "github.com/kr/fs"
packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
@@ -139,22 +133,22 @@
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
version = "v1.0.50"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
revision = "614d223910a179a466c1767a985424175c39b465"
version = "v0.9.1"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
version = "1.0.0"
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
version = "v1.11.0"
[[projects]]
name = "github.com/satori/go.uuid"
@@ -168,63 +162,92 @@
packages = ["."]
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
[[projects]]
name = "go.opencensus.io"
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
revision = "d835ff86be02193d324330acdb7d65546b05f814"
version = "v0.22.3"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","ssh","ssh/agent","ssh/terminal"]
revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94"
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
[[projects]]
branch = "master"
name = "golang.org/x/exp"
packages = ["apidiff","cmd/apidiff"]
revision = "e8c3332aa8e5b8e6acb4707c3a7e5979052b20aa"
[[projects]]
name = "golang.org/x/mod"
packages = ["module","semver"]
revision = "ed3ec21bb8e252814c380df79a80f366440ddb2d"
version = "v0.2.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
revision = "9dfe39835686865bff950a07b394c12a98ddc811"
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
[[projects]]
branch = "master"
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix","windows"]
revision = "82aafbf43bf885069dc71b7e7c2f9d7a614d47da"
packages = ["cpu","unix","windows"]
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "88f656faf3f37f690df1a32515b479415e1a6769"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = ["cmd/goimports","go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/fastwalk","internal/gocommand","internal/gopathwalk","internal/imports","internal/packagesinternal","internal/telemetry/event"]
revision = "700752c244080ed7ef6a61c3cfd73382cd334e57"
[[projects]]
branch = "master"
name = "golang.org/x/xerrors"
packages = [".","internal"]
revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544"
[[projects]]
name = "google.golang.org/api"
packages = ["drive/v3","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
revision = "17b5f22a248d6d3913171c1a557552ace0d9c806"
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
version = "v0.21.0"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
version = "v1.0.0"
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
version = "v1.6.5"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
revision = "891aceb7c239e72692819142dfca057bdcbfcb96"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
version = "v1.8.0"
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
version = "v1.28.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
inputs-digest = "e124cf64f7f8770e51ae52ac89030d512da946e3fdc2666ebd3a604a624dd679"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -31,7 +31,7 @@
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.12.31"
version = "1.30.7"
[[constraint]]
name = "github.com/bkaradzic/go-lz4"
@@ -75,7 +75,7 @@
[[constraint]]
name = "github.com/pkg/sftp"
version = "1.0.0"
version = "1.10.1"
[[constraint]]
branch = "master"
@@ -86,9 +86,13 @@
name = "golang.org/x/net"
[[constraint]]
branch = "master"
name = "golang.org/x/oauth2"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[constraint]]
branch = "master"
name = "google.golang.org/api"
version = "0.21.0"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.28.0"

View File

@@ -1,8 +1,7 @@
Copyright © 2017 Acrosync LLC
* Free for personal use or commercial trial
* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license

View File

@@ -90,8 +90,7 @@ The following table compares the feature lists of all these backup tools:
## License
* Free for personal use or commercial trial
* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
* CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
* Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license

View File

@@ -7,6 +7,7 @@ package main
import (
"encoding/json"
"fmt"
"net/http"
"os"
"os/exec"
"os/signal"
@@ -16,7 +17,6 @@ import (
"runtime"
"strconv"
"strings"
"net/http"
_ "net/http/pprof"
@@ -159,7 +159,9 @@ func setGlobalOptions(context *cli.Context) {
}()
}
for _, logID := range context.GlobalStringSlice("suppress") {
duplicacy.SuppressLog(logID)
}
duplicacy.RunInBackground = context.GlobalBool("background")
}
@@ -203,13 +205,24 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
}
if err != nil {
duplicacy.LOG_WARN("SCRIPT_ERROR", "Failed to run script: %v", err)
duplicacy.LOG_ERROR("SCRIPT_ERROR", "Failed to run %s script: %v", script, err)
return false
}
return true
}
func loadRSAPrivateKey(keyFile string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
if keyFile == "" {
return
}
prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
passphrase := duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
}
func initRepository(context *cli.Context) {
configRepository(context, true)
}
@@ -309,11 +322,11 @@ func configRepository(context *cli.Context, init bool) {
repositoryPath = context.String("repository")
}
preference := duplicacy.Preference{
Name: storageName,
SnapshotID: snapshotID,
Name: storageName,
SnapshotID: snapshotID,
RepositoryPath: repositoryPath,
StorageURL: storageURL,
Encrypted: context.Bool("encrypt"),
StorageURL: storageURL,
Encrypted: context.Bool("encrypt"),
}
storage := duplicacy.CreateStorage(preference, true, 1)
@@ -321,6 +334,11 @@ func configRepository(context *cli.Context, init bool) {
if preference.Encrypted {
prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
storagePassword = duplicacy.GetPassword(preference, "password", prompt, false, true)
} else {
if context.String("key") != "" {
duplicacy.LOG_ERROR("STORAGE_CONFIG", "RSA encryption can't be enabled with an unencrypted storage")
return
}
}
existingConfig, _, err := duplicacy.DownloadConfig(storage, storagePassword)
@@ -436,7 +454,7 @@ func configRepository(context *cli.Context, init bool) {
iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
}
duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
minimumChunkSize, storagePassword, otherConfig, bitCopy)
minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"))
}
duplicacy.Preferences = append(duplicacy.Preferences, preference)
@@ -533,8 +551,14 @@ func setPreference(context *cli.Context) {
if triBool.IsSet() {
newPreference.DoNotSavePassword = triBool.IsTrue()
}
newPreference.NobackupFile = context.String("nobackup-file")
if context.String("nobackup-file") != "" {
newPreference.NobackupFile = context.String("nobackup-file")
}
if context.String("filters") != "" {
newPreference.FiltersFile = context.String("filters")
}
key := context.String("key")
value := context.String("value")
@@ -650,7 +674,7 @@ func changePassword(context *cli.Context) {
duplicacy.LOG_INFO("CONFIG_CLEAN", "The local copy of the old config has been removed")
}
}
} ()
}()
err = storage.DeleteFile(0, "config")
if err != nil {
@@ -717,7 +741,7 @@ func backupRepository(context *cli.Context) {
uploadRateLimit := context.Int("limit-rate")
enumOnly := context.Bool("enum-only")
storage.SetRateLimits(0, uploadRateLimit)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -784,31 +808,21 @@ func restoreRepository(context *cli.Context) {
pattern = pattern[1:]
}
if duplicacy.IsUnspecifiedFilter(pattern) {
pattern = "+" + pattern
}
if duplicacy.IsEmptyFilter(pattern) {
continue
}
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
valid, err := duplicacy.IsValidRegex(pattern[2:])
if !valid || err != nil {
duplicacy.LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
}
}
patterns = append(patterns, pattern)
}
patterns = duplicacy.ProcessFilterLines(patterns, make([]string, 0))
duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))
duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
storage.SetRateLimits(context.Int("limit-rate"), 0)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
@@ -846,7 +860,7 @@ func listSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
id := preference.SnapshotID
@@ -859,6 +873,9 @@ func listSnapshots(context *cli.Context) {
showFiles := context.Bool("files")
showChunks := context.Bool("chunks")
// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
loadRSAPrivateKey(context.String("key"), preference, backupManager, resetPassword)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
@@ -881,7 +898,12 @@ func checkSnapshots(context *cli.Context) {
runScript(context, preference.Name, "pre")
storage := duplicacy.CreateStorage(*preference, false, 1)
threads := context.Int("threads")
if threads < 1 {
threads = 1
}
storage := duplicacy.CreateStorage(*preference, false, threads)
if storage == nil {
return
}
@@ -894,9 +916,11 @@ func checkSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
id := preference.SnapshotID
if context.Bool("all") {
id = ""
@@ -907,11 +931,12 @@ func checkSnapshots(context *cli.Context) {
showStatistics := context.Bool("stats")
showTabular := context.Bool("tabular")
checkFiles := context.Bool("files")
checkChunks := context.Bool("chunks")
searchFossils := context.Bool("fossils")
resurrect := context.Bool("resurrect")
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads)
runScript(context, preference.Name, "post")
}
@@ -949,9 +974,11 @@ func printFile(context *cli.Context) {
snapshotID = context.String("id")
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
file := ""
@@ -1005,11 +1032,13 @@ func diff(context *cli.Context) {
}
compareByHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
runScript(context, preference.Name, "post")
}
@@ -1048,7 +1077,7 @@ func showHistory(context *cli.Context) {
revisions := getRevisions(context)
showLocalHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1111,7 +1140,7 @@ func pruneSnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1151,10 +1180,12 @@ func copySnapshots(context *cli.Context) {
sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
}
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile)
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "")
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
loadRSAPrivateKey(context.String("key"), source, sourceManager, false)
_, destination := getRepositoryPreference(context, context.String("to"))
if destination.Name == source.Name {
@@ -1184,7 +1215,7 @@ func copySnapshots(context *cli.Context) {
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword, destination.NobackupFile)
destinationPassword, "", "")
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(destination.Name)
@@ -1262,7 +1293,7 @@ func infoStorage(context *cli.Context) {
for _, dir := range dirs {
if len(dir) > 0 && dir[len(dir)-1] == '/' {
duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir) - 1])
duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir)-1])
}
}
@@ -1298,7 +1329,7 @@ func benchmark(context *cli.Context) {
}
threads := downloadThreads
if (threads < uploadThreads) {
if threads < uploadThreads {
threads = uploadThreads
}
@@ -1309,7 +1340,7 @@ func benchmark(context *cli.Context) {
if storage == nil {
return
}
duplicacy.Benchmark(repository, storage, int64(fileSize) * 1000000, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
duplicacy.Benchmark(repository, storage, int64(fileSize) * 1024 * 1024, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
}
func main() {
@@ -1362,6 +1393,11 @@ func main() {
Usage: "initialize a new repository at the specified path rather than the current working directory",
Argument: "<path>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA public key to encrypt file chunks",
Argument: "<public key>",
},
},
Usage: "Initialize the storage if necessary and the current directory as the repository",
ArgsUsage: "<snapshot id> <storage url>",
@@ -1469,6 +1505,11 @@ func main() {
Usage: "restore from the specified storage instead of the default one",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
},
Usage: "Restore the repository to a previously saved snapshot",
ArgsUsage: "[--] [pattern] ...",
@@ -1514,6 +1555,11 @@ func main() {
Usage: "retrieve snapshots from the specified storage",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
},
Usage: "List snapshots",
ArgsUsage: " ",
@@ -1553,6 +1599,10 @@ func main() {
Name: "files",
Usage: "verify the integrity of every file",
},
cli.BoolFlag{
Name: "chunks",
Usage: "verify the integrity of every chunk",
},
cli.BoolFlag{
Name: "stats",
Usage: "show deduplication statistics (imply -all and all revisions)",
@@ -1566,6 +1616,17 @@ func main() {
Usage: "retrieve snapshots from the specified storage",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.IntFlag{
Name: "threads",
Value: 1,
Usage: "number of threads used to verify chunks",
Argument: "<n>",
},
},
Usage: "Check the integrity of snapshots",
ArgsUsage: " ",
@@ -1589,6 +1650,11 @@ func main() {
Usage: "retrieve the file from the specified storage",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
},
Usage: "Print to stdout the specified file, or the snapshot content if no file is specified",
ArgsUsage: "[<file>]",
@@ -1617,6 +1683,11 @@ func main() {
Usage: "retrieve files from the specified storage",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
},
Usage: "Compare two snapshots or two revisions of a file",
ArgsUsage: "[<file>]",
@@ -1773,14 +1844,19 @@ func main() {
Argument: "<storage name>",
},
cli.BoolFlag{
Name: "bit-identical",
Usage: "(when using -copy) make the new storage bit-identical to also allow rsync etc.",
Name: "bit-identical",
Usage: "(when using -copy) make the new storage bit-identical to also allow rsync etc.",
},
cli.StringFlag{
Name: "repository",
Usage: "specify the path of the repository (instead of the current working directory)",
Argument: "<path>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA public key to encrypt file chunks",
Argument: "<public key>",
},
},
Usage: "Add an additional storage to be used for the existing repository",
ArgsUsage: "<storage name> <snapshot id> <storage url>",
@@ -1815,10 +1891,10 @@ func main() {
Arg: "true",
},
cli.StringFlag{
Name: "nobackup-file",
Usage: "Directories containing a file with this name will not be backed up",
Name: "nobackup-file",
Usage: "Directories containing a file with this name will not be backed up",
Argument: "<file name>",
Value: "",
Value: "",
},
cli.StringFlag{
Name: "key",
@@ -1833,6 +1909,11 @@ func main() {
Usage: "use the specified storage instead of the default one",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "filters",
Usage: "specify the path of the filters file containing include/exclude patterns",
Argument: "<file path>",
},
},
Usage: "Change the options for the default or specified storage",
ArgsUsage: " ",
@@ -1879,6 +1960,11 @@ func main() {
Usage: "number of uploading threads",
Argument: "<n>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks from the source storage",
Argument: "<private key>",
},
},
Usage: "Copy snapshots between compatible storages",
ArgsUsage: " ",
@@ -1984,8 +2070,13 @@ func main() {
Argument: "<address:port>",
},
cli.StringFlag{
Name: "comment",
Usage: "add a comment to identify the process",
Name: "comment",
Usage: "add a comment to identify the process",
},
cli.StringSliceFlag{
Name: "suppress, s",
Usage: "suppress logs with the specified id",
Argument: "<id>",
},
}
@@ -1993,13 +2084,13 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.1.2" + " (" + GitCommit + ")"
app.Version = "2.5.1" + " (" + GitCommit + ")"
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
for _ = range c {
for range c {
duplicacy.RunAtError()
os.Exit(1)
}

View File

@@ -104,7 +104,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
if dir == "snapshots/" {
for subDir, _ := range subDirs {
for subDir := range subDirs {
files = append(files, subDir)
}
@@ -166,9 +166,21 @@ func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chun
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
blob := storage.containers[threadIndex].GetBlobReference(filePath)
return blob.CreateBlockBlobFromReader(reader, nil)
tries := 0
for {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
blob := storage.containers[threadIndex].GetBlobReference(filePath)
err = blob.CreateBlockBlobFromReader(reader, nil)
if err == nil || !strings.Contains(err.Error(), "write: broken pipe") || tries >= 3 {
return err
}
LOG_INFO("AZURE_RETRY", "Connection unexpectedly terminated: %v; retrying", err)
tries++
}
}

View File

@@ -5,19 +5,22 @@
package duplicacy
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"fmt"
"bytes"
"time"
"sync"
"strconv"
"strings"
"time"
"net/url"
"net/http"
"math/rand"
"io/ioutil"
"crypto/sha1"
"encoding/hex"
"encoding/json"
"encoding/base64"
)
type B2Error struct {
@@ -39,67 +42,115 @@ var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_acco
type B2Client struct {
HTTPClient *http.Client
AccountID string
ApplicationKeyID string
ApplicationKey string
BucketName string
BucketID string
StorageDir string
Lock sync.Mutex
AuthorizationToken string
APIURL string
DownloadURL string
BucketName string
BucketID string
IsAuthorized bool
UploadURL string
UploadToken string
UploadURLs []string
UploadTokens []string
TestMode bool
Threads int
MaximumRetries int
TestMode bool
LastAuthorizationTime int64
}
func NewB2Client(applicationKeyID string, applicationKey string) *B2Client {
// URL encode the given path but keep the slashes intact
func B2Escape(path string) string {
var components []string
for _, c := range strings.Split(path, "/") {
components = append(components, url.QueryEscape(c))
}
return strings.Join(components, "/")
}
func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
for storageDir != "" && storageDir[0] == '/' {
storageDir = storageDir[1:]
}
if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
storageDir += "/"
}
maximumRetries := 15
if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
maximumRetries, _ = strconv.Atoi(value)
LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
}
client := &B2Client{
HTTPClient: http.DefaultClient,
ApplicationKeyID: applicationKeyID,
ApplicationKey: applicationKey,
DownloadURL: downloadURL,
StorageDir: storageDir,
UploadURLs: make([]string, threads),
UploadTokens: make([]string, threads),
Threads: threads,
MaximumRetries: maximumRetries,
}
return client
}
func (client *B2Client) retry(backoff int, response *http.Response) int {
func (client *B2Client) getAPIURL() string {
client.Lock.Lock()
defer client.Lock.Unlock()
return client.APIURL
}
func (client *B2Client) getDownloadURL() string {
client.Lock.Lock()
defer client.Lock.Unlock()
return client.DownloadURL
}
func (client *B2Client) retry(retries int, response *http.Response) int {
if response != nil {
if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
retryAfter, _ := strconv.Atoi(backoffList[0])
if retryAfter >= 1 {
time.Sleep(time.Duration(retryAfter) * time.Second)
return 0
return 1
}
}
}
if backoff == 0 {
backoff = 1
} else {
backoff *= 2
if retries >= client.MaximumRetries + 1 {
return 0
}
time.Sleep(time.Duration(backoff) * time.Second)
return backoff
retries++
delay := 1 << uint(retries)
if delay > 64 {
delay = 64
}
delayInSeconds := (rand.Float32() + 1.0) * float32(delay) / 2.0
time.Sleep(time.Duration(delayInSeconds) * time.Second)
return retries
}
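
The rewritten retry schedule is exponential with a cap and uniform jitter: a Retry-After header is honored verbatim, otherwise attempt n sleeps for a value drawn from [2^n/2, 2^n) seconds, never beyond [32, 64). A minimal sketch of just the delay arithmetic:

package main

import (
	"fmt"
	"math/rand"
)

// backoffDelay reproduces the sleep computation in B2Client.retry:
// exponential growth capped at 64 seconds, with uniform jitter in
// [delay/2, delay).
func backoffDelay(retries int) float32 {
	delay := 1 << uint(retries)
	if delay > 64 {
		delay = 64
	}
	return (rand.Float32() + 1.0) * float32(delay) / 2.0
}

func main() {
	for retries := 1; retries <= 8; retries++ {
		fmt.Printf("retry %d sleeps %5.2fs\n", retries, backoffDelay(retries))
	}
}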
func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
switch method {
case http.MethodGet:
break
case http.MethodHead:
break
case http.MethodPost:
break
default:
return nil, nil, 0, fmt.Errorf("unhandled http request method: " + method)
}
func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
io.ReadCloser, http.Header, int64, error) {
var response *http.Response
backoff := 0
for i := 0; i < 8; i++ {
var inputReader *bytes.Reader
retries := 0
for {
var inputReader io.Reader
isUpload := false
switch input.(type) {
default:
@@ -108,21 +159,43 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
return nil, nil, 0, err
}
inputReader = bytes.NewReader(jsonInput)
case []byte:
inputReader = bytes.NewReader(input.([]byte))
case int:
inputReader = bytes.NewReader([]byte(""))
case []byte:
isUpload = true
inputReader = bytes.NewReader(input.([]byte))
case *RateLimitedReader:
isUpload = true
rateLimitedReader := input.(*RateLimitedReader)
rateLimitedReader.Reset()
inputReader = rateLimitedReader
}
request, err := http.NewRequest(method, url, inputReader)
if isUpload {
if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
err := client.getUploadURL(threadIndex)
if err != nil {
return nil, nil, 0, err
}
}
requestURL = client.UploadURLs[threadIndex]
}
request, err := http.NewRequest(method, requestURL, inputReader)
if err != nil {
return nil, nil, 0, err
}
if url == B2AuthorizationURL {
if requestURL == B2AuthorizationURL {
request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.ApplicationKeyID+":"+client.ApplicationKey)))
} else if isUpload {
request.ContentLength, _ = strconv.ParseInt(requestHeaders["Content-Length"], 10, 64)
request.Header.Set("Authorization", client.UploadTokens[threadIndex])
} else {
client.Lock.Lock()
request.Header.Set("Authorization", client.AuthorizationToken)
client.Lock.Unlock()
}
if requestHeaders != nil {
@@ -133,7 +206,9 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
if client.TestMode {
r := rand.Float32()
if r < 0.5 {
if r < 0.5 && isUpload {
request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
} else if r < 0.75 {
request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
} else {
request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
@@ -142,28 +217,51 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
response, err = client.HTTPClient.Do(request)
if err != nil {
if url != B2AuthorizationURL {
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned an error: %v", url, err)
backoff = client.retry(backoff, response)
continue
// Don't retry when the first authorization request fails
if requestURL == B2AuthorizationURL && !client.IsAuthorized {
return nil, nil, 0, err
}
return nil, nil, 0, err
LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s' returned an error: %v", threadIndex, requestURL, err)
retries = client.retry(retries, response)
if retries <= 0 {
return nil, nil, 0, err
}
// Clear the upload URL to request a new one on retry
if isUpload {
client.UploadURLs[threadIndex] = ""
client.UploadTokens[threadIndex] = ""
}
continue
}
if response.StatusCode < 300 {
return response.Body, response.Header, response.ContentLength, nil
}
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s %s' returned status code %d", method, url, response.StatusCode)
e := &B2Error{}
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned status code %d", threadIndex, method, requestURL, response.StatusCode)
} else {
LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned %d %s", threadIndex, method, requestURL, response.StatusCode, e.Message)
}
io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
if response.StatusCode == 401 {
if url == B2AuthorizationURL {
if requestURL == B2AuthorizationURL {
return nil, nil, 0, fmt.Errorf("Authorization failure")
}
client.AuthorizeAccount()
continue
// Attempt authorization again; if it was throttled, fall through to the randomized backoff
_, allowed := client.AuthorizeAccount(threadIndex)
if allowed {
continue
}
} else if response.StatusCode == 403 {
if !client.TestMode {
return nil, nil, 0, fmt.Errorf("B2 cap exceeded")
@@ -176,32 +274,21 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
} else if response.StatusCode == 416 {
if http.MethodHead == method {
// 416 Requested Range Not Satisfiable
return nil, nil, 0, fmt.Errorf("URL request '%s' returned status code %d", url, response.StatusCode)
return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
}
} else if response.StatusCode == 429 || response.StatusCode == 408 {
backoff = client.retry(backoff, response)
continue
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
backoff = client.retry(backoff, response)
continue
} else {
LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
backoff = client.retry(backoff, response)
continue
}
defer response.Body.Close()
e := &B2Error{}
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
return nil, nil, 0, err
retries = client.retry(retries, response)
if retries <= 0 {
return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
}
return nil, nil, 0, e
if isUpload {
client.UploadURLs[threadIndex] = ""
client.UploadTokens[threadIndex] = ""
}
}
return nil, nil, 0, fmt.Errorf("Maximum backoff reached")
}
type B2AuthorizeAccountOutput struct {
@@ -211,11 +298,18 @@ type B2AuthorizeAccountOutput struct {
DownloadURL string
}
func (client *B2Client) AuthorizeAccount() (err error) {
func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bool) {
client.Lock.Lock()
defer client.Lock.Unlock()
readCloser, _, _, err := client.call(B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
// Don't authorize if the previous one was done less than 30 seconds ago
if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix() - 30 {
return nil, false
}
readCloser, _, _, err := client.call(threadIndex, B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
if err != nil {
return err
return err, true
}
defer readCloser.Close()
@@ -223,7 +317,7 @@ func (client *B2Client) AuthorizeAccount() (err error) {
output := &B2AuthorizeAccountOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return err
return err, true
}
// The account id may be different from the application key id so we're getting the account id from the returned
@@ -232,9 +326,15 @@ func (client *B2Client) AuthorizeAccount() (err error) {
client.AuthorizationToken = output.AuthorizationToken
client.APIURL = output.APIURL
client.DownloadURL = output.DownloadURL
if client.DownloadURL == "" {
client.DownloadURL = output.DownloadURL
}
LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
client.IsAuthorized = true
return nil
client.LastAuthorizationTime = time.Now().Unix()
return nil, true
}
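
The 30-second guard keeps a burst of 401 responses across threads from stampeding the authorization endpoint: the first caller re-authorizes, later callers get allowed == false and fall back to the backoff path. The pattern in isolation (type and field names here are illustrative):

package main

import (
	"fmt"
	"sync"
	"time"
)

type throttledAuth struct {
	lock     sync.Mutex
	lastAuth int64
}

// tryAuthorize reports false when an authorization completed less than
// 30 seconds ago, telling the caller to retry with backoff instead.
func (t *throttledAuth) tryAuthorize() bool {
	t.lock.Lock()
	defer t.lock.Unlock()
	now := time.Now().Unix()
	if t.lastAuth != 0 && t.lastAuth > now-30 {
		return false
	}
	// ... the actual authorization request would happen here ...
	t.lastAuth = now
	return true
}

func main() {
	var t throttledAuth
	fmt.Println(t.tryAuthorize()) // true: first authorization proceeds
	fmt.Println(t.tryAuthorize()) // false: throttled for the next 30 seconds
}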
type ListBucketOutput struct {
@@ -248,10 +348,11 @@ func (client *B2Client) FindBucket(bucketName string) (err error) {
input := make(map[string]string)
input["accountId"] = client.AccountID
input["bucketName"] = bucketName
url := client.APIURL + "/b2api/v1/b2_list_buckets"
url := client.getAPIURL() + "/b2api/v1/b2_list_buckets"
readCloser, _, _, err := client.call(url, http.MethodPost, nil, input)
readCloser, _, _, err := client.call(0, url, http.MethodPost, nil, input)
if err != nil {
return err
}
@@ -293,7 +394,7 @@ type B2ListFileNamesOutput struct {
NextFileId string
}
func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
maxFileCount := 1000
if singleFile {
@@ -311,20 +412,21 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
input := make(map[string]interface{})
input["bucketId"] = client.BucketID
input["startFileName"] = startFileName
input["startFileName"] = client.StorageDir + startFileName
input["maxFileCount"] = maxFileCount
input["prefix"] = client.StorageDir
for {
url := client.APIURL + "/b2api/v1/b2_list_file_names"
apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
requestHeaders := map[string]string{}
requestMethod := http.MethodPost
var requestInput interface{}
requestInput = input
if includeVersions {
url = client.APIURL + "/b2api/v1/b2_list_file_versions"
apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
} else if singleFile {
// handle a single file with no versions as a special case to download the last byte of the file
url = client.DownloadURL + "/file/" + client.BucketName + "/" + startFileName
apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
// requesting byte -1 works for empty files where 0-0 fails with a 416 error
requestHeaders["Range"] = "bytes=-1"
// HEAD request
@@ -334,7 +436,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
var readCloser io.ReadCloser
var responseHeader http.Header
var err error
readCloser, responseHeader, _, err = client.call(url, requestMethod, requestHeaders, requestInput)
readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
if err != nil {
return nil, err
}
@@ -347,7 +449,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
if singleFile && !includeVersions {
if responseHeader == nil {
LOG_DEBUG("BACKBLAZE_LIST", "b2_download_file_by_name did not return headers")
LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
return []*B2Entry{}, nil
}
requiredHeaders := []string{
@@ -361,11 +463,17 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
}
}
if len(missingKeys) > 0 {
return nil, fmt.Errorf("b2_download_file_by_name missing headers: %s", missingKeys)
return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
}
// construct the B2Entry from the response headers of the download request
fileID := responseHeader.Get("x-bz-file-id")
fileName := responseHeader.Get("x-bz-file-name")
unescapedFileName, err := url.QueryUnescape(fileName)
if err == nil {
fileName = unescapedFileName
} else {
LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
}
fileAction := "upload"
// byte range that is returned: "bytes #-#/#"
rangeString := responseHeader.Get("Content-Range")
@@ -378,14 +486,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
if fileSize != 0 {
return nil, fmt.Errorf("b2_download_file_by_name returned non-zero file length")
return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
}
} else {
return nil, fmt.Errorf("could not parse b2_download_file_by_name headers")
return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
}
fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
return []*B2Entry{&B2Entry{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
return []*B2Entry{{fileID, fileName[len(client.StorageDir):], fileAction, fileSize, fileUploadTimestamp}}, nil
}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
@@ -394,31 +502,27 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
ioutil.ReadAll(readCloser)
if startFileName == "" {
files = append(files, output.Files...)
} else {
for _, file := range output.Files {
if singleFile {
if file.FileName == startFileName {
files = append(files, file)
if !includeVersions {
output.NextFileName = ""
break
}
} else {
for _, file := range output.Files {
file.FileName = file.FileName[len(client.StorageDir):]
if singleFile {
if file.FileName == startFileName {
files = append(files, file)
if !includeVersions {
output.NextFileName = ""
break
}
} else {
if strings.HasPrefix(file.FileName, startFileName) {
files = append(files, file)
} else {
output.NextFileName = ""
break
}
output.NextFileName = ""
break
}
} else {
if strings.HasPrefix(file.FileName, startFileName) {
files = append(files, file)
} else {
output.NextFileName = ""
break
}
}
}
if len(output.NextFileName) == 0 {
@@ -434,14 +538,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
return files, nil
}
func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {
func (client *B2Client) DeleteFile(threadIndex int, fileName string, fileID string) (err error) {
input := make(map[string]string)
input["fileName"] = fileName
input["fileName"] = client.StorageDir + fileName
input["fileId"] = fileID
url := client.APIURL + "/b2api/v1/b2_delete_file_version"
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
url := client.getAPIURL() + "/b2api/v1/b2_delete_file_version"
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
if err != nil {
return err
}
@@ -454,14 +558,14 @@ type B2HideFileOutput struct {
FileID string
}
func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID string, err error) {
input := make(map[string]string)
input["bucketId"] = client.BucketID
input["fileName"] = fileName
input["fileName"] = client.StorageDir + fileName
url := client.APIURL + "/b2api/v1/b2_hide_file"
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
url := client.getAPIURL() + "/b2api/v1/b2_hide_file"
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
if err != nil {
return "", err
}
@@ -478,11 +582,11 @@ func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
return output.FileID, nil
}
func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, error) {
func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
readCloser, _, len, err := client.call(url, http.MethodGet, make(map[string]string), 0)
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
return readCloser, len, err
}
@@ -492,12 +596,12 @@ type B2GetUploadArgumentOutput struct {
AuthorizationToken string
}
func (client *B2Client) getUploadURL() error {
func (client *B2Client) getUploadURL(threadIndex int) error {
input := make(map[string]string)
input["bucketId"] = client.BucketID
url := client.APIURL + "/b2api/v1/b2_get_upload_url"
readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
url := client.getAPIURL() + "/b2api/v1/b2_get_upload_url"
readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
if err != nil {
return err
}
@@ -510,96 +614,29 @@ func (client *B2Client) getUploadURL() error {
return err
}
client.UploadURL = output.UploadURL
client.UploadToken = output.AuthorizationToken
client.UploadURLs[threadIndex] = output.UploadURL
client.UploadTokens[threadIndex] = output.AuthorizationToken
return nil
}
func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
func (client *B2Client) UploadFile(threadIndex int, filePath string, content []byte, rateLimit int) (err error) {
hasher := sha1.New()
hasher.Write(content)
hash := hex.EncodeToString(hasher.Sum(nil))
headers := make(map[string]string)
headers["X-Bz-File-Name"] = filePath
headers["X-Bz-File-Name"] = B2Escape(client.StorageDir + filePath)
headers["Content-Length"] = fmt.Sprintf("%d", len(content))
headers["Content-Type"] = "application/octet-stream"
headers["X-Bz-Content-Sha1"] = hash
var response *http.Response
backoff := 0
for i := 0; i < 8; i++ {
if client.UploadURL == "" || client.UploadToken == "" {
err = client.getUploadURL()
if err != nil {
return err
}
}
request, err := http.NewRequest("POST", client.UploadURL, CreateRateLimitedReader(content, rateLimit))
if err != nil {
return err
}
request.ContentLength = int64(len(content))
request.Header.Set("Authorization", client.UploadToken)
request.Header.Set("X-Bz-File-Name", filePath)
request.Header.Set("Content-Type", "application/octet-stream")
request.Header.Set("X-Bz-Content-Sha1", hash)
for key, value := range headers {
request.Header.Set(key, value)
}
if client.TestMode {
r := rand.Float32()
if r < 0.8 {
request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
} else if r < 0.9 {
request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
} else {
request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
}
}
response, err = client.HTTPClient.Do(request)
if err != nil {
LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned an error: %v", client.UploadURL, err)
backoff = client.retry(backoff, response)
client.UploadURL = ""
client.UploadToken = ""
continue
}
io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
if response.StatusCode < 300 {
return nil
}
LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
if response.StatusCode == 401 {
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
client.UploadURL = ""
client.UploadToken = ""
continue
} else if response.StatusCode == 403 {
if !client.TestMode {
return fmt.Errorf("B2 cap exceeded")
}
continue
} else {
LOG_INFO("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
backoff = client.retry(backoff, response)
client.UploadURL = ""
client.UploadToken = ""
}
readCloser, _, _, err := client.call(threadIndex, "", http.MethodPost, headers, CreateRateLimitedReader(content, rateLimit))
if err != nil {
return err
}
return fmt.Errorf("Maximum backoff reached")
readCloser.Close()
return nil
}

View File

@@ -37,7 +37,7 @@ func createB2ClientForTest(t *testing.T) (*B2Client, string) {
return nil, ""
}
return NewB2Client(b2["account"], b2["key"]), b2["bucket"]
return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
}
@@ -50,7 +50,7 @@ func TestB2Client(t *testing.T) {
b2Client.TestMode = true
err := b2Client.AuthorizeAccount()
err, _ := b2Client.AuthorizeAccount(0)
if err != nil {
t.Errorf("Failed to authorize the b2 account: %v", err)
return
@@ -64,14 +64,14 @@ func TestB2Client(t *testing.T) {
testDirectory := "b2client_test/"
files, err := b2Client.ListFileNames(testDirectory, false, false)
files, err := b2Client.ListFileNames(0, testDirectory, false, false)
if err != nil {
t.Errorf("Failed to list files: %v", err)
return
}
for _, file := range files {
err = b2Client.DeleteFile(file.FileName, file.FileID)
err = b2Client.DeleteFile(0, file.FileName, file.FileID)
if err != nil {
t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
}
@@ -90,14 +90,14 @@ func TestB2Client(t *testing.T) {
hash := sha256.Sum256(content)
name := hex.EncodeToString(hash[:])
err = b2Client.UploadFile(testDirectory+name, content, 100)
err = b2Client.UploadFile(0, testDirectory+name, content, 100)
if err != nil {
t.Errorf("Error uploading file '%s': %v", name, err)
return
}
}
files, err = b2Client.ListFileNames(testDirectory, false, false)
files, err = b2Client.ListFileNames(0, testDirectory, false, false)
if err != nil {
t.Errorf("Failed to list files: %v", err)
return
@@ -105,7 +105,7 @@ func TestB2Client(t *testing.T) {
for _, file := range files {
readCloser, _, err := b2Client.DownloadFile(file.FileName)
readCloser, _, err := b2Client.DownloadFile(0, file.FileName)
if err != nil {
t.Errorf("Error downloading file '%s': %v", file.FileName, err)
return
@@ -125,7 +125,7 @@ func TestB2Client(t *testing.T) {
}
for _, file := range files {
err = b2Client.DeleteFile(file.FileName, file.FileID)
err = b2Client.DeleteFile(0, file.FileName, file.FileID)
if err != nil {
t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
}

View File

@@ -11,32 +11,26 @@ import (
type B2Storage struct {
StorageBase
clients []*B2Client
client *B2Client
}
// CreateB2Storage creates a B2 storage object.
func CreateB2Storage(accountID string, applicationKey string, bucket string, threads int) (storage *B2Storage, err error) {
func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
var clients []*B2Client
client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
for i := 0; i < threads; i++ {
client := NewB2Client(accountID, applicationKey)
err, _ = client.AuthorizeAccount(0)
if err != nil {
return nil, err
}
err = client.AuthorizeAccount()
if err != nil {
return nil, err
}
err = client.FindBucket(bucket)
if err != nil {
return nil, err
}
clients = append(clients, client)
err = client.FindBucket(bucket)
if err != nil {
return nil, err
}
storage = &B2Storage{
clients: clients,
client: client,
}
storage.DerivedStorage = storage
@@ -56,7 +50,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
includeVersions = true
}
entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)
entries, err := storage.client.ListFileNames(threadIndex, dir, false, includeVersions)
if err != nil {
return nil, nil, err
}
@@ -71,7 +65,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
subDirs[subDir+"/"] = true
}
for subDir, _ := range subDirs {
for subDir := range subDirs {
files = append(files, subDir)
}
} else if dir == "chunks" {
@@ -102,7 +96,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
if strings.HasSuffix(filePath, ".fsl") {
filePath = filePath[:len(filePath)-len(".fsl")]
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
if err != nil {
return err
}
@@ -116,7 +110,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
toBeDeleted = true
err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
err = storage.client.DeleteFile(threadIndex, filePath, entry.FileID)
if err != nil {
return err
}
@@ -125,7 +119,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
return nil
} else {
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, false)
if err != nil {
return err
}
@@ -133,7 +127,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
if len(entries) == 0 {
return nil
}
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
}
}
@@ -160,10 +154,10 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
}
if filePath == from {
_, err = storage.clients[threadIndex].HideFile(from)
_, err = storage.client.HideFile(threadIndex, from)
return err
} else {
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
if err != nil {
return err
}
@@ -171,7 +165,7 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
return nil
}
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
}
}
@@ -188,7 +182,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
filePath = filePath[:len(filePath)-len(".fsl")]
}
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
entries, err := storage.client.ListFileNames(threadIndex, filePath, true, isFossil)
if err != nil {
return false, false, 0, err
}
@@ -210,22 +204,20 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.client.Threads)
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
@@ -243,7 +235,5 @@ func (storage *B2Storage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *B2Storage) EnableTestMode() {
for _, client := range storage.clients {
client.TestMode = true
}
storage.client.TestMode = true
}
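
After this refactoring a single shared B2Client serves every thread, so construction happens once. A hedged sketch of the new call shape, assuming it runs inside the duplicacy package (credentials, bucket, and prefix are placeholders):

// downloadURL "" falls back to the URL returned by b2_authorize_account;
// "backups/" scopes all files under that prefix inside the bucket; the
// final argument sizes the per-thread upload URL slots.
func exampleCreateB2Storage() (*B2Storage, error) {
	return CreateB2Storage("applicationKeyID", "applicationKey", "", "my-bucket", "backups/", 4)
}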

View File

@@ -33,8 +33,9 @@ type BackupManager struct {
snapshotCache *FileStorage // for copies of chunks needed by snapshots
config *Config // contains a number of options
nobackupFile string // don't backup directory when this file name is found
filtersFile string // the path to the filters file
}
func (manager *BackupManager) SetDryRun(dryRun bool) {
@@ -44,7 +45,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
// master key which can be nil if encryption is not enabled.
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string) *BackupManager {
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, filtersFile string) *BackupManager {
config, _, err := DownloadConfig(storage, password)
if err != nil {
@@ -65,8 +66,9 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
SnapshotManager: snapshotManager,
config: config,
nobackupFile: nobackupFile,
filtersFile: filtersFile,
}
if IsDebugging() {
@@ -76,6 +78,11 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
return backupManager
}
// loadRSAPrivateKey loads the specified private key file for decrypting file chunks
func (manager *BackupManager) LoadRSAPrivateKey(keyFile string, passphrase string) {
manager.config.loadRSAPrivateKey(keyFile, passphrase)
}
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
// directory
func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
@@ -103,6 +110,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
return true
}
// setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
// to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
// original unchanged entry list.
@@ -176,6 +184,10 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
LOG_INFO("BACKUP_KEY", "RSA encryption is enabled" )
}
remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
if remoteSnapshot == nil {
remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
@@ -188,7 +200,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
defer DeleteShadowCopy()
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
manager.nobackupFile, manager.filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
@@ -198,6 +211,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
return true
}
if len(localSnapshot.Files) == 0 {
LOG_ERROR("SNAPSHOT_EMPTY", "No files under the repository to be backed up")
return false
}
// This cache contains all chunks referenced by the last snapshot. Any other chunks will lead to a call to
// UploadChunk.
chunkCache := make(map[string]bool)
@@ -760,7 +778,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
manager.filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
return false
@@ -807,6 +826,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
if compare == 0 {
i++
if quickMode && local.IsSameAs(entry) {
LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", local.Path)
skipped = true
}
}
@@ -898,7 +918,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
continue
}
} else {
err = os.MkdirAll(path.Dir(fullPath), 0744)
parent, _ := SplitDir(fullPath)
err = os.MkdirAll(parent, 0744)
if err != nil {
LOG_ERROR("DOWNLOAD_MKDIR", "Failed to create directory: %v", err)
}
@@ -981,12 +1002,12 @@ type fileEncoder struct {
buffer *bytes.Buffer
}
// Read reads data from the embeded buffer
// Read reads data from the embedded buffer
func (encoder fileEncoder) Read(data []byte) (n int, err error) {
return encoder.buffer.Read(data)
}
// NextFile switchs to the next file and generates its json description in the buffer. It also takes care of
// NextFile switches to the next file and generates its json description in the buffer. It also takes care of
// the ending ']' and the commas between files.
func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
if encoder.currentIndex == len(encoder.files) {
@@ -1126,7 +1147,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
}
// Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
// file under the .duplicacy directory and then replaces the existing one. Otherwise, the exising file will be
// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
// overwritten directly.
func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64) bool {
@@ -1324,7 +1345,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
}
}
for i := entry.StartChunk; i <= entry.EndChunk; i++ {
if _, found := offsetMap[chunkDownloader.taskList[i].chunkHash]; !found {
chunkDownloader.taskList[i].needed = true
@@ -1528,7 +1548,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
revisionsToBeCopied []int, threads int) bool {
if !manager.config.IsCompatiableWith(otherManager.config) {
LOG_ERROR("CONFIG_INCOMPATIABLE", "Two storages are not compatiable for the copy operation")
LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
return false
}
@@ -1611,6 +1631,9 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
return true
}
// These two maps store hashes of chunks in the source and destination storages, respectively. Note that
// the value of 'chunks' is used to indicate whether the chunk is a snapshot chunk, while the value of 'otherChunks'
// is not used.
chunks := make(map[string]bool)
otherChunks := make(map[string]bool)
@@ -1623,21 +1646,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)
for _, chunkHash := range snapshot.FileSequence {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
}
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
for _, chunkHash := range snapshot.ChunkSequence {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
}
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
for _, chunkHash := range snapshot.LengthSequence {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
}
chunks[chunkHash] = true // The chunk is a snapshot chunk
}
description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1650,9 +1667,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
for _, chunkHash := range snapshot.ChunkHashes {
if _, found := chunks[chunkHash]; !found {
chunks[chunkHash] = true
chunks[chunkHash] = false // The chunk is a file chunk
}
}
snapshot.ChunkHashes = nil
}
otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
@@ -1674,7 +1693,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
chunksToCopy := 0
chunksToSkip := 0
for chunkHash, _ := range chunks {
for chunkHash := range chunks {
otherChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
if _, found := otherChunks[otherChunkID]; found {
chunksToSkip++
@@ -1704,7 +1723,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
totalSkipped := 0
chunkIndex := 0
for chunkHash, _ := range chunks {
for chunkHash, isSnapshot := range chunks {
chunkIndex++
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
@@ -1715,6 +1734,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
newChunk := otherManager.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(chunk.GetBytes())
newChunk.isSnapshot = isSnapshot
chunkUploader.StartChunk(newChunk, chunkIndex)
totalCopied++
} else {

View File

@@ -227,11 +227,11 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
if testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
t.Errorf("Failed to initialize the storage")
}
} else {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
t.Errorf("Failed to initialize the storage")
}
}
@@ -239,15 +239,15 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password, "")
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "")
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/ nil)
backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
@@ -270,8 +270,8 @@ func TestBackupManager(t *testing.T) {
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -298,8 +298,8 @@ func TestBackupManager(t *testing.T) {
createRandomFile(testDir+"/repository2/dir5/file5", 100)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/true, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -325,8 +325,8 @@ func TestBackupManager(t *testing.T) {
os.Remove(testDir + "/repository1/file2")
os.Remove(testDir + "/repository1/dir1/file3")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Restore(testDir+"/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -341,7 +341,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -349,7 +349,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
@@ -358,7 +358,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
/*buf := make([]byte, 1<<16)
runtime.Stack(buf, true)

View File

@@ -41,7 +41,7 @@ func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compres
if encryption {
key = "0123456789abcdef0123456789abcdef"
}
err := chunk.Encrypt([]byte(key), "")
err := chunk.Encrypt([]byte(key), "", false)
if err != nil {
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
}

View File

@@ -5,18 +5,20 @@
package duplicacy
import (
"os"
"bytes"
"compress/zlib"
"crypto/aes"
"crypto/rsa"
"crypto/cipher"
"crypto/rand"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/binary"
"fmt"
"hash"
"io"
"os"
"runtime"
"github.com/bkaradzic/go-lz4"
@@ -60,11 +62,17 @@ type Chunk struct {
config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
// by the config
isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
// encryption, where a snapshot chunk is not encrypted by RSA
}
// Magic word to identify a duplicacy format encrypted file, plus a version number.
var ENCRYPTION_HEADER = "duplicacy\000"
// RSA encrypted chunks start with "duplicacy\002"
var ENCRYPTION_VERSION_RSA byte = 2
// CreateChunk creates a new chunk.
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
@@ -113,6 +121,7 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
chunk.hash = nil
chunk.id = ""
chunk.size = 0
chunk.isSnapshot = false
}
// Write implements the Writer interface.
@@ -170,7 +179,7 @@ func (chunk *Chunk) VerifyID() {
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {
var aesBlock cipher.Block
var gcm cipher.AEAD
@@ -186,8 +195,17 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
if len(encryptionKey) > 0 {
key := encryptionKey
if len(derivationKey) > 0 {
usingRSA := false
// Enable RSA encryption only when the chunk is not a snapshot chunk
if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
randomKey := make([]byte, 32)
_, err := rand.Read(randomKey)
if err != nil {
return err
}
key = randomKey
usingRSA = true
} else if len(derivationKey) > 0 {
hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
hasher.Write(encryptionKey)
key = hasher.Sum(nil)
@@ -204,7 +222,21 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
}
// Start with the magic number and the version number.
encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
if usingRSA {
// RSA-encrypted chunks start with "duplicacy\002"
encryptedBuffer.Write([]byte(ENCRYPTION_HEADER)[:len(ENCRYPTION_HEADER) - 1])
encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
// Then the encrypted key
encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPublicKey, key, nil)
if err != nil {
return err
}
binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
encryptedBuffer.Write(encryptedKey)
} else {
encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
}
// Followed by the nonce
nonce = make([]byte, gcm.NonceSize())
@@ -214,7 +246,6 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
}
encryptedBuffer.Write(nonce)
offset = encryptedBuffer.Len()
}
// offset is either 0 or the length of header + nonce
@@ -250,7 +281,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
// PKCS7 is used. Compressed chunk sizes leaks information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := 256 - dataLength % 256
paddingLength := 256 - dataLength%256
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
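
With paddingLength = 256 - dataLength%256 the pad is always between 1 and 256 bytes, so every encrypted payload becomes a multiple of 256 bytes and the true size is hidden modulo 256; the stored pad byte is byte(paddingLength), which wraps 256 to 0, and the decrypt side maps 0 back to 256. The arithmetic in isolation:

package main

import "fmt"

// padLength mirrors the PKCS7-style computation above: always in [1, 256].
func padLength(dataLength int) int {
	return 256 - dataLength%256
}

func main() {
	for _, n := range []int{0, 1, 255, 256, 1000} {
		p := padLength(n)
		fmt.Printf("data %4d -> pad %3d -> total %4d (stored byte %3d)\n",
			n, p, n+p, byte(p))
	}
}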
@@ -267,7 +298,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
}
// This is to ensure compability with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
// derive the key used to encrypt/decrypt files and chunks.
var DecryptWithHMACSHA256 = false
@@ -291,6 +322,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
}()
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
headerLength := len(ENCRYPTION_HEADER)
if len(encryptionKey) > 0 {
@@ -308,6 +340,41 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
key = hasher.Sum(nil)
}
if len(encryptedBuffer.Bytes()) < headerLength + 12 {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
return fmt.Errorf("The storage doesn't seem to be encrypted")
}
encryptionVersion := encryptedBuffer.Bytes()[headerLength-1]
if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
}
if encryptionVersion == ENCRYPTION_VERSION_RSA {
if chunk.config.rsaPrivateKey == nil {
LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
return fmt.Errorf("An RSA private key is required to decrypt the chunk")
}
encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[headerLength:headerLength+2])
if len(encryptedBuffer.Bytes()) < headerLength + 14 + int(encryptedKeyLength) {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
encryptedKey := encryptedBuffer.Bytes()[headerLength + 2:headerLength + 2 + int(encryptedKeyLength)]
headerLength += 2 + int(encryptedKeyLength)
decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
if err != nil {
return err
}
key = decryptedKey
}
aesBlock, err := aes.NewCipher(key)
if err != nil {
return err
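
Taken together, the decrypt path above fixes the RSA chunk framing: the 9 magic bytes "duplicacy", a version byte (0 for plain AES-GCM, 2 for RSA), then for version 2 a little-endian uint16 key length and the OAEP-encrypted random key, followed by the GCM nonce and ciphertext. A sketch of just the framing parser (the function is ours, for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

const header = "duplicacy\000" // 9 magic bytes plus 1 version byte

// parseRSAHeader returns the encrypted key and the offset where the GCM
// nonce begins, for a buffer framed as in Chunk.Encrypt with RSA enabled.
func parseRSAHeader(buf []byte) (encryptedKey []byte, nonceOffset int, err error) {
	h := len(header)
	if len(buf) < h+2 || string(buf[:h-1]) != header[:h-1] {
		return nil, 0, fmt.Errorf("not an encrypted chunk")
	}
	if buf[h-1] != 2 {
		return nil, 0, fmt.Errorf("not an RSA-encrypted chunk (version %d)", buf[h-1])
	}
	keyLen := int(binary.LittleEndian.Uint16(buf[h : h+2]))
	if len(buf) < h+2+keyLen {
		return nil, 0, fmt.Errorf("truncated encrypted key")
	}
	return buf[h+2 : h+2+keyLen], h + 2 + keyLen, nil
}

func main() {
	// A fabricated 4-byte "encrypted key" just to exercise the framing.
	buf := append([]byte("duplicacy\002"), 4, 0, 0xde, 0xad, 0xbe, 0xef)
	key, offset, err := parseRSAHeader(buf)
	fmt.Println(key, offset, err) // [222 173 190 239] 16 <nil>
}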
@@ -318,21 +385,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
return err
}
headerLength := len(ENCRYPTION_HEADER)
offset = headerLength + gcm.NonceSize()
if len(encryptedBuffer.Bytes()) < offset {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
return fmt.Errorf("The storage doesn't seem to be encrypted")
}
if encryptedBuffer.Bytes()[headerLength-1] != 0 {
return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
}
nonce := encryptedBuffer.Bytes()[headerLength:offset]
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
@@ -342,7 +395,6 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
return err
}
paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
if paddingLength == 0 {
paddingLength = 256

View File

@@ -7,6 +7,7 @@ package duplicacy
import (
"bytes"
crypto_rand "crypto/rand"
"crypto/rsa"
"math/rand"
"testing"
)
@@ -22,6 +23,15 @@ func TestChunk(t *testing.T) {
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
maxSize := 1000000
if testRSAEncryption {
privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
if err != nil {
t.Errorf("Failed to generate a random private key: %v", err)
}
config.rsaPrivateKey = privateKey
config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
}
remainderLength := -1
for i := 0; i < 500; i++ {
@@ -37,7 +47,7 @@ func TestChunk(t *testing.T) {
hash := chunk.GetHash()
id := chunk.GetID()
err := chunk.Encrypt(key, "")
err := chunk.Encrypt(key, "", false)
if err != nil {
t.Errorf("Failed to encrypt the data: %v", err)
continue
@@ -48,7 +58,7 @@ func TestChunk(t *testing.T) {
if remainderLength == -1 {
remainderLength = len(encryptedData) % 256
} else if len(encryptedData) % 256 != remainderLength {
} else if len(encryptedData)%256 != remainderLength {
t.Errorf("Incorrect padding size")
}
@@ -71,7 +81,7 @@ func TestChunk(t *testing.T) {
}
if bytes.Compare(plainData, decryptedData) != 0 {
t.Logf("orginal length: %d, decrypted length: %d", len(plainData), len(decryptedData))
t.Logf("Original length: %d, decrypted length: %d", len(plainData), len(decryptedData))
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
}

View File

@@ -126,6 +126,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
// AddChunk adds a single chunk the download list.
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
task := ChunkDownloadTask{
chunkIndex: len(downloader.taskList),
chunkHash: chunkHash,
@@ -178,7 +179,7 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
return
}
for i, _ := range downloader.completedTasks {
for i := range downloader.completedTasks {
if i < chunkIndex && downloader.taskList[i].chunk != nil {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
@@ -197,6 +198,16 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
downloader.lastChunkIndex = chunkIndex
}
// Return the chunk last downloaded and its hash
func (downloader *ChunkDownloader) GetLastDownloadedChunk() (chunk *Chunk, chunkHash string) {
if downloader.lastChunkIndex >= len(downloader.taskList) {
return nil, ""
}
task := downloader.taskList[downloader.lastChunkIndex]
return task.chunk, task.chunkHash
}
// WaitForChunk waits until the specified chunk is ready
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
@@ -243,6 +254,47 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
return downloader.taskList[chunkIndex].chunk
}
// WaitForCompletion waits until all chunks have been downloaded
func (downloader *ChunkDownloader) WaitForCompletion() {
// Tasks in completedTasks have not been counted by numberOfActiveChunks
downloader.numberOfActiveChunks -= len(downloader.completedTasks)
// find the completed task with the largest index; we'll start from the next index
for index := range downloader.completedTasks {
if downloader.lastChunkIndex < index {
downloader.lastChunkIndex = index
}
}
// Loop until there are no download tasks in progress
for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex + 1 < len(downloader.taskList) {
// Wait for a completion event first
if downloader.numberOfActiveChunks > 0 {
completion := <-downloader.completionChannel
downloader.config.PutChunk(completion.chunk)
downloader.numberOfActiveChunks--
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
// Pass the tasks one by one to the download queue
if downloader.lastChunkIndex + 1 < len(downloader.taskList) {
task := &downloader.taskList[downloader.lastChunkIndex + 1]
if task.isDownloading {
downloader.lastChunkIndex++
continue
}
downloader.taskQueue <- *task
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.lastChunkIndex++
}
}
}
// Stop terminates all downloading goroutines
func (downloader *ChunkDownloader) Stop() {
for downloader.numberOfDownloadingChunks > 0 {
@@ -253,7 +305,7 @@ func (downloader *ChunkDownloader) Stop() {
downloader.numberOfDownloadingChunks--
}
for i, _ := range downloader.completedTasks {
for i := range downloader.completedTasks {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
downloader.numberOfActiveChunks--

View File

@@ -18,7 +18,7 @@ const (
ChunkOperationResurrect = 3
)
// ChunkOperatorTask is used to pass paramaters for different kinds of chunk operations.
// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
type ChunkOperatorTask struct {
operation int // The type of operation
chunkID string // The chunk id

View File

@@ -128,7 +128,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
}
// Encrypt the chunk only after we know that it must be uploaded.
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
return false

View File

@@ -9,15 +9,20 @@ import (
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"crypto/rsa"
"crypto/x509"
"encoding/binary"
"encoding/hex"
"encoding/json"
"encoding/pem"
"fmt"
"hash"
"os"
"runtime"
"runtime/debug"
"sync/atomic"
"io/ioutil"
"reflect"
blake2 "github.com/minio/blake2b-simd"
)
@@ -65,6 +70,10 @@ type Config struct {
// for encrypting a non-chunk file
FileKey []byte `json:"-"`
// for RSA encryption
rsaPrivateKey *rsa.PrivateKey
rsaPublicKey *rsa.PublicKey
chunkPool chan *Chunk
numberOfChunks int32
dryRun bool
@@ -80,10 +89,15 @@ type jsonableConfig struct {
IDKey string `json:"id-key"`
ChunkKey string `json:"chunk-key"`
FileKey string `json:"file-key"`
RSAPublicKey string `json:"rsa-public-key"`
}
func (config *Config) MarshalJSON() ([]byte, error) {
publicKey := []byte {}
if config.rsaPublicKey != nil {
publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
}
return json.Marshal(&jsonableConfig{
aliasedConfig: (*aliasedConfig)(config),
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
@@ -91,6 +105,7 @@ func (config *Config) MarshalJSON() ([]byte, error) {
IDKey: hex.EncodeToString(config.IDKey),
ChunkKey: hex.EncodeToString(config.ChunkKey),
FileKey: hex.EncodeToString(config.FileKey),
RSAPublicKey: hex.EncodeToString(publicKey),
})
}
@@ -120,6 +135,19 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
return fmt.Errorf("Invalid representation of the file key in the config")
}
if publicKey, err := hex.DecodeString(aliased.RSAPublicKey); err != nil {
return fmt.Errorf("Invalid hex encoding of the RSA public key in the config")
} else if len(publicKey) > 0 {
parsedKey, err := x509.ParsePKIXPublicKey(publicKey)
if err != nil {
return fmt.Errorf("Invalid RSA public key in the config: %v", err)
}
config.rsaPublicKey = parsedKey.(*rsa.PublicKey)
if config.rsaPublicKey == nil {
return fmt.Errorf("Unsupported public key type %s in the config", reflect.TypeOf(parsedKey))
}
}
return nil
}
@@ -140,6 +168,29 @@ func (config *Config) Print() {
LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)
if len(config.ChunkKey) > 0 {
LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
}
if len(config.FileKey) > 0 {
LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
}
if config.rsaPublicKey != nil {
pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
publicKey := pem.EncodeToMemory(&pem.Block{
Type: "PUBLIC KEY",
Bytes: pkisPublicKey,
})
LOG_TRACE("CONFIG_INFO", "RSA public key: %s", publicKey)
}
}
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, minimumChunkSize int,
@@ -430,7 +481,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
if len(password) > 0 {
// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
err = chunk.Encrypt(masterKey, "")
err = chunk.Encrypt(masterKey, "", true)
if err != nil {
LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
return false
@@ -477,7 +528,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
// is enabled.
func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool) bool {
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string) bool {
exist, _, _, err := storage.GetFileInfo(0, "config")
if err != nil {
@@ -496,5 +547,113 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
return false
}
if keyFile != "" {
config.loadRSAPublicKey(keyFile)
}
return UploadConfig(storage, config, password, iterations)
}
func (config *Config) loadRSAPublicKey(keyFile string) {
encodedKey, err := ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
return
}
decodedKey, _ := pem.Decode(encodedKey)
if decodedKey == nil {
LOG_ERROR("RSA_PUBLIC", "unrecognized public key in %s", keyFile)
return
}
if decodedKey.Type != "PUBLIC KEY" {
LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", decodedKey.Type, keyFile)
return
}
parsedKey, err := x509.ParsePKIXPublicKey(decodedKey.Bytes)
if err != nil {
LOG_ERROR("RSA_PUBLIC", "Failed to parse the public key in %s: %v", keyFile, err)
return
}
key, ok := parsedKey.(*rsa.PublicKey)
if !ok {
LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
return
}
config.rsaPublicKey = key
}
// loadRSAPrivateKey loads the specified private key file for decrypting file chunks
func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {
if config.rsaPublicKey == nil {
LOG_ERROR("RSA_PUBLIC", "The storage was not encrypted by an RSA key")
return
}
encodedKey, err := ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
return
}
decodedKey, _ := pem.Decode(encodedKey)
if decodedKey == nil {
LOG_ERROR("RSA_PRIVATE", "unrecognized private key in %s", keyFile)
return
}
if decodedKey.Type != "RSA PRIVATE KEY" {
LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", decodedKey.Type, keyFile)
return
}
var decodedKeyBytes []byte
if passphrase != "" {
decodedKeyBytes, err = x509.DecryptPEMBlock(decodedKey, []byte(passphrase))
} else {
decodedKeyBytes = decodedKey.Bytes
}
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(decodedKeyBytes); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(decodedKeyBytes); err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to parse the private key in %s: %v", keyFile, err)
return
}
}
key, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
return
}
data := make([]byte, 32)
_, err = rand.Read(data)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to generate random data for testing the private key: %v", err)
return
}
// Now test if the private key matches the public key
encryptedData, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, config.rsaPublicKey, data, nil)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to encrypt random data with the public key: %v", err)
return
}
decryptedData, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, key, encryptedData, nil)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Incorrect private key: %v", err)
return
}
if !bytes.Equal(data, decryptedData) {
LOG_ERROR("RSA_PRIVATE", "Decrypted data do not match the original data")
return
}
config.rsaPrivateKey = key
}
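For reference, a standalone sketch (not part of this change; file names and key size are arbitrary) that produces a key pair in exactly the PEM formats the two loaders above accept: a PKCS#1 "RSA PRIVATE KEY" block for the private key and a PKIX "PUBLIC KEY" block for the public key. The sketch writes an unencrypted private key; a passphrase-protected one would additionally go through x509.DecryptPEMBlock as shown above.

    package main

    import (
        "crypto/rand"
        "crypto/rsa"
        "crypto/x509"
        "encoding/pem"
        "io/ioutil"
    )

    func main() {
        key, err := rsa.GenerateKey(rand.Reader, 2048)
        if err != nil {
            panic(err)
        }
        // Private key: PKCS#1 encoding, PEM type "RSA PRIVATE KEY",
        // matching the decodedKey.Type check in loadRSAPrivateKey.
        privatePEM := pem.EncodeToMemory(&pem.Block{
            Type:  "RSA PRIVATE KEY",
            Bytes: x509.MarshalPKCS1PrivateKey(key),
        })
        // Public key: PKIX encoding, PEM type "PUBLIC KEY", matching what
        // loadRSAPublicKey passes to x509.ParsePKIXPublicKey.
        publicBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
        if err != nil {
            panic(err)
        }
        publicPEM := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: publicBytes})
        ioutil.WriteFile("private.pem", privatePEM, 0600)
        ioutil.WriteFile("public.pem", publicPEM, 0644)
    }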


@@ -6,6 +6,7 @@ package duplicacy
import (
"fmt"
"io/ioutil"
"strings"
"github.com/gilbertchen/go-dropbox"
@@ -199,6 +200,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
}
defer output.Body.Close()
defer ioutil.ReadAll(output.Body)
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
return err


@@ -272,7 +272,7 @@ func (entry *Entry) IsLink() bool {
}
func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode)&fileModeMask
return os.FileMode(entry.Mode) & fileModeMask
}
func (entry *Entry) IsSameAs(other *Entry) bool {
@@ -308,7 +308,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
// Only set the permission if the file is not a symlink
if !entry.IsLink() && (*fileInfo).Mode() & fileModeMask != entry.GetPermissions() {
if !entry.IsLink() && (*fileInfo).Mode()&fileModeMask != entry.GetPermissions() {
err := os.Chmod(fullPath, entry.GetPermissions())
if err != nil {
LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
@@ -456,10 +456,10 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
if err != nil {
return directoryList, nil, err
}
// This binary search works because ioutil.ReadDir returns files sorted by Name() by default
if nobackupFile != "" {
ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0})
ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0 })
if ii < len(files) && files[ii].Name() == nobackupFile {
LOG_DEBUG("LIST_NOBACKUP", "%s is excluded due to nobackup file", path)
return directoryList, skippedFiles, nil
@@ -490,7 +490,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
}
if entry.IsLink() {
isRegular := false
isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
skippedFiles = append(skippedFiles, entry.Path)
@@ -500,7 +500,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
if isRegular {
entry.Mode ^= uint32(os.ModeSymlink)
} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
stat, err := os.Stat(filepath.Join(top, entry.Path))
stat, err := os.Stat(joinPath(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
skippedFiles = append(skippedFiles, entry.Path)
@@ -513,6 +513,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
// path from f.Name(); note that a "/" is appended, assuming a symbolic link is always a directory
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
}
if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
continue
}
entry = newEntry
}
}


@@ -34,7 +34,7 @@ func CreateFileReader(top string, files []*Entry) *FileReader {
return reader
}
// NextFile switchs to the next file in the file reader.
// NextFile switches to the next file in the file reader.
func (reader *FileReader) NextFile() bool {
if reader.CurrentFile != nil {


@@ -12,6 +12,7 @@ import (
"os"
"path"
"strings"
"syscall"
"time"
)
@@ -165,7 +166,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
}
} else {
if !stat.IsDir() {
fmt.Errorf("The path %s is not a directory", dir)
return fmt.Errorf("The path %s is not a directory", dir)
}
}
}
@@ -190,7 +191,19 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
return err
}
file.Close()
if err = file.Sync(); err != nil {
pathErr, ok := err.(*os.PathError)
isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
if !isNotSupported {
_ = file.Close()
return err
}
}
err = file.Close()
if err != nil {
return err
}
err = os.Rename(temporaryFile, fullPath)
if err != nil {

View File

@@ -20,23 +20,27 @@ import (
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/drive/v3"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
)
var (
GCDFileMimeType = "application/octet-stream"
GCDFileMimeType = "application/octet-stream"
GCDDirectoryMimeType = "application/vnd.google-apps.folder"
GCDUserDrive = "root"
)
type GCDStorage struct {
StorageBase
service *drive.Service
idCache map[string]string // only directories are saved in this cache
idCache map[string]string // only directories are saved in this cache
idCacheLock sync.Mutex
backoffs []int // desired backoff time in seconds for each thread
attempts []int // number of failed attempts since last success for each thread
driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive
createDirectoryLock sync.Mutex
isConnected bool
@@ -78,6 +82,10 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
// User Rate Limit Exceeded
message = e.Message
retry = true
} else if e.Code == 408 {
// Request timeout
message = e.Message
retry = true
} else if e.Code == 401 {
// Only retry on authorization error when storage has been connected before
if storage.isConnected {
@@ -187,7 +195,11 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
var err error
for {
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
fileList, err = q.Do()
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
break
} else if retry {
@@ -215,7 +227,11 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
for {
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
fileList, err = q.Do()
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
break
@@ -244,7 +260,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
return fileID, nil
}
fileID := "root"
fileID := storage.driveID
if rootID, ok := storage.findPathID(""); ok {
fileID = rootID
@@ -291,7 +307,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
} else if isDir {
storage.savePathID(current, fileID)
}
if i != len(names) - 1 && !isDir {
if i != len(names)-1 && !isDir {
return "", fmt.Errorf("Path '%s' is not a directory", current)
}
}
@@ -299,37 +315,85 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
}
// CreateGCDStorage creates a GCD storage object.
func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storage *GCDStorage, err error) {
func CreateGCDStorage(tokenFile string, driveID string, storagePath string, threads int) (storage *GCDStorage, err error) {
ctx := context.Background()
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
return nil, err
}
gcdConfig := &GCDConfig{}
if err := json.Unmarshal(description, gcdConfig); err != nil {
return nil, err
}
var object map[string]interface{}
oauth2Config := oauth2.Config{
ClientID: gcdConfig.ClientID,
ClientSecret: gcdConfig.ClientSecret,
Endpoint: gcdConfig.Endpoint,
}
authClient := oauth2Config.Client(context.Background(), &gcdConfig.Token)
service, err := drive.New(authClient)
err = json.Unmarshal(description, &object)
if err != nil {
return nil, err
}
isServiceAccount := false
if value, ok := object["type"]; ok {
if authType, ok := value.(string); ok && authType == "service_account" {
isServiceAccount = true
}
}
var tokenSource oauth2.TokenSource
if isServiceAccount {
config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
if err != nil {
return nil, err
}
tokenSource = config.TokenSource(ctx)
} else {
gcdConfig := &GCDConfig{}
if err := json.Unmarshal(description, gcdConfig); err != nil {
return nil, err
}
config := oauth2.Config{
ClientID: gcdConfig.ClientID,
ClientSecret: gcdConfig.ClientSecret,
Endpoint: gcdConfig.Endpoint,
}
tokenSource = config.TokenSource(ctx, &gcdConfig.Token)
}
service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
if err != nil {
return nil, err
}
if len(driveID) == 0 {
driveID = GCDUserDrive
} else {
driveList, err := drive.NewTeamdrivesService(service).List().Do()
if err != nil {
return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
}
found := false
for _, teamDrive := range driveList.TeamDrives {
if teamDrive.Id == driveID || teamDrive.Name == driveID {
driveID = teamDrive.Id
found = true
break
}
}
if !found {
return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
}
}
storage = &GCDStorage{
service: service,
numberOfThreads: threads,
idCache: make(map[string]string),
backoffs: make([]int, threads),
attempts: make([]int, threads),
driveID: driveID,
}
for i := range storage.backoffs {
@@ -337,6 +401,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
storage.attempts[i] = 0
}
storage.savePathID("", driveID)
storagePathID, err := storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
@@ -386,8 +451,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
subDirs := []string{}
for _, file := range files {
storage.savePathID("snapshots/" + file.Name, file.Id)
subDirs = append(subDirs, file.Name + "/")
storage.savePathID("snapshots/"+file.Name, file.Id)
subDirs = append(subDirs, file.Name+"/")
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
@@ -438,8 +503,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
files = append(files, name)
sizes = append(sizes, entry.Size)
} else {
parents = append(parents, parent+ "/" + entry.Name)
storage.savePathID(parent + "/" + entry.Name, entry.Id)
parents = append(parents, parent+"/"+entry.Name)
storage.savePathID(parent+"/"+entry.Name, entry.Id)
}
}
}
@@ -458,7 +523,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
}
for {
err = storage.service.Files.Delete(fileID).Fields("id").Do()
err = storage.service.Files.Delete(fileID).SupportsAllDrives(true).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
storage.deletePathID(filePath)
return nil
@@ -504,7 +569,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
}
for {
_, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
_, err = storage.service.Files.Update(fileID, nil).SupportsAllDrives(true).AddParents(toParentID).RemoveParents(fromParentID).Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else if retry {
@@ -555,7 +620,7 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
Parents: []string{parentID},
}
file, err = storage.service.Files.Create(file).Fields("id").Do()
file, err = storage.service.Files.Create(file).SupportsAllDrives(true).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else {
@@ -624,13 +689,22 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
var response *http.Response
for {
response, err = storage.service.Files.Get(fileID).Download()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
// AcknowledgeAbuse(true) lets the download proceed even if GCD thinks that it contains malware.
// TODO: Should this prompt the user or log a warning?
req := storage.service.Files.Get(fileID).SupportsAllDrives(true)
if e, ok := err.(*googleapi.Error); ok {
if strings.Contains(err.Error(), "cannotDownloadAbusiveFile") || len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
LOG_WARN("GCD_STORAGE", "%s is marked as abusive, will download anyway.", filePath)
req = req.AcknowledgeAbuse(true)
}
}
response, err = req.Download()
if retry, retry_err := storage.shouldRetry(threadIndex, err); retry_err == nil && !retry {
break
} else if retry {
continue
} else {
return err
return retry_err
}
}
@@ -663,7 +737,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
for {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
_, err = storage.service.Files.Create(file).SupportsAllDrives(true).Media(reader).Fields("id").Do()
if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
break
} else if retry {


@@ -113,7 +113,7 @@ func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, [
for _, entry := range entries {
if entry.Type == "application/directory" {
files = append(files, entry.Name + "/")
files = append(files, entry.Name+"/")
sizes = append(sizes, 0)
} else {
files = append(files, entry.Name)


@@ -7,10 +7,12 @@ package duplicacy
import (
"fmt"
"os"
"log"
"runtime/debug"
"sync"
"testing"
"time"
"regexp"
)
const (
@@ -43,6 +45,13 @@ func setTestingT(t *testing.T) {
testingT = t
}
// Contains the ids of logs that won't be displayed
var suppressedLogs map[string]bool = map[string]bool{}
func SuppressLog(id string) {
suppressedLogs[id] = true
}
func getLevelName(level int) string {
switch level {
case DEBUG:
@@ -143,6 +152,12 @@ func logf(level int, logID string, format string, v ...interface{}) {
defer logMutex.Unlock()
if level >= loggingLevel {
if level <= ERROR && len(suppressedLogs) > 0 {
if _, found := suppressedLogs[logID]; found {
return
}
}
if printLogHeader {
fmt.Printf("%s %s %s %s\n",
now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
@@ -161,6 +176,32 @@ func logf(level int, logID string, format string, v ...interface{}) {
}
}
// Set up logging for libraries that Duplicacy depends on. They can call 'log.Printf("[ID] message")'
// to produce logs in Duplicacy's format
type Logger struct {
formatRegex *regexp.Regexp
}
func (logger *Logger) Write(line []byte) (n int, err error) {
n = len(line)
for len(line) > 0 && line[len(line) - 1] == '\n' {
line = line[:len(line) - 1]
}
matched := logger.formatRegex.FindStringSubmatch(string(line))
if matched != nil {
LOG_INFO(matched[1], "%s", matched[2])
} else {
LOG_INFO("LOG_DEFAULT", "%s", line)
}
return
}
func init() {
log.SetFlags(0)
log.SetOutput(&Logger{ formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`) })
}
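To illustrate the adapter (the log id "MYLIB" is hypothetical): once the init above has run inside this package, a dependency's standard-library logging is rewritten as follows:

    log.Printf("[MYLIB] connection established")
    // -> LOG_INFO("MYLIB", "connection established")
    log.Printf("plain message without a bracketed id")
    // -> LOG_INFO("LOG_DEFAULT", "plain message without a bracketed id")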
const (
duplicacyExitCode = 100
otherExitCode = 101


@@ -15,6 +15,7 @@ import (
"strings"
"sync"
"time"
"path/filepath"
"golang.org/x/oauth2"
)
@@ -32,9 +33,6 @@ type OneDriveErrorResponse struct {
Error OneDriveError `json:"error"`
}
var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
var OneDriveAPIURL = "https://api.onedrive.com/v1.0"
type OneDriveClient struct {
HTTPClient *http.Client
@@ -44,9 +42,13 @@ type OneDriveClient struct {
IsConnected bool
TestMode bool
IsBusiness bool
RefreshTokenURL string
APIURL string
}
func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
@@ -63,6 +65,15 @@ func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
TokenFile: tokenFile,
Token: token,
TokenLock: &sync.Mutex{},
IsBusiness: isBusiness,
}
if isBusiness {
client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
client.APIURL = "https://graph.microsoft.com/v1.0/me"
} else {
client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
client.APIURL = "https://api.onedrive.com/v1.0"
}
client.RefreshToken(false)
@@ -106,9 +117,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
if reader, ok := inputReader.(*RateLimitedReader); ok {
request.ContentLength = reader.Length()
request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length() - 1, reader.Length()))
}
if url != OneDriveRefreshTokenURL {
if url != client.RefreshTokenURL {
client.TokenLock.Lock()
request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
client.TokenLock.Unlock()
@@ -152,7 +164,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
if response.StatusCode == 401 {
if url == OneDriveRefreshTokenURL {
if url == client.RefreshTokenURL {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
}
@@ -161,6 +173,8 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
return nil, 0, err
}
continue
} else if response.StatusCode == 409 {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
} else if response.StatusCode > 401 && response.StatusCode != 404 {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
@@ -188,7 +202,7 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) {
return nil
}
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
@@ -228,9 +242,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
entries := []OneDriveEntry{}
url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
url := client.APIURL + "/drive/root:/" + path + ":/children"
if path == "" {
url = OneDriveAPIURL + "/drive/root/children"
url = client.APIURL + "/drive/root/children"
}
if client.TestMode {
url += "?top=8"
@@ -266,7 +280,7 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
url := OneDriveAPIURL + "/drive/root:/" + path
url := client.APIURL + "/drive/root:/" + path
url += "?select=id,name,size,folder"
readCloser, _, err := client.call(url, "GET", 0, "")
@@ -291,28 +305,95 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"
url := client.APIURL + "/drive/items/root:/" + path + ":/content"
return client.call(url, "GET", 0, "")
}
func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"
// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
// is smaller than 4MB for OneDrive Business
if !client.IsBusiness || len(content) < 4 * 1024 * 1024 || (client.TestMode && rand.Int() % 2 == 0) {
url := client.APIURL + "/drive/root:/" + path + ":/content"
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
if err != nil {
return err
}
readCloser.Close()
return nil
}
// For large files, create an upload session first
uploadURL, err := client.CreateUploadSession(path)
if err != nil {
return err
}
return client.UploadFileSession(uploadURL, content, rateLimit)
}
func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string, err error) {
type CreateUploadSessionItem struct {
ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"`
Name string `json:"name"`
}
input := map[string]interface{} {
"item": CreateUploadSessionItem {
ConflictBehavior: "replace",
Name: filepath.Base(path),
},
}
readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
if err != nil {
return "", err
}
type CreateUploadSessionOutput struct {
UploadURL string `json:"uploadUrl"`
}
output := &CreateUploadSessionOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return "", err
}
readCloser.Close()
return output.UploadURL, nil
}
func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte, rateLimit int) (err error) {
readCloser, _, err := client.call(uploadURL, "PUT", CreateRateLimitedReader(content, rateLimit), "")
if err != nil {
return err
}
type UploadFileSessionOutput struct {
Size int `json:"size"`
}
output := &UploadFileSessionOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return fmt.Errorf("Failed to complete the file upload session: %v", err)
}
if output.Size != len(content) {
return fmt.Errorf("Uploaded %d bytes out of %d bytes", output.Size, len(content))
}
readCloser.Close()
return nil
}
func (client *OneDriveClient) DeleteFile(path string) error {
url := OneDriveAPIURL + "/drive/root:/" + path
url := client.APIURL + "/drive/root:/" + path
readCloser, _, err := client.call(url, "DELETE", 0, "")
if err != nil {
@@ -325,7 +406,7 @@ func (client *OneDriveClient) DeleteFile(path string) error {
func (client *OneDriveClient) MoveFile(path string, parent string) error {
url := OneDriveAPIURL + "/drive/root:/" + path
url := client.APIURL + "/drive/root:/" + path
parentReference := make(map[string]string)
parentReference["path"] = "/drive/root:/" + parent
@@ -335,6 +416,20 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
if err != nil {
if e, ok := err.(OneDriveError); ok && e.Status == 400 {
// The destination directory doesn't exist; trying to create it...
dir := filepath.Dir(parent)
if dir == "." {
dir = ""
}
client.CreateDirectory(dir, filepath.Base(parent))
readCloser, _, err = client.call(url, "PATCH", parameters, "application/json")
if err == nil {
return nil
}
}
return err
}
@@ -344,24 +439,29 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
func (client *OneDriveClient) CreateDirectory(path string, name string) error {
url := OneDriveAPIURL + "/root/children"
url := client.APIURL + "/root/children"
if path != "" {
parentID, isDir, _, err := client.GetFileInfo(path)
pathID, isDir, _, err := client.GetFileInfo(path)
if err != nil {
return err
}
if parentID == "" {
return fmt.Errorf("The path '%s' does not exist", path)
if pathID == "" {
dir := filepath.Dir(path)
if dir != "." {
// The parent directory doesn't exist; trying to create it...
client.CreateDirectory(dir, filepath.Base(path))
isDir = true
}
}
if !isDir {
return fmt.Errorf("The path '%s' is not a directory", path)
}
url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
url = client.APIURL + "/drive/root:/" + path + ":/children"
}
parameters := make(map[string]interface{})
@@ -370,6 +470,11 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {
readCloser, _, err := client.call(url, "POST", parameters, "application/json")
if err != nil {
if e, ok := err.(OneDriveError); ok && e.Status == 409 {
// This error usually means the directory already exists
LOG_TRACE("ONEDRIVE_MKDIR", "The directory '%s/%s' already exists", path, name)
return nil
}
return err
}


@@ -17,7 +17,7 @@ import (
func TestOneDriveClient(t *testing.T) {
oneDriveClient, err := NewOneDriveClient("one-token.json")
oneDriveClient, err := NewOneDriveClient("one-token.json", false)
if err != nil {
t.Errorf("Failed to create the OneDrive client: %v", err)
return


@@ -19,13 +19,13 @@ type OneDriveStorage struct {
}
// CreateOneDriveStorage creates an OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {
for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
storagePath = storagePath[:len(storagePath)-1]
}
client, err := NewOneDriveClient(tokenFile)
client, err := NewOneDriveClient(tokenFile, isBusiness)
if err != nil {
return nil, err
}
@@ -80,6 +80,7 @@ func (storage *OneDriveStorage) convertFilePath(filePath string) string {
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}


@@ -25,6 +25,7 @@ type Preference struct {
DoNotSavePassword bool `json:"no_save_password"`
NobackupFile string `json:"nobackup_file"`
Keys map[string]string `json:"keys"`
FiltersFile string `json:"filters"`
}
var preferencePath string


@@ -210,7 +210,7 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *
defer output.Body.Close()
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.bucket))
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/storage.numberOfThreads)
return err
}
@@ -225,7 +225,7 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
ACL: aws.String(s3.ObjectCannedACLPrivate),
Body: CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.bucket)),
Body: CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads),
ContentType: aws.String("application/duplicacy"),
}
@@ -237,8 +237,6 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
attempts += 1
}
return err
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when


@@ -14,6 +14,7 @@ import (
"runtime"
"strings"
"time"
"sync"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
@@ -23,9 +24,13 @@ type SFTPStorage struct {
StorageBase
client *sftp.Client
clientLock sync.Mutex
minimumNesting int // The minimum level of directories to dive into before searching for the chunk file.
storageDir string
numberOfThreads int
numberOfTries int
serverAddress string
sftpConfig *ssh.ClientConfig
}
func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
@@ -86,6 +91,9 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
storageDir: storageDir,
minimumNesting: minimumNesting,
numberOfThreads: threads,
numberOfTries: 8,
serverAddress: serverAddress,
sftpConfig: sftpConfig,
}
// Random number for generating the temporary chunk file suffix.
@@ -99,13 +107,57 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
}
func CloseSFTPStorage(storage *SFTPStorage) {
storage.client.Close()
if storage.client != nil {
storage.client.Close()
storage.client = nil
}
}
func (storage *SFTPStorage) getSFTPClient() *sftp.Client {
storage.clientLock.Lock()
defer storage.clientLock.Unlock()
return storage.client
}
func (storage *SFTPStorage) retry(f func () error) error {
delay := time.Second
for i := 0;; i++ {
err := f()
if err != nil && strings.Contains(err.Error(), "EOF") && i < storage.numberOfTries {
LOG_WARN("SFTP_RETRY", "Encountered an error (%v); retry after %d second(s)", err, delay/time.Second)
time.Sleep(delay)
delay *= 2
storage.clientLock.Lock()
connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
storage.clientLock.Unlock()
continue
}
client, err := sftp.NewClient(connection)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
connection.Close()
storage.clientLock.Unlock()
continue
}
storage.client = client
storage.clientLock.Unlock()
continue
}
return err
}
}
// ListFiles return the list of files and subdirectories under 'file' (non-recursively)
func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) {
entries, err := storage.client.ReadDir(path.Join(storage.storageDir, dirPath))
var entries []os.FileInfo
err = storage.retry(func() error {
entries, err = storage.getSFTPClient().ReadDir(path.Join(storage.storageDir, dirPath))
return err
})
if err != nil {
return nil, nil, err
}
@@ -126,7 +178,11 @@ func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err error) {
fullPath := path.Join(storage.storageDir, filePath)
fileInfo, err := storage.client.Stat(fullPath)
var fileInfo os.FileInfo
err = storage.retry(func() error {
fileInfo, err = storage.getSFTPClient().Stat(fullPath)
return err
})
if err != nil {
if os.IsNotExist(err) {
LOG_TRACE("SFTP_STORAGE", "File %s has disappeared before deletion", filePath)
@@ -137,33 +193,47 @@ func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err er
if fileInfo == nil {
return nil
}
return storage.client.Remove(path.Join(storage.storageDir, filePath))
return storage.retry(func() error { return storage.getSFTPClient().Remove(path.Join(storage.storageDir, filePath)) })
}
// MoveFile renames the file.
func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (err error) {
toPath := path.Join(storage.storageDir, to)
fileInfo, err := storage.client.Stat(toPath)
var fileInfo os.FileInfo
err = storage.retry(func() error {
fileInfo, err = storage.getSFTPClient().Stat(toPath)
return err
})
if fileInfo != nil {
return fmt.Errorf("The destination file %s already exists", toPath)
}
return storage.client.Rename(path.Join(storage.storageDir, from),
path.Join(storage.storageDir, to))
err = storage.retry(func() error { return storage.getSFTPClient().Rename(path.Join(storage.storageDir, from),
path.Join(storage.storageDir, to)) })
return err
}
// CreateDirectory creates a new directory.
func (storage *SFTPStorage) CreateDirectory(threadIndex int, dirPath string) (err error) {
fullPath := path.Join(storage.storageDir, dirPath)
fileInfo, err := storage.client.Stat(fullPath)
var fileInfo os.FileInfo
err = storage.retry(func() error {
fileInfo, err = storage.getSFTPClient().Stat(fullPath)
return err
})
if fileInfo != nil && fileInfo.IsDir() {
return nil
}
return storage.client.Mkdir(path.Join(storage.storageDir, dirPath))
return storage.retry(func() error { return storage.getSFTPClient().Mkdir(path.Join(storage.storageDir, dirPath)) })
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
fileInfo, err := storage.client.Stat(path.Join(storage.storageDir, filePath))
var fileInfo os.FileInfo
err = storage.retry(func() error {
fileInfo, err = storage.getSFTPClient().Stat(path.Join(storage.storageDir, filePath))
return err
})
if err != nil {
if os.IsNotExist(err) {
return false, false, 0, nil
@@ -181,18 +251,19 @@ func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, err := storage.client.Open(path.Join(storage.storageDir, filePath))
return storage.retry(func() error {
file, err := storage.getSFTPClient().Open(path.Join(storage.storageDir, filePath))
if err != nil {
return err
}
if err != nil {
return err
}
defer file.Close()
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
return err
}
return nil
defer file.Close()
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
return err
}
return nil
})
}
// UploadFile writes 'content' to the file at 'filePath'.
@@ -201,62 +272,53 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
fullPath := path.Join(storage.storageDir, filePath)
dirs := strings.Split(filePath, "/")
if len(dirs) > 1 {
fullDir := path.Dir(fullPath)
_, err := storage.client.Stat(fullDir)
if err != nil {
// The error may be caused by a non-existent fullDir, or a broken connection. In either case,
// we just assume it is the former because there isn't a way to tell which is the case.
for i, _ := range dirs[1 : len(dirs)-1] {
subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
// We don't check the error; just keep going blindly but always store the last err
err = storage.client.Mkdir(subDir)
}
fullDir := path.Dir(fullPath)
return storage.retry(func() error {
// If there is an error creating the dirs, we check fullDir one more time, because another thread
// may happen to create the same fullDir ahead of this thread
if err != nil {
_, err := storage.client.Stat(fullDir)
if err != nil {
return err
if len(dirs) > 1 {
_, err := storage.getSFTPClient().Stat(fullDir)
if os.IsNotExist(err) {
for i := range dirs[1 : len(dirs)-1] {
subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
// We don't check the error; just keep going blindly
storage.getSFTPClient().Mkdir(subDir)
}
}
}
}
letters := "abcdefghijklmnopqrstuvwxyz"
suffix := make([]byte, 8)
for i := range suffix {
suffix[i] = letters[rand.Intn(len(letters))]
}
temporaryFile := fullPath + "." + string(suffix) + ".tmp"
file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
return err
}
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = io.Copy(file, reader)
if err != nil {
file.Close()
return err
}
file.Close()
err = storage.client.Rename(temporaryFile, fullPath)
if err != nil {
if _, err = storage.client.Stat(fullPath); err == nil {
storage.client.Remove(temporaryFile)
return nil
} else {
return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
letters := "abcdefghijklmnopqrstuvwxyz"
suffix := make([]byte, 8)
for i := range suffix {
suffix[i] = letters[rand.Intn(len(letters))]
}
}
return nil
temporaryFile := fullPath + "." + string(suffix) + ".tmp"
file, err := storage.getSFTPClient().OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
if err != nil {
return err
}
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = io.Copy(file, reader)
if err != nil {
file.Close()
return err
}
file.Close()
err = storage.getSFTPClient().Rename(temporaryFile, fullPath)
if err != nil {
if _, err = storage.getSFTPClient().Stat(fullPath); err == nil {
storage.getSFTPClient().Remove(temporaryFile)
return nil
} else {
return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
}
}
return nil
})
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when


@@ -13,6 +13,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"time"
@@ -25,7 +26,7 @@ var snapshotDate string
func CharsToString(ca []int8) string {
len := len(ca)
ba := make([]byte, len)
ba := make([]byte, len)
for i, v := range ca {
ba[i] = byte(v)
@@ -54,8 +55,8 @@ func GetPathDeviceId(path string) (deviceId int32, err error) {
// Executes shell command with timeout and returns stdout
func CommandWithTimeout(timeoutInSeconds int, name string, arg ...string) (output string, err error) {
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds) * time.Second)
defer cancel()
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds)*time.Second)
defer cancel()
cmd := exec.CommandContext(ctx, name, arg...)
out, err := cmd.Output()
@@ -91,10 +92,10 @@ func DeleteShadowCopy() {
LOG_ERROR("VSS_DELETE", "Error while deleting temporary mount directory")
return
}
LOG_INFO("VSS_DELETE", "Shadow copy unmounted and deleted at %s", snapshotPath)
snapshotPath = ""
snapshotPath = ""
}
func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {
@@ -123,12 +124,12 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
}
deviceIdRepository, err := GetPathDeviceId(top)
if err != nil {
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
return top
}
if deviceIdLocal != deviceIdRepository {
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
return top
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
return top
}
if timeoutInSeconds <= 60 {
@@ -145,22 +146,37 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
// Use tmutil to create snapshot
tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
return top
}
colonPos := strings.IndexByte(tmutilOutput, ':')
if colonPos < 0 {
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
return top
}
snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
// Mount snapshot as readonly and hide from GUI i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine." + snapshotDate, "/", snapshotPath)
tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
return top
}
snapshotName := "com.apple.TimeMachine." + snapshotDate
r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
snapshotNames := r.FindStringSubmatch(tmutilOutput)
if len(snapshotNames) > 0 {
snapshotName = snapshotNames[0]
} else {
LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
}
// Mount snapshot as readonly and hide from GUI i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
return top
}
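To see why the listlocalsnapshots lookup above is needed rather than blindly mounting "com.apple.TimeMachine." + snapshotDate, here is a standalone sketch with an invented tmutil output in which the snapshot name carries an extra suffix; the regex recovers the full name:

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // Hypothetical 'tmutil listlocalsnapshots .' output.
        tmutilOutput := "com.apple.TimeMachine.2020-04-10-093025\n" +
            "com.apple.TimeMachine.2020-04-10-101530.local\n"
        snapshotDate := "2020-04-10-101530"
        r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
        fmt.Println(r.FindStringSubmatch(tmutilOutput)[0])
        // Prints: com.apple.TimeMachine.2020-04-10-101530.local
    }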


@@ -11,6 +11,7 @@ import (
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
@@ -57,7 +58,7 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (snapshot *Snapshot, skippedDirectories []string,
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
snapshot = &Snapshot{
@@ -68,47 +69,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
var patterns []string
patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
if err == nil {
for _, pattern := range strings.Split(string(patternFile), "\n") {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
if pattern[0] == '#' {
continue
}
if IsUnspecifiedFilter(pattern) {
pattern = "+" + pattern
}
if IsEmptyFilter(pattern) {
continue
}
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
valid, err := IsValidRegex(pattern[2:])
if !valid || err != nil {
LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
}
}
patterns = append(patterns, pattern)
}
LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
if IsTracing() {
for _, pattern := range patterns {
LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
}
}
if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
}
patterns = ProcessFilters(filtersFile)
directories := make([]*Entry, 0, 256)
directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -127,6 +91,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
snapshot.Files = append(snapshot.Files, directory)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes)
if err != nil {
if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
return nil, nil, nil, err
}
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
skippedDirectories = append(skippedDirectories, directory.Path)
continue
@@ -150,6 +118,103 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
return snapshot, skippedDirectories, skippedFiles, nil
}
func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
for _, pattern := range patterns {
if pattern == new_pattern {
LOG_INFO("SNAPSHOT_FILTER", "Ignoring duplicate pattern: %s ...", new_pattern)
return patterns
}
}
new_patterns = append(patterns, new_pattern)
return new_patterns
}
func ProcessFilters(filtersFile string) (patterns []string) {
patterns = ProcessFilterFile(filtersFile, make([]string, 0))
LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
if IsTracing() {
for _, pattern := range patterns {
LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
}
}
return patterns
}
func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []string) {
for _, file := range includedFiles {
if file == patternFile {
// A cycle in the include mechanism was detected.
LOG_ERROR("SNAPSHOT_FILTER", "The filter file %s has already been included", patternFile)
return patterns
}
}
includedFiles = append(includedFiles, patternFile)
LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile)
patternFileContent, err := ioutil.ReadFile(patternFile)
if err == nil {
patternFileLines := strings.Split(string(patternFileContent), "\n")
patterns = ProcessFilterLines(patternFileLines, includedFiles)
}
return patterns
}
func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patterns []string) {
for _, pattern := range patternFileLines {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
if strings.HasPrefix(pattern, "@") {
patternIncludeFile := strings.TrimSpace(pattern[1:])
if patternIncludeFile == "" {
continue
}
if !filepath.IsAbs(patternIncludeFile) {
basePath := ""
if len(includedFiles) == 0 {
basePath, _ = os.Getwd()
} else {
basePath = filepath.Dir(includedFiles[len(includedFiles)-1])
}
patternIncludeFile = joinPath(basePath, patternIncludeFile)
}
for _, pattern := range ProcessFilterFile(patternIncludeFile, includedFiles) {
patterns = AppendPattern(patterns, pattern)
}
continue
}
if pattern[0] == '#' {
continue
}
if IsUnspecifiedFilter(pattern) {
pattern = "+" + pattern
}
if IsEmptyFilter(pattern) {
continue
}
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
valid, err := IsValidRegex(pattern[2:])
if !valid || err != nil {
LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
}
}
patterns = AppendPattern(patterns, pattern)
}
return patterns
}
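As an illustration of the include mechanism above (all file names invented): a filters file can now pull in shared fragments with a leading '@'; relative include paths are resolved against the directory of the including file, and duplicate patterns and include cycles are rejected:

    # .duplicacy/filters -- the top-level filters file
    # pull in a shared fragment from the same directory
    @common.filters
    +source/
    -*.tmp

    # common.filters -- the included fragment
    e:\.DS_Store$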
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
Files []*Entry


@@ -57,7 +57,7 @@ func CreateFossilCollection(allSnapshots map[string][]*Snapshot) *FossilCollecti
}
return &FossilCollection{
LastRevisions: lastRevisions,
LastRevisions: lastRevisions,
DeletedRevisions: make(map[string][]int),
}
}
@@ -386,7 +386,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all
if allSnapshots == nil {
// If the 'fossils' directory exists then don't clean the cache as all snapshots will be needed later
// during the fossil collection phase. The deletion procedure creates this direcotry.
// during the fossil collection phase. The deletion procedure creates this directory.
// We only check this condition when allSnapshots is nil because
// in this case it is the deletion procedure that is trying to clean the snapshot cache.
exist, _, _, err := manager.snapshotCache.GetFileInfo(0, "fossils")
@@ -653,6 +653,51 @@ func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot, keepChunkH
return chunks
}
// GetSnapshotChunkHashes has an option to retrieve chunk hashes in addition to chunk ids.
func (manager *SnapshotManager) GetSnapshotChunkHashes(snapshot *Snapshot, chunkHashes *map[string]bool, chunkIDs map[string]bool) {
for _, chunkHash := range snapshot.FileSequence {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
for _, chunkHash := range snapshot.ChunkSequence {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
for _, chunkHash := range snapshot.LengthSequence {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
if len(snapshot.ChunkHashes) == 0 {
description := manager.DownloadSequence(snapshot.ChunkSequence)
err := snapshot.LoadChunks(description)
if err != nil {
LOG_ERROR("SNAPSHOT_CHUNK", "Failed to load chunks for snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
}
for _, chunkHash := range snapshot.ChunkHashes {
if chunkHashes != nil {
(*chunkHashes)[chunkHash] = true
}
chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
}
snapshot.ClearChunks()
}
// ListSnapshots shows the information about a snapshot.
func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList []int, tag string,
showFiles bool, showChunks bool) int {
@@ -757,10 +802,12 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
// ListSnapshots shows the information about a snapshot.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
checkFiles bool, searchFossils bool, resurrect bool) bool {
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int) bool {
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads)
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
snapshotMap := make(map[string][]*Snapshot)
var err error
@@ -790,7 +837,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
chunkSizeMap[chunk] = allSizes[i]
}
if snapshotID == "" || showStatistics {
if snapshotID == "" || showStatistics || showTabular {
snapshotIDs, err := manager.ListSnapshotIDs()
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
@@ -807,10 +854,10 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
snapshotIDIndex := 0
totalMissingChunks := 0
for snapshotID, _ = range snapshotMap {
for snapshotID = range snapshotMap {
revisions := revisionsToCheck
if len(revisions) == 0 || showStatistics {
if len(revisions) == 0 || showStatistics || showTabular {
revisions, err = manager.ListSnapshotRevisions(snapshotID)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
@@ -839,7 +886,13 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
}
LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))
for snapshotID, _ = range snapshotMap {
var allChunkHashes *map[string]bool
if checkChunks && !checkFiles {
m := make(map[string]bool)
allChunkHashes = &m
}
for snapshotID = range snapshotMap {
for _, snapshot := range snapshotMap[snapshotID] {
@@ -850,16 +903,28 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
}
chunks := make(map[string]bool)
for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
chunks[chunkID] = true
}
manager.GetSnapshotChunkHashes(snapshot, allChunkHashes, chunks)
missingChunks := 0
for chunkID, _ := range chunks {
for chunkID := range chunks {
_, found := chunkSizeMap[chunkID]
if !found {
// Look up the chunk again in case it actually exists, but only if there aren't
// too many missing chunks.
if missingChunks < 100 {
_, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
if err != nil {
LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
chunkID, err)
} else if exist {
LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID)
continue
}
}
if !searchFossils {
missingChunks += 1
LOG_WARN("SNAPSHOT_VALIDATE",
@@ -870,7 +935,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
chunkPath, exist, size, err := manager.storage.FindChunk(0, chunkID, true)
if err != nil {
LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of fossil %s: %v",
chunkID, err)
return false
}
@@ -932,6 +997,14 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
}
if checkChunks && !checkFiles {
LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
for chunkHash := range *allChunkHashes {
manager.chunkDownloader.AddChunk(chunkHash)
}
manager.chunkDownloader.WaitForCompletion()
LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
}
return true
}
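The new checkChunks path (presumably surfaced as a -chunks style option of the check command) verifies each unique chunk exactly once: hashes are collected across all snapshots into a set, then fed to the multi-threaded downloader, whose download path already decrypts and hash-checks every chunk. A minimal sketch of that flow, assuming the AddChunk/WaitForCompletion semantics used above:

    // verifyAll queues each unique chunk hash once; WaitForCompletion blocks
    // until every queued chunk has been downloaded, decrypted and hash-checked.
    func verifyAll(downloader *ChunkDownloader, hashes map[string]bool) {
        for hash := range hashes {
            downloader.AddChunk(hash)
        }
        downloader.WaitForCompletion()
    }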
@@ -953,7 +1026,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
var totalChunkSize int64
var uniqueChunkSize int64
for chunkID, _ := range chunks {
for chunkID := range chunks {
chunkSize := chunkSizeMap[chunkID]
totalChunkSize += chunkSize
if chunkUniqueMap[chunkID] {
@@ -971,7 +1044,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
var totalChunkSize int64
var uniqueChunkSize int64
for chunkID, _ := range snapshotChunks {
for chunkID := range snapshotChunks {
chunkSize := chunkSizeMap[chunkID]
totalChunkSize += chunkSize
@@ -998,18 +1071,20 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
earliestSeenChunks := make(map[string]int)
for _, snapshot := range snapshotList {
for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
if earliestSeenChunks[chunkID] == 0 {
earliestSeenChunks[chunkID] = math.MaxInt32
}
earliestSeenChunks[chunkID] = MinInt(earliestSeenChunks[chunkID], snapshot.Revision)
if earliestSeenChunks[chunkID] > snapshot.Revision {
earliestSeenChunks[chunkID] = snapshot.Revision
}
}
}
for _, snapshot := range snapshotList {
chunks := make(map[string]bool)
for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
chunks[chunkID] = true
snapshotChunks[chunkID] = true
}
@@ -1021,7 +1096,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
var newChunkCount int64
var newChunkSize int64
for chunkID, _ := range chunks {
for chunkID := range chunks {
chunkSize := chunkSizeMap[chunkID]
totalChunkSize += chunkSize
totalChunkCount += 1
@@ -1049,7 +1124,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
var uniqueChunkSize int64
var totalChunkCount int64
var uniqueChunkCount int64
for chunkID, _ := range snapshotChunks {
for chunkID := range snapshotChunks {
chunkSize := chunkSizeMap[chunkID]
totalChunkSize += chunkSize
totalChunkCount += 1
@@ -1154,7 +1229,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
}
}
// RetrieveFile retrieve the file in the specifed snapshot.
// RetrieveFile retrieves the file in the specified snapshot.
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
if file.Size == 0 {
@@ -1178,7 +1253,6 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
}
var chunk *Chunk
currentHash := ""
for i := file.StartChunk; i <= file.EndChunk; i++ {
start := 0
@@ -1191,10 +1265,12 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
}
hash := snapshot.ChunkHashes[i]
if currentHash != hash {
lastChunk, lastChunkHash := manager.chunkDownloader.GetLastDownloadedChunk()
if lastChunkHash != hash {
i := manager.chunkDownloader.AddChunk(hash)
chunk = manager.chunkDownloader.WaitForChunk(i)
currentHash = hash
} else {
chunk = lastChunk
}
output(chunk.GetBytes()[start:end])
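The hunk above replaces the hand-rolled currentHash bookkeeping with GetLastDownloadedChunk(), so consecutive file chunks that share the same hash reuse the chunk already held in memory instead of being fetched again. The same memoization pattern in isolation; the type and fetch function are illustrative, not Duplicacy's API:

    // lastChunkCache returns the in-memory copy when the same hash is
    // requested twice in a row, and fetches otherwise.
    type lastChunkCache struct {
        hash  string
        data  []byte
        fetch func(hash string) []byte // stand-in for the real downloader
    }

    func (c *lastChunkCache) get(hash string) []byte {
        if hash != c.hash {
            c.data = c.fetch(hash)
            c.hash = hash
        }
        return c.data
    }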
@@ -1269,21 +1345,20 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
}
file := manager.FindFile(snapshot, path, false)
var content []byte
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
fmt.Printf("%s", chunk)
}) {
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
path, snapshot.ID, snapshot.Revision)
return false
}
fmt.Printf("%s", string(content))
return true
}
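PrintFile now streams each decoded chunk straight to stdout instead of first accumulating the whole file in a byte slice, which bounds peak memory by the chunk size rather than the file size. A minimal sketch of the same callback style against any io.Writer; retrieve is a placeholder with RetrieveFile's shape:

    // stream forwards chunks to w as they arrive; only one chunk is
    // ever held in memory at a time.
    func stream(w io.Writer, retrieve func(output func([]byte)) bool) bool {
        return retrieve(func(chunk []byte) {
            w.Write(chunk) // error handling elided in this sketch
        })
    }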
// Diff compares two snapshots, or two revision of a file if the file argument is given.
func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
filePath string, compareByHash bool, nobackupFile string) bool {
filePath string, compareByHash bool, nobackupFile string, filtersFile string) bool {
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
top, snapshotID, revisions, filePath, compareByHash)
@@ -1296,7 +1371,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
if len(revisions) <= 1 {
// Only scan the repository if filePath is not provided
if len(filePath) == 0 {
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile)
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
@@ -1467,7 +1542,11 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
same = right.IsSameAs(left)
}
} else {
same = left.Hash == right.Hash
if left.Size == 0 && right.Size == 0 {
same = true
} else {
same = left.Hash == right.Hash
}
}
if !same {
@@ -1838,7 +1917,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
if _, found := newChunks[chunk]; found {
// The fossil is referenced so it can't be deleted.
if dryRun {
LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected: %v", chunk)
LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected", chunk)
continue
}
@@ -2221,7 +2300,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
continue
}
manager.chunkOperator.Resurrect(chunk, chunkDir + file)
manager.chunkOperator.Resurrect(chunk, chunkDir+file)
fmt.Fprintf(logFile, "Found referenced fossil %s\n", file)
} else {
@@ -2232,7 +2311,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
}
if exclusive {
manager.chunkOperator.Delete(chunk, chunkDir + file)
manager.chunkOperator.Delete(chunk, chunkDir+file)
} else {
collection.AddFossil(chunkDir + file)
LOG_DEBUG("FOSSIL_FIND", "Found unreferenced fossil %s", file)
@@ -2247,7 +2326,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
chunk := strings.Replace(file, "/", "", -1)
if !chunkRegex.MatchString(chunk) {
LOG_WARN("CHUNK_UNKONWN_FILE", "File %s is not a chunk", file)
LOG_WARN("CHUNK_UNKNOWN_FILE", "File %s is not a chunk", file)
continue
}
@@ -2409,7 +2488,7 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string)
}
if len(derivationKey) > 64 {
derivationKey = derivationKey[len(derivationKey) - 64:]
derivationKey = derivationKey[len(derivationKey)-64:]
}
err = manager.fileChunk.Decrypt(manager.config.FileKey, derivationKey)
@@ -2443,10 +2522,10 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
}
if len(derivationKey) > 64 {
derivationKey = derivationKey[len(derivationKey) - 64:]
derivationKey = derivationKey[len(derivationKey)-64:]
}
err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)
err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey, true)
if err != nil {
LOG_ERROR("UPLOAD_File", "Failed to encrypt the file %s: %v", path, err)
return false

View File

@@ -9,12 +9,12 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"testing"
"time"
"io/ioutil"
)
func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
@@ -500,7 +500,7 @@ func TestPruneWithRetentionPolicyAndTag(t *testing.T) {
t.Logf("Creating 30 snapshots")
for i := 0; i < 30; i++ {
tag := "auto"
if i % 3 == 0 {
if i%3 == 0 {
tag = "manual"
}
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, tag)
@@ -615,12 +615,12 @@ func TestPruneNewSnapshots(t *testing.T) {
// Create another snapshot of vm1 that brings back chunkHash1
createTestSnapshot(snapshotManager, "vm1@host1", 3, now-0*day-3600, now-0*day-60, []string{chunkHash1, chunkHash3}, "tag")
// Create another snapshot of vm2 so the fossil collection will be processed by next prune
createTestSnapshot(snapshotManager, "vm2@host1", 2, now + 3600, now + 3600 * 2, []string{chunkHash4, chunkHash5}, "tag")
createTestSnapshot(snapshotManager, "vm2@host1", 2, now+3600, now+3600*2, []string{chunkHash4, chunkHash5}, "tag")
// Now chunkHash1 will be resurrected
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 4, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false);
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1)
}
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -664,12 +664,12 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Create another snapshot of vm1 so the fossil collection becomes eligible for processing.
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now - day - 3600, now - day - 60, []string{chunkHash3, chunkHash4}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 3, now-day-3600, now-day-60, []string{chunkHash3, chunkHash4}, "tag")
// Run the prune again but the fossil collection should be ignored, since revision 1 still exists
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 2)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false);
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1)
// Prune snapshot 1 again
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -677,11 +677,11 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Create another snapshot
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 4, now + 3600, now + 3600 * 2, []string{chunkHash5, chunkHash5}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 4, now+3600, now+3600*2, []string{chunkHash5, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)
// Run the prune again and this time the fossil collection will be processed and the fossils removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false);
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1)
}

View File

@@ -89,7 +89,7 @@ func (storage *StorageBase) SetRateLimits(downloadRateLimit int, uploadRateLimit
}
// SetDefaultNestingLevels sets the default read and write levels. This is usually called by
// derived storages to set the levels with old values so that storages initialied by ealier versions
// derived storages to set the levels with old values so that storages initialized by earlier versions
// will continue to work.
func (storage *StorageBase) SetDefaultNestingLevels(readLevels []int, writeLevel int) {
storage.readLevels = readLevels
@@ -291,6 +291,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
// If ssh_key_file is set, skip password-based login
keyFile := GetPasswordFromPreference(preference, "ssh_key_file")
passphrase := ""
password := ""
passwordCallback := func() (string, error) {
@@ -335,7 +336,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
true, resetPassword)
var key ssh.Signer
var keySigner ssh.Signer
var err error
if keyFile == "" {
@@ -346,15 +347,52 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
} else {
key, err = ssh.ParsePrivateKey(content)
keySigner, err = ssh.ParsePrivateKey(content)
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
if _, ok := err.(*ssh.PassphraseMissingError); ok {
LOG_TRACE("SSH_PUBLICKEY", "The private key file is encrypted")
passphrase = GetPassword(preference, "ssh_passphrase", "Enter the passphrase to decrypt the private key file:", false, resetPassword)
if len(passphrase) == 0 {
LOG_INFO("SSH_PUBLICKEY", "No passphrase to descrypt the private key file %s", keyFile)
} else {
keySigner, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the encrypted private key file %s: %v", keyFile, err)
}
}
} else {
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
}
}
if keySigner != nil {
certFile := keyFile + "-cert.pub"
if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() {
LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile)
var content []byte
content, err = ioutil.ReadFile(certFile)
if err != nil {
LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err)
} else {
pubKey, _, _, _, err := ssh.ParseAuthorizedKey(content)
if err != nil {
LOG_INFO("SSH_CERTIFICATE", "Failed parse ssh certificate file %s: %v", certFile, err)
} else {
certSigner, err := ssh.NewCertSigner(pubKey.(*ssh.Certificate), keySigner)
if err != nil {
LOG_INFO("SSH_CERTIFICATE", "Failed to create certificate signer: %v", err)
} else {
keySigner = certSigner
}
}
}
}
}
}
}
if key != nil {
signers = append(signers, key)
if keySigner != nil {
signers = append(signers, keySigner)
}
if len(signers) > 0 {
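Putting the pieces of this hunk together: the private key is parsed, a passphrase is requested only when ssh.ParsePrivateKey reports *ssh.PassphraseMissingError, and if a companion '<keyfile>-cert.pub' exists the key signer is wrapped in a certificate signer. A self-contained sketch of the same flow using golang.org/x/crypto/ssh; the helper name and error handling are illustrative:

    import (
        "io/ioutil"

        "golang.org/x/crypto/ssh"
    )

    // loadSigner parses a (possibly passphrase-protected) private key and, when a
    // companion "<keyFile>-cert.pub" exists, wraps it with the signed certificate.
    func loadSigner(keyFile, passphrase string) (ssh.Signer, error) {
        content, err := ioutil.ReadFile(keyFile)
        if err != nil {
            return nil, err
        }
        signer, err := ssh.ParsePrivateKey(content)
        if _, ok := err.(*ssh.PassphraseMissingError); ok {
            signer, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
        }
        if err != nil {
            return nil, err
        }
        if certBytes, cerr := ioutil.ReadFile(keyFile + "-cert.pub"); cerr == nil {
            if pubKey, _, _, _, perr := ssh.ParseAuthorizedKey(certBytes); perr == nil {
                if cert, ok := pubKey.(*ssh.Certificate); ok {
                    if certSigner, serr := ssh.NewCertSigner(cert, signer); serr == nil {
                        signer = certSigner // authenticate with the certificate
                    }
                }
            }
        }
        return signer, nil
    }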
@@ -410,6 +448,9 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
if keyFile != "" {
SavePassword(preference, "ssh_key_file", keyFile)
if passphrase != "" {
SavePassword(preference, "ssh_passphrase", passphrase)
}
} else if password != "" {
SavePassword(preference, "ssh_password", password)
}
@@ -509,11 +550,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return dropboxStorage
} else if matched[1] == "b2" {
bucket := matched[3]
storageDir := matched[5]
accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, threads)
b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "b2_id", accountID)
SavePassword(preference, "b2_key", applicationKey)
return b2Storage
} else if matched[1] == "b2-custom" {
b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
matched := b2customUrlRegex.FindStringSubmatch(storageURL)
downloadURL := "https://" + matched[1]
bucket := matched[2]
storageDir := matched[4]
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
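The new b2-custom scheme embeds a custom download host directly in the URL. A quick look at what the regex captures; the host, bucket and directory below are made up, and real code must also handle a nil match:

    re := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
    m := re.FindStringSubmatch("b2-custom://f004.backblazeb2.com/my-bucket/backups")
    // m[1] == "f004.backblazeb2.com"  -> prefixed with "https://" as the download URL
    // m[2] == "my-bucket"             -> the B2 bucket
    // m[4] == "backups"               -> optional directory inside the bucket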
@@ -564,21 +624,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
SavePassword(preference, "gcs_token", tokenFile)
return gcsStorage
} else if matched[1] == "gcd" {
// Handle writing directly to the root of the drive
// For gcd://driveid@/, driveid@ is match[3] not match[2]
if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
matched[2], matched[3] = matched[3], matched[2]
}
driveID := matched[2]
if driveID != "" {
driveID = driveID[:len(driveID)-1]
}
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
gcdStorage, err := CreateGCDStorage(tokenFile, driveID, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "gcd_token", tokenFile)
return gcdStorage
} else if matched[1] == "one" {
} else if matched[1] == "one" || matched[1] == "odb" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
return nil
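For Google Shared Drives the storage URL may now carry a drive ID or name before the '@'; in the root form gcd://driveid@/ the 'driveid@' token lands in matched[3], hence the swap at the top of the hunk. The URL regex itself is defined elsewhere in the file, so the group layout below is inferred from this hunk, and the drive IDs/names are made up:

    driveID := matched[2]
    if driveID != "" {
        driveID = driveID[:len(driveID)-1] // strip the trailing '@'
    }
    storagePath := matched[3] + matched[4]
    // gcd://backups               -> driveID == "",             storagePath == "backups" (My Drive)
    // gcd://0ABCdEfGhIj@/         -> driveID == "0ABCdEfGhIj",  storagePath == ""        (shared drive root)
    // gcd://Team Backups@projects -> driveID == "Team Backups", storagePath == "projects"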
@@ -609,7 +678,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "webdav" || matched[1] == "webdav-http" {
server := matched[3]
username := matched[2]
username = username[:len(username) - 1]
username = username[:len(username)-1]
storageDir := matched[5]
port := 0
useHTTP := matched[1] == "webdav-http"

View File

@@ -27,6 +27,7 @@ var testRateLimit int
var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -34,6 +35,7 @@ func init() {
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.Parse()
}
@@ -80,12 +82,12 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
return storage, err
} else if testStorageName == "s3" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
return storage, err
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "wasabi" {
storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
return storage, err
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "s3c" {
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
@@ -107,7 +109,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "b2" {
storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], threads)
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-s3" {
@@ -131,11 +133,23 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd" {
storage, err := CreateGCDStorage(config["token_file"], config["storage_path"], threads)
storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd-shared" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "hubic" {
@@ -153,10 +167,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else {
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}
@@ -299,7 +310,8 @@ func TestStorage(t *testing.T) {
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
storage, err := loadStorage(testDir, 1)
threads := 8
storage, err := loadStorage(testDir, threads)
if err != nil {
t.Errorf("Failed to create storage: %v", err)
return
@@ -329,16 +341,16 @@ func TestStorage(t *testing.T) {
storage.CreateDirectory(0, "shared")
// Upload to the same directory by multiple goroutines
count := 8
count := threads
finished := make(chan int, count)
for i := 0; i < count; i++ {
go func(name string) {
err := storage.UploadFile(0, name, []byte("this is a test file"))
go func(threadIndex int, name string) {
err := storage.UploadFile(threadIndex, name, []byte("this is a test file"))
if err != nil {
t.Errorf("Error to upload '%s': %v", name, err)
}
finished <- 0
}(fmt.Sprintf("shared/a/b/c/%d", i))
}(i, fmt.Sprintf("shared/a/b/c/%d", i))
}
for i := 0; i < count; i++ {
@@ -387,7 +399,6 @@ func TestStorage(t *testing.T) {
snapshotIDs := []string{}
for _, snapshotDir := range snapshotDirs {
LOG_INFO("debug", "snapshot dir: %s", snapshotDir)
if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
}

View File

@@ -21,7 +21,7 @@ type SwiftStorage struct {
threads int
}
// CreateSwiftStorage creates an OpenStack Swift storage object. storageURL is in the form of
// CreateSwiftStorage creates an OpenStack Swift storage object. storageURL is in the form of
// `user@authURL/container/path?arg1=value1&arg2=value2`
func CreateSwiftStorage(storageURL string, key string, threads int) (storage *SwiftStorage, err error) {

View File

@@ -10,10 +10,7 @@ import (
"fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
@@ -58,7 +55,7 @@ func IsEmptyFilter(pattern string) bool {
}
func IsUnspecifiedFilter(pattern string) bool {
if pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e' {
if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
return true
} else {
return false
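The old test looked only at the first character, so any plain pattern that happened to start with 'i' or 'e' was wrongly treated as already carrying an include/exclude prefix. A few cases under the corrected rule; the pattern strings are illustrative:

    IsUnspecifiedFilter("+foo/")    // false: explicit include
    IsUnspecifiedFilter("-*.tmp")   // false: explicit exclude
    IsUnspecifiedFilter("i:\\.go$") // false: regex include
    IsUnspecifiedFilter("e:\\.o$")  // false: regex exclude
    IsUnspecifiedFilter("images/")  // true: plain pattern, despite the leading 'i'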
@@ -176,6 +173,15 @@ func GetPasswordFromPreference(preference Preference, passwordType string) strin
if password, found := os.LookupEnv(name); found && password != "" {
return password
}
re := regexp.MustCompile(`[^a-zA-Z0-9_]`)
namePlain := re.ReplaceAllString(name, "_")
if namePlain != name {
LOG_DEBUG("PASSWORD_ENV_VAR", "Reading the environment variable %s", namePlain)
if password, found := os.LookupEnv(namePlain); found && password != "" {
return password
}
}
}
// If the password is stored in the preference, there is no need to include the storage name
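Names derived from a storage name can contain characters such as '-' that most shells refuse in environment variable names, so the lookup above now retries with every character outside [a-zA-Z0-9_] mapped to '_'. A sketch of the fallback; the variable name is illustrative:

    name := "DUPLICACY_MY-STORAGE_SSH_PASSWORD" // '-' copied from the storage name
    re := regexp.MustCompile(`[^a-zA-Z0-9_]`)
    sanitized := re.ReplaceAllString(name, "_") // "DUPLICACY_MY_STORAGE_SSH_PASSWORD"
    if password, found := os.LookupEnv(sanitized); found && password != "" {
        _ = password // use this value instead of prompting
    }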
@@ -390,19 +396,6 @@ func MatchPath(filePath string, patterns []string) (included bool) {
}
}
func joinPath(components ...string) string {
combinedPath := path.Join(components...)
if len(combinedPath) > 257 && runtime.GOOS == "windows" {
combinedPath = `\\?\` + filepath.Join(components...)
// If the path is on a samba drive we must use the UNC format
if strings.HasPrefix(combinedPath, `\\?\\\`) {
combinedPath = `\\?\UNC\` + combinedPath[6:]
}
}
return combinedPath
}
func PrettyNumber(number int64) string {
G := int64(1024 * 1024 * 1024)
@@ -467,10 +460,3 @@ func AtoSize(sizeString string) int {
return size
}
func MinInt(x, y int) int {
if x < y {
return x
}
return y
}

View File

@@ -9,6 +9,7 @@ package duplicacy
import (
"bytes"
"os"
"path"
"path/filepath"
"syscall"
@@ -83,3 +84,11 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
}
}
func joinPath(components ...string) string {
return path.Join(components...)
}
func SplitDir(fullPath string) (dir string, file string) {
return path.Split(fullPath)
}

View File

@@ -92,6 +92,17 @@ func TestMatchPattern(t *testing.T) {
}
}
for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
if IsUnspecifiedFilter(pattern) {
t.Errorf("pattern %s has a specified filter", pattern)
}
}
for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} {
if !IsUnspecifiedFilter(pattern) {
t.Errorf("pattern %s does not have a specified filter", pattern)
}
}
}
func TestRateLimit(t *testing.T) {

View File

@@ -7,6 +7,8 @@ package duplicacy
import (
"fmt"
"os"
"path/filepath"
"strings"
"syscall"
"unsafe"
)
@@ -114,3 +116,18 @@ func (entry *Entry) ReadAttributes(top string) {
func (entry *Entry) SetAttributesToFile(fullPath string) {
}
func joinPath(components ...string) string {
combinedPath := `\\?\` + filepath.Join(components...)
// If the path is on a samba drive we must use the UNC format
if strings.HasPrefix(combinedPath, `\\?\\\`) {
combinedPath = `\\?\UNC\` + combinedPath[6:]
}
return combinedPath
}
func SplitDir(fullPath string) (dir string, file string) {
i := strings.LastIndex(fullPath, "\\")
return fullPath[:i+1], fullPath[i+1:]
}
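On Windows the joinPath above always applies the \\?\ prefix, which lifts the 260-character MAX_PATH limit, and rewrites samba paths into the \\?\UNC\ form that the prefix requires. Worked examples; the paths are illustrative:

    // joinPath(`C:\data`, `repo`)        -> `\\?\C:\data\repo`
    // joinPath(`\\server\share`, `repo`) -> `\\?\UNC\server\share\repo`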

View File

@@ -93,49 +93,49 @@ func (storage *WasabiStorage) DeleteFile(
// rename. It's designed to get the job done with as few dependencies
// on other packages as possible rather than being something
// general-purpose and reusable.
func (storage *WasabiStorage) MoveFile(
threadIndex int, from string, to string,
) (err error) {
func (storage *WasabiStorage) MoveFile(threadIndex int, from string, to string) (err error) {
var from_path string
var fromPath string
// The from path includes the bucket. Take care not to include an empty storageDir
// string as Wasabi's backend will return 404 on URLs with double slashes.
if (storage.storageDir == "") {
from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
if storage.storageDir == "" {
fromPath = fmt.Sprintf("/%s/%s", storage.bucket, from)
} else {
from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
fromPath = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
}
object := fmt.Sprintf("https://%s@%s%s",
storage.region, storage.endpoint, from_path)
object := fmt.Sprintf("https://%s@%s%s", storage.region, storage.endpoint, fromPath)
toPath := to
// The object's new name is relative to the top of the bucket.
new_name := fmt.Sprintf("%s/%s", storage.storageDir, to)
if storage.storageDir != "" {
toPath = fmt.Sprintf("%s/%s", storage.storageDir, to)
}
timestamp := time.Now().Format(time.RFC1123Z)
signing_string := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, from_path)
signingString := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)
signer := hmac.New(sha1.New, []byte(storage.secret))
signer.Write([]byte(signing_string))
signer.Write([]byte(signingString))
signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))
authorization := fmt.Sprintf("AWS %s:%s", storage.key, signature)
request, error := http.NewRequest("MOVE", object, nil)
if error != nil {
return error
request, err := http.NewRequest("MOVE", object, nil)
if err != nil {
return err
}
request.Header.Add("Authorization", authorization)
request.Header.Add("Date", timestamp)
request.Header.Add("Destination", new_name)
request.Header.Add("Destination", toPath)
request.Header.Add("Host", storage.endpoint)
request.Header.Add("Overwrite", "true")
response, error := storage.client.Do(request)
if error != nil {
return error
response, err := storage.client.Do(request)
if err != nil {
return err
}
defer response.Body.Close()
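The MOVE request is authenticated with the classic AWS v2 style signature: an HMAC-SHA1 over a canonical string, base64-encoded into the Authorization header. The signing step in isolation; the secret, access key and path are placeholders:

    timestamp := time.Now().Format(time.RFC1123Z)
    stringToSign := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, "/my-bucket/chunks/ab/cdef")
    mac := hmac.New(sha1.New, []byte("SECRET_KEY"))
    mac.Write([]byte(stringToSign))
    signature := base64.StdEncoding.EncodeToString(mac.Sum(nil))
    authorization := fmt.Sprintf("AWS %s:%s", "ACCESS_KEY", signature)
    _ = authorization // sent as the Authorization header of the MOVE request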

View File

@@ -19,9 +19,9 @@ import (
"net/http"
//"net/http/httputil"
"strconv"
"strings"
"sync"
"time"
"strings"
)
type WebDAVStorage struct {
@@ -42,14 +42,14 @@ type WebDAVStorage struct {
var (
errWebDAVAuthorizationFailure = errors.New("Authentication failed")
errWebDAVMovedPermanently = errors.New("Moved permanently")
errWebDAVNotExist = errors.New("Path does not exist")
errWebDAVMaximumBackoff = errors.New("Maximum backoff reached")
errWebDAVMethodNotAllowed = errors.New("Method not allowed")
errWebDAVMovedPermanently = errors.New("Moved permanently")
errWebDAVNotExist = errors.New("Path does not exist")
errWebDAVMaximumBackoff = errors.New("Maximum backoff reached")
errWebDAVMethodNotAllowed = errors.New("Method not allowed")
)
func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
if storageDir[len(storageDir)-1] != '/' {
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}
@@ -59,7 +59,7 @@ func CreateWebDAVStorage(host string, port int, username string, password string
username: username,
password: password,
storageDir: "",
useHTTP: false,
useHTTP: useHTTP,
client: http.DefaultClient,
threads: threads,
@@ -68,7 +68,7 @@ func CreateWebDAVStorage(host string, port int, username string, password string
// Make sure it doesn't follow redirect
storage.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
return http.ErrUseLastResponse
}
exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
@@ -151,6 +151,10 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
request.Header.Set(key, value)
}
if method == "PUT" {
request.ContentLength = int64(len(data))
}
//requestDump, err := httputil.DumpRequest(request, true)
//LOG_INFO("debug", "Request: %s", requestDump)
@@ -313,6 +317,7 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
properties, err := storage.getProperties(filePath, 0, "getcontentlength", "resourcetype")
if err != nil {
if err == errWebDAVNotExist {
@@ -325,11 +330,18 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
return false, false, 0, err
}
if m, exist := properties["/" + storage.storageDir + filePath]; !exist {
m, exist := properties["/"+storage.storageDir+filePath]
// If no properties exist for the given filePath, remove the trailing / from filePath and search again
if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' {
m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]]
}
if !exist {
return false, false, 0, nil
} else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
return true, true, 0, nil
} else if length, exist := m["getcontentlength"]; exist && length != ""{
} else if length, exist := m["getcontentlength"]; exist && length != "" {
value, _ := strconv.Atoi(length)
return true, false, int64(value), nil
} else {