mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits


103 Commits

Author SHA1 Message Date
Gilbert Chen
72eb339837 Bump version to 3.0.1 2022-10-06 20:30:05 -04:00
Gilbert Chen
901044b348 Update dependencies 2022-10-06 20:29:51 -04:00
Gilbert Chen
d6f5336784 Bump version to 3.0.0 2022-10-05 22:21:17 -04:00
Gilbert Chen
4b47ea55e4 Bump version to 2.8.0 2022-10-04 12:49:50 -04:00
Gilbert Chen
5c35ef799a Switch to go modules 2022-10-04 12:48:58 -04:00
Gilbert Chen
2c63d32142 Use swift V2 2022-10-04 12:47:38 -04:00
Gilbert Chen
6009f64b66 Add a storage backend for Storj
The url format is storj://satellite/bucket/path.  You can get the
satellite along with the api access key when requesting an Access
Grant of type API Access.
2022-09-30 10:30:06 -04:00
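For example, a hypothetical init against such a storage (placeholder values; the command form `duplicacy init <snapshot id> <storage url>` appears in the main.go diff below):

    duplicacy init my-documents storj://<satellite address>/<bucket>/<path>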
Gilbert Chen
cde660ee9f Use long-lived refresh token for the Dropbox backend
The refresh token can be downloaded from https://duplicacy.com/dropbox_start
2022-08-12 22:17:06 -04:00
Gilbert Chen
54952cef26 Fixed a bug that referenced uninitialized operator.snapshotCache 2022-07-10 12:23:11 -04:00
Gilbert Chen
fc2386f9cc Initialize startTime correctly in CreateChunkOperator 2022-06-09 23:28:29 -04:00
Gilbert Chen
0d8a37f9f3 Dependency change: update github.com/ncw/swift to v2.0.1 2022-04-08 23:16:51 -04:00
gilbertchen
345fc5ed87 Merge pull request #626 from markfeit/swift-v2
Swift v2
2022-04-08 22:29:30 -04:00
Gilbert Chen
8df529dffe Fixed the type of a test parameter 2022-04-07 23:34:48 -04:00
gilbertchen
f2d6de3fff Merge pull request #625 from gilbertchen/memory_optimization
Rewrite the backup procedure to reduce memory usage
2022-04-07 23:26:46 -04:00
Gilbert Chen
fede9c74b5 Merge branch 'master' into memory_optimization 2022-04-07 23:26:14 -04:00
Gilbert Chen
a953c4ec28 Don't parse test parameters in init()
This is to make test parameter parsing work with newer versions
of Go
2022-04-07 22:31:18 -04:00
gilbertchen
f52dcf761b Merge branch 'master' into memory_optimization 2022-03-31 15:05:50 -04:00
Gilbert Chen
ade669d14e Update totalChunkSize in the chunk operator to show stats during restore 2022-03-04 16:53:40 -05:00
Mark Feit
4743c7ba0d Don't cancel the context and use a sane deadline. 2021-12-17 10:09:36 -05:00
Mark Feit
590d3b1b5b More development 2021-12-04 10:56:47 -05:00
Mark Feit
0590daff85 More dev 2021-12-04 10:53:34 -05:00
Mark Feit
95b1227d93 Use empty context 2021-12-04 10:48:01 -05:00
Mark Feit
1661caeb92 More fixups 2021-12-04 10:37:11 -05:00
Mark Feit
041ba944c4 Typo 2021-12-04 10:24:31 -05:00
Mark Feit
934c2515cc Import context 2021-12-04 10:23:42 -05:00
Mark Feit
c363d21954 First cut of Swift v2 2021-12-04 10:11:19 -05:00
Gilbert Chen
d9f6545d63 Rewrite the backup procedure to reduce memory usage
Main changes:

* Change the listing order of files/directories so that the local and remote
  snapshots can be compared on-the-fly.

* Introduce a new struct called EntryList that maintains a list of
  files/directories, which are kept in memory when their number is small, and
  serialized into a file when there are too many.

* EntryList can also be turned into an on-disk incomplete snapshot quickly,
  to support fast-resume on next run.

* ChunkOperator can now download and upload chunks, thus replacing original
  ChunkDownloader and ChunkUploader.  The new ChunkDownloader is only used
  to prefetch chunks during the restore operation.
2021-10-24 23:34:49 -04:00
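A minimal Go sketch of the spill-to-disk idea behind EntryList (the names, the threshold handling, and the JSON serialization are illustrative assumptions; the real code may use a different format, e.g. the msgpack dependency listed in go.mod below):

    package duplicacy

    import (
        "encoding/json"
        "os"
    )

    // Entry stands in for a file/directory record; the real struct has more fields.
    type Entry struct {
        Path string
        Size int64
    }

    // EntryList keeps entries in memory while their number is small and spills
    // further entries to a temporary file once maxInMemory is exceeded.
    type EntryList struct {
        maxInMemory int
        inMemory    []Entry
        spillFile   *os.File // kept so the overflow can be read back or closed later
        spill       *json.Encoder
    }

    func (l *EntryList) Add(e Entry) error {
        if len(l.inMemory) < l.maxInMemory {
            l.inMemory = append(l.inMemory, e)
            return nil
        }
        if l.spill == nil {
            f, err := os.CreateTemp("", "entrylist")
            if err != nil {
                return err
            }
            l.spillFile = f
            l.spill = json.NewEncoder(f)
        }
        return l.spill.Encode(&e) // overflow entries are serialized to disk
    }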
Gilbert Chen
68b60499d7 Add a global option to print memory usage
This option, -print-memory-usage, will print memory usage every second while
the program is running.
2021-10-15 20:45:53 -04:00
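A sketch of what this option plausibly does (the function name PrintMemoryUsage matches the call added in the main.go diff below; the body here is an assumption built on runtime.ReadMemStats):

    package duplicacy

    import (
        "log"
        "runtime"
        "time"
    )

    // PrintMemoryUsage logs allocation statistics every second; it is meant
    // to run in its own goroutine for the lifetime of the process.
    func PrintMemoryUsage() {
        for range time.Tick(time.Second) {
            var m runtime.MemStats
            runtime.ReadMemStats(&m)
            log.Printf("heap in use: %d MB, system: %d MB, GC cycles: %d",
                m.HeapInuse>>20, m.Sys>>20, m.NumGC)
        }
    }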
Gilbert Chen
cacf6618d2 Download a fossil directly instead of turning it back to a chunk first
This is to avoid the read-after-rename consistency issue where the effect
of renaming may not be observed by the subsequent attempt to download the
just renamed chunk.
2021-10-08 14:04:56 -04:00
Gilbert Chen
e43e848d47 Find the storage path in shared folders first when connecting to Google Drive
When connecting to Google Drive with a service account key, only files in the
service account's own hidden drive space are listable.  This change finds
the given storage path among shared folders first so that folders from the user
space can be made accessible via the service account.
2021-03-09 22:46:23 -05:00
gilbertchen
fd1b7e1d20 Merge pull request #612 from gilbertchen/gcd_impersonate
Support GCD impersonation via modified service account file
2021-03-09 10:24:08 -05:00
Gilbert Chen
f83e4f3c44 Fix SNAPSHOT_INACTIVE log message 2021-01-28 00:06:49 -05:00
gilbertchen
ecf5191400 Update README.md 2021-01-04 14:19:51 -05:00
gilbertchen
ba091fbe42 Add a link to the paper 2021-01-04 14:17:48 -05:00
Gilbert Chen
ee9355b974 Check in the paper accepted to IEEE Transactions on Cloud Computing 2021-01-04 10:18:04 -05:00
Gilbert Chen
4cfecf12f8 Show the path in the error when a subdirectory can't be listed 2021-01-04 10:17:11 -05:00
Gilbert Chen
4104c2f934 Exit with code 2 when an invalid command is provided 2021-01-04 10:16:10 -05:00
Gilbert Chen
41a8f657c4 Add test storage for StorageMadeEasy's File Fabric 2020-11-23 14:38:15 -05:00
Gilbert Chen
474f07e5cc Support GCD impersonation via modified service account file 2020-11-23 09:44:10 -05:00
Gilbert Chen
175adb14cb Bump version to 2.7.2 2020-11-15 23:20:11 -05:00
Gilbert Chen
ae706e3dcf Update dependency (for gilbertchen/go-dropbox and github.com/pkg/xattr) 2020-11-15 23:19:20 -05:00
Gilbert Chen
5eed6c65f6 Validate the repository id for the init and add command
Only letters, numbers, dashes, and underscores are allowed.
2020-11-04 21:32:07 -05:00
Gilbert Chen
bec3a0edcd Fixed a bug that caused a fresh restore to fail without the -overwrite option
When restoring a file that doesn't exist locally, if the file is large (>100M)
Duplicacy will create an empty sparse file.  But this newly created file will
be mistaken for a local copy and hence the restore will fail with a message
suggesting the -overwrite option.
2020-11-03 10:57:47 -05:00
Gilbert Chen
b392302c06 Use github.com/pkg/xattr for reading/writing extended attributes.
The one previously used, github.com/redsift/xattr, is old and can only process
user-defined extended attributes, not system ones.
2020-10-16 21:16:05 -04:00
Gilbert Chen
7c36311aa9 Change snapshot source path from / to /System/Volumes/Data
Also use a regex to extract the snapshot date from tmutil output.
2020-10-11 15:23:09 -04:00
Gilbert Chen
7f834e84f6 Don't attempt to load verified_chunks when it doesn't exist. 2020-10-09 14:22:45 -04:00
Gilbert Chen
d7c1903d5a Skip chunks already verified in previous runs for check -chunks.
This is done by storing the list of verified chunks in a file
`.duplicacy/cache/<storage>/verified_chunks`.
2020-10-08 19:59:39 -04:00
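A hedged sketch of the load half of this mechanism (the on-disk format here, one hash per line, is an assumption; the commit doesn't specify it), which also reflects the "don't fail on a missing file" fix above:

    package duplicacy

    import (
        "bufio"
        "os"
    )

    // loadVerifiedChunks reads previously verified chunk hashes, one per line.
    // A missing file is not an error: it simply means nothing was verified yet.
    func loadVerifiedChunks(path string) (map[string]bool, error) {
        verified := map[string]bool{}
        f, err := os.Open(path)
        if os.IsNotExist(err) {
            return verified, nil
        } else if err != nil {
            return nil, err
        }
        defer f.Close()
        scanner := bufio.NewScanner(f)
        for scanner.Scan() {
            verified[scanner.Text()] = true
        }
        return verified, scanner.Err()
    }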
Gilbert Chen
7da58c6d49 Bump version to 2.7.1 2020-10-01 21:59:48 -04:00
gilbertchen
4402be6763 Update README.md 2020-10-01 14:38:10 -04:00
gilbertchen
3abec4e37a Update README.md 2020-10-01 14:36:04 -04:00
gilbertchen
dd40d4cd2f Update README.md 2020-10-01 13:26:50 -04:00
gilbertchen
923e906b7e Merge pull request #609 from lokeshsammeta/master
Update README.md
2020-10-01 13:13:20 -04:00
Gilbert Chen
0da55f95ab Fixed a 64-bit integer alignment problem on 32-bit OS
A new variable added previously caused a 64-bit variable to not be
aligned on an 8-byte boundary.  Go still can't handle such variables on a
32-bit OS.
2020-10-01 10:35:59 -04:00
Gilbert Chen
2f407d6af9 Another change needed for symlinked chunk subdirectories 2020-10-01 09:27:20 -04:00
lokeshsammeta
bb680538ee Update README.md
Fixed some grammatical errors.
2020-10-01 07:04:36 +05:30
Gilbert Chen
7e372edd68 Allow chunks subdirectories in the disk storage to be symlinks. 2020-09-29 13:15:45 -04:00
Gilbert Chen
836a785798 Add src/duplicacy_utils_freebsd.go 2020-09-26 21:22:49 -04:00
Gilbert Chen
e0a72efb34 Bump version to 2.7.0 2020-09-26 20:52:04 -04:00
Gilbert Chen
d839f26b5a Add new dependency requirements 2020-09-26 20:49:50 -04:00
Gilbert Chen
6ad698328f Fixed test build errors caused by previous merges 2020-09-26 12:01:38 -04:00
Gilbert Chen
ace1ba5848 Remove runtime OS check for excluding by attributes 2020-09-25 22:37:54 -04:00
gilbertchen
04a858b555 Merge pull request #498 from plasticrake/mac-exclude
Add exclude_by_attribute preference to exclude files based on xattr
2020-09-25 20:15:20 -04:00
gilbertchen
1fedfd1b1a Merge branch 'master' into mac-exclude 2020-09-25 20:13:43 -04:00
gilbertchen
3fd3f6b267 Merge pull request #606 from gilbertchen/erasure_coding
Implement Erasure Coding
2020-09-25 14:30:39 -04:00
Gilbert Chen
e3e3e97046 Improvements for the WebDAV backend
* CreateDirectory() looks up the directory in the cache first and doesn't create
  it if found in cache
* ListFiles() puts subdirectories in the cache
* If CreateDirectory() encounters EOF, assume the directory already exists
2020-09-25 13:56:44 -04:00
Gilbert Chen
3f29ec2ffb File metadata must be restored even if the content is unchanged. 2020-09-24 22:44:43 -04:00
Gilbert Chen
947006411b Improvements for the copy command
* Added a `-download-threads` option for specifying the number of downloading
  threads
* Show progress log messages during copy
2020-09-24 14:56:19 -04:00
Gilbert Chen
6841c989c6 Fixed a bug that caused check -chunks -persist to succeed with broken chunks
The bug was not setting the `isBroken` flag in WaitForChunk()
2020-09-24 14:53:42 -04:00
Gilbert Chen
d0b3b5dc2e Print progress logs when verifying chunks (check -chunks) 2020-09-23 09:02:53 -04:00
Gilbert Chen
73ae3f809e Revert "Add a -max-list-rate option to backup to slow down the listing"
This reverts commit 67a3103467.
2020-09-22 22:08:43 -04:00
Gilbert Chen
67a3103467 Add a -max-list-rate option to backup to slow down the listing
This option sets the maximum number of files that can be listed in one
second.
2020-09-22 08:27:09 -04:00
Gilbert Chen
6ee01a2e74 Allow RSA keys to be passed directly via the CLI instead of a file
This change is intended to be used by the web GUI to create an RSA encrypted
storage.
2020-09-20 20:03:52 -04:00
Gilbert Chen
b7d820195a Remove a debug log message accidentally checked in 2020-09-18 14:59:44 -04:00
Gilbert Chen
16d2c14c5a Follow-up changes for the -persist PR
* Restore/check should report an error instead of a success at the end if there
  were any errors and -persist is specified
* Don't compute the file hash before passing the file to the chunk maker; this is
  redundant as the chunk maker will produce the file hash
* Add a LOG_WERROR function to switch between LOG_WARN and LOG_ERROR dynamically
2020-09-18 11:23:35 -04:00
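A sketch of what a LOG_WERROR switch could look like (LOG_WARN and LOG_ERROR are the package's existing log functions, seen elsewhere in this diff; this wrapper's exact signature is an assumption):

    // LOG_WERROR logs at WARN level when persist mode is on, so processing can
    // continue, and at ERROR level otherwise; the signature is illustrative.
    func LOG_WERROR(persist bool, logID string, format string, v ...interface{}) {
        if persist {
            LOG_WARN(logID, format, v...)
        } else {
            LOG_ERROR(logID, format, v...)
        }
    }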
Gilbert Chen
eecbb8fa99 Fix OneDrive Business and improve retry mechanism.
* Switch to the upload-by-session API for OneDrive Business, as their servers
may keep incomplete files when an upload using the simple upload API is
aborted

* Use the delay value in the http Retry-After header if there is one

* Decorate the HTTPS traffic in the hope of reducing rate limiting.
2020-09-18 10:19:03 -04:00
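For the Retry-After change, a sketch of reading the header (only the delay-in-seconds form is handled here; the header can also carry an HTTP date, and the helper name and fallback behavior are assumptions):

    package duplicacy

    import (
        "net/http"
        "strconv"
        "time"
    )

    // retryDelay honors a Retry-After header carrying a delay in seconds,
    // falling back to the caller's default backoff otherwise.
    func retryDelay(resp *http.Response, fallback time.Duration) time.Duration {
        if seconds, err := strconv.Atoi(resp.Header.Get("Retry-After")); err == nil && seconds > 0 {
            return time.Duration(seconds) * time.Second
        }
        return fallback
    }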
Gilbert Chen
97bae5f1a3 Close the response body when 301 is returned in the WebDAV backend 2020-09-14 11:36:22 -04:00
Gilbert Chen
40243fb043 Fixed a compile error in previous checkin 2020-09-12 11:39:46 -04:00
Gilbert Chen
403df1fd06 Added a call to discard the HTTP response where it was missed in previous fixes.
Also added a missing import "io/ioutil".
2020-09-09 21:47:01 -04:00
gilbertchen
4369bcfc0b Merge pull request #549 from Jos635/master
Improve WebDAV performance
2020-09-09 16:02:36 -04:00
gilbertchen
d2b08aebee Merge pull request #594 from alecuyer/fix/swift
Call Authenticate() before using Swift storage
2020-09-09 15:48:36 -04:00
gilbertchen
948994c2b6 Merge pull request #595 from twlee79/add_persist_pr
Adds -persist option to check and restore commands to continue despite errors
2020-09-09 15:42:46 -04:00
gilbertchen
ca4d004aca Merge branch 'master' into add_persist_pr 2020-09-09 15:42:01 -04:00
Gilbert Chen
ce472fe375 Show erasure coding/rsa encryption if enabled for backup and copy 2020-09-04 10:43:37 -04:00
Gilbert Chen
923a6fbc5b Implement Erasure Coding 2020-09-03 12:54:48 -04:00
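The flag added by this commit (see the init/add options in the main.go diff below) takes the form -erasure-coding <data shards>:<parity shards>; for example, a hypothetical 5-data, 2-parity setup:

    duplicacy init -erasure-coding 5:2 my-documents <storage url>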
Gilbert Chen
670cbcd776 Bump version to 2.6.2 2020-08-30 22:14:58 -04:00
Gilbert Chen
fd469bae9e Check the returned value of Close() when uploading a chunk file via SFTP. 2020-08-29 22:22:51 -04:00
Gilbert Chen
acef01770a Bump version to 2.6.1 2020-07-07 23:27:11 -04:00
Gilbert Chen
1eb1fb14a8 Don't throw an error on 0-byte chunk files with suffix '.tmp'. 2020-07-07 23:25:09 -04:00
Gilbert Chen
8b489f04eb Bump version to 2.6.0 2020-07-05 21:27:44 -04:00
Gilbert Chen
089e19f8e6 Add a -key-passphrase option to pass in passphrase for RSA private key
This option is mainly for the web GUI which currently doesn't have a way to
specify the passphrase to decrypt the RSA private key.
2020-07-05 20:58:07 -04:00
Gilbert Chen
1da7e2b536 Fix a crash when a username is not specified with the WebDAV backend 2020-07-03 12:29:53 -04:00
Gilbert Chen
ed8b4393be Add a new backend for StorageMadeEasy's File Fabric storage.
The storage url is fabric://username@storagemadeeasy.com/path/to/storage.
2020-07-03 12:07:33 -04:00
Gilbert Chen
5e28dc4911 Update go-dropbox dependency to fix 0-byte file uploading.
Incorporate gilbertchen/go-dropbox/commit/0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1.
2020-07-03 11:52:16 -04:00
Gilbert Chen
f2f07a120d Retry on "unexpected EOF" errors for the webdav backend. 2020-07-03 11:48:30 -04:00
Gilbert Chen
153f6a2d20 Use multiple threads to list the chunks directory for Google Drive 2020-06-15 12:49:13 -04:00
Gilbert Chen
5d45999077 Clear the loaded content after a snapshot has been verified
The snapshot content is loaded before verifying the snapshot, but after that
it isn't used anymore so it should be released to save memory.
2020-06-10 10:08:53 -04:00
Gilbert Chen
1adcf56890 Add an SFTP backend that supports more ciphers and kex algorithms.
"sftpc://" supports all algorithms implemented in golang.org/x/crypto/ssh,
especially including those weak ones that are excluded from the defaults.
2020-06-08 11:24:20 -04:00
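A sketch of widening the cipher/kex lists with golang.org/x/crypto/ssh (the algorithm names below are examples of legacy algorithms the library's defaults exclude; the exact list Duplicacy enables is not given in the commit):

    package duplicacy

    import "golang.org/x/crypto/ssh"

    // widenedSSHConfig returns a client config that, unlike the defaults,
    // also offers older ciphers and key exchange algorithms.
    func widenedSSHConfig(user string, auth []ssh.AuthMethod, hostKey ssh.HostKeyCallback) *ssh.ClientConfig {
        return &ssh.ClientConfig{
            User:            user,
            Auth:            auth,
            HostKeyCallback: hostKey,
            Config: ssh.Config{
                Ciphers: []string{
                    "aes128-ctr", "aes192-ctr", "aes256-ctr",
                    "aes128-cbc", "3des-cbc", // legacy ciphers, off by default
                },
                KeyExchanges: []string{
                    "curve25519-sha256@libssh.org",
                    "diffie-hellman-group14-sha1",
                    "diffie-hellman-group1-sha1", // legacy kex, off by default
                },
            },
        }
    }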
Gilbert Chen
09e3cdfebf Ignore 0-byte chunks passed in by the chunk reader.
The fixed-size chunk reader may create 0-byte chunks from empty files.  This
may cause validation errors when preparing the snapshot file as the last step
of a backup.
2020-06-08 10:53:01 -04:00
Gilbert Chen
fe854d469d Error out in the check command if there are 0-size chunks. 2020-06-02 11:37:12 -04:00
Tet Woo Lee
4ae16dec7f add -persist in check and restore mode (for PR) 2020-05-06 18:39:52 +12:00
Alexandre Lécuyer
dae040681d Call Authenticate() before using Swift storage
Failing to call Authenticate() before using the swift connection will
cause a panic in recent versions of the swift library.
2020-05-05 23:09:28 +02:00
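With the ncw/swift library (v1 API, matching this commit's era) that amounts to authenticating eagerly; a sketch with hypothetical credentials:

    package duplicacy

    import "github.com/ncw/swift"

    // openSwift authenticates up front so that later calls never hit the
    // library's panic on an unauthenticated connection.
    func openSwift(user, key, authURL string) (*swift.Connection, error) {
        conn := &swift.Connection{
            UserName: user, // hypothetical credentials
            ApiKey:   key,
            AuthUrl:  authURL,
        }
        if err := conn.Authenticate(); err != nil {
            return nil, err
        }
        return conn, nil
    }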
Jos
2eb8ea6094 Improve WebDAV performance 2019-03-01 19:41:00 +01:00
Patrick Seal
a1efbe3b73 Add exclude_by_attribute preference 2018-09-21 21:35:40 -07:00
46 changed files with 5683 additions and 2380 deletions

Gopkg.lock (generated, 253 lines deleted)

@@ -1,253 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
version = "v10.15.5"
[[projects]]
branch = "master"
name = "github.com/aryann/difflib"
packages = ["."]
revision = "e206f873d14a916d3d26c40ab667bca123f365a3"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
version = "v1.30.7"
[[projects]]
name = "github.com/bkaradzic/go-lz4"
packages = ["."]
revision = "74ddf82598bc4745b965729e9c6a463bedd33049"
version = "v1.0.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage","version"]
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/cli"
packages = ["."]
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
packages = ["."]
revision = "994e692c5061cefa14e4296600a773de7119aa15"
[[projects]]
name = "github.com/gilbertchen/go-ole"
packages = ["."]
revision = "0e87ea779d9deb219633b828a023b32e1244dd57"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/go.dbus"
packages = ["."]
revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/goamz"
packages = ["aws","s3"]
revision = "eada9f4e8cc2a45db775dee08a2c37597ce4760a"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/gopass"
packages = ["."]
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/keyring"
packages = ["."]
revision = "8855f5632086e51468cd7ce91056f8da69687ef6"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/xattr"
packages = ["."]
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
[[projects]]
branch = "master"
name = "github.com/golang/groupcache"
packages = ["lru"]
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go","protoc-gen-go/descriptor","protoc-gen-go/generator","protoc-gen-go/generator/internal/remap","protoc-gen-go/grpc","protoc-gen-go/plugin","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "84668698ea25b64748563aa20726db66a6b8d299"
version = "v1.3.5"
[[projects]]
name = "github.com/googleapis/gax-go"
packages = [".","v2"]
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
version = "v2.0.3"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "c2b33e84"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/minio/blake2b-simd"
packages = ["."]
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
version = "v1.0.50"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "614d223910a179a466c1767a985424175c39b465"
version = "v0.9.1"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
version = "v1.11.0"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/vaughan0/go-ini"
packages = ["."]
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
[[projects]]
name = "go.opencensus.io"
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
revision = "d835ff86be02193d324330acdb7d65546b05f814"
version = "v0.22.3"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
[[projects]]
branch = "master"
name = "golang.org/x/exp"
packages = ["apidiff","cmd/apidiff"]
revision = "e8c3332aa8e5b8e6acb4707c3a7e5979052b20aa"
[[projects]]
name = "golang.org/x/mod"
packages = ["module","semver"]
revision = "ed3ec21bb8e252814c380df79a80f366440ddb2d"
version = "v0.2.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
[[projects]]
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["cpu","unix","windows"]
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
[[projects]]
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = ["cmd/goimports","go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/fastwalk","internal/gocommand","internal/gopathwalk","internal/imports","internal/packagesinternal","internal/telemetry/event"]
revision = "700752c244080ed7ef6a61c3cfd73382cd334e57"
[[projects]]
branch = "master"
name = "golang.org/x/xerrors"
packages = [".","internal"]
revision = "9bdfabe68543c54f90421aeb9a60ef8061b5b544"
[[projects]]
name = "google.golang.org/api"
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
version = "v0.21.0"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
version = "v1.6.5"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
version = "v1.28.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "e124cf64f7f8770e51ae52ac89030d512da946e3fdc2666ebd3a604a624dd679"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml (98 lines deleted)

@@ -1,98 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "cloud.google.com/go"
version = "0.16.0"
[[constraint]]
branch = "master"
name = "github.com/aryann/difflib"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.30.7"
[[constraint]]
name = "github.com/bkaradzic/go-lz4"
version = "1.0.0"
[[constraint]]
name = "github.com/gilbertchen/azure-sdk-for-go"
branch = "master"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/cli"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/go-dropbox"
[[constraint]]
name = "github.com/gilbertchen/go-ole"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/goamz"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/gopass"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/keyring"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/xattr"
[[constraint]]
branch = "master"
name = "github.com/minio/blake2b-simd"
[[constraint]]
name = "github.com/pkg/sftp"
version = "1.10.1"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "golang.org/x/oauth2"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[constraint]]
name = "google.golang.org/api"
version = "0.21.0"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.28.0"

README.md

@@ -1,8 +1,10 @@
# Duplicacy: A lock-free deduplication cloud backup tool
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).
This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
Our paper explaining the inner workings of Duplicacy has been accepted by [IEEE Transactions on Cloud Computing](https://ieeexplore.ieee.org/document/9310668) and will appear in a future issue this year. The final draft version is available [here](https://github.com/gilbertchen/duplicacy/blob/master/duplicacy_paper.pdf) for those who don't have IEEE subscriptions.
This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Web GUI frontend built for Windows, macOS, and Linux, available from https://duplicacy.com.
There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.
@@ -10,14 +12,15 @@ There is a special edition of Duplicacy developed for VMware vSphere (ESXi) name
There are 3 core advantages of Duplicacy over any other open-source or commercial backup tools:
* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature literally turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.
* Duplicacy is the *only* cloud backup tool that allows multiple computers to back up to the same cloud storage, taking advantage of cross-computer deduplication whenever possible, without direct communication among them. This feature turns any cloud storage server supporting only a basic set of file operations into a sophisticated deduplication-aware server.
* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The lack of a centralized chunk database not only makes the implementation less error-prone, but also produces a highly maintainable piece of software with plenty of room for development of new features and usability enhancements.
* Unlike other chunk-based backup tools where chunks are grouped into pack files and a chunk database is used to track which chunks are stored inside each pack file, Duplicacy takes a database-less approach where every chunk is saved independently using its hash as the file name to facilitate quick lookups. The avoidance of a centralized chunk database not only produces a simpler and less error-prone implementation, but also makes it easier to develop advanced features, such as [Asymmetric Encryption](https://github.com/gilbertchen/duplicacy/wiki/RSA-encryption) for stronger encryption and [Erasure Coding](https://github.com/gilbertchen/duplicacy/wiki/Erasure-coding) for resilient data protection.
* Duplicacy is fast. While the performance wasn't the top-priority design goal, Duplicacy has been shown to outperform other backup tools by a considerable margin, as indicated by the following results obtained from a [benchmarking experiment](https://github.com/gilbertchen/benchmarking) backing up the [Linux code base](https://github.com/torvalds/linux) using Duplicacy and 3 other open-source backup tools.
[![Comparison of Duplicacy, restic, Attic, duplicity](https://github.com/gilbertchen/duplicacy/blob/master/images/duplicacy_benchmark_speed.png "Comparison of Duplicacy, restic, Attic, duplicity")](https://github.com/gilbertchen/benchmarking)
## Getting Started
* [A brief introduction](https://github.com/gilbertchen/duplicacy/wiki/Quick-Start)
@@ -44,6 +47,7 @@ Duplicacy currently provides the following storage backends:
* WebDAV (under beta testing)
* pcloud (via WebDAV)
* Box.com (via WebDAV)
* File Fabric by [Storage Made Easy](https://storagemadeeasy.com/)
Please consult the [wiki page](https://github.com/gilbertchen/duplicacy/wiki/Storage-Backends) on how to set up Duplicacy to work with each cloud storage.
@@ -64,9 +68,9 @@ to find the differences from previous backups and only then uploading the differ
[Duplicati](https://duplicati.com) is one of the first backup tools that adopt the chunk-based approach to split files into chunks which are then uploaded to the storage. The chunk-based approach got the incremental backup model right in the sense that every incremental backup is actually a full snapshot. As Duplicati splits files into fixed-size chunks, deletions or insertions of a few bytes will foil the deduplication. Cloud support is extensive, but multiple clients can't back up to the same storage location.
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati, but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup is also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
[Attic](https://attic-backup.org) has been acclaimed by some as the [Holy Grail of backups](https://www.stavros.io/posts/holy-grail-backups). It follows the same incremental backup model like Duplicati but embraces the variable-size chunk algorithm for better performance and higher deduplication efficiency (not susceptible to byte insertion and deletion any more). Deletions of old backup are also supported. However, no cloud backends are implemented. Although concurrent backups from multiple clients to the same storage is in theory possible by the use of locking, it is
[not recommended](http://librelist.com/browser//attic/2014/11/11/backing-up-multiple-servers-into-a-single-repository/#e96345aa5a3469a87786675d65da492b) by the developer due to chunk indices being kept in a local cache.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks, and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.
Concurrent access is not only a convenience; it is a necessity for better deduplication. For instance, if multiple machines with the same OS installed can back up their entire drives to the same storage, only one copy of the system files needs to be stored, greatly reducing the storage space regardless of the number of machines. Attic still adopts the traditional approach of using a centralized indexing database to manage chunks and relies heavily on caching to improve performance. The presence of exclusive locking makes it hard to be extended to cloud storages.
[restic](https://restic.github.io) is a more recent addition. It uses a format similar to the git packfile format. Multiple clients backing up to the same storage are still guarded by
[locks](https://github.com/restic/restic/blob/master/doc/Design.md#locks), and because a chunk database is used, deduplication isn't real-time (different clients sharing the same files will upload different copies of the same chunks). A prune operation will completely block all other clients connected to the storage from doing their regular backups. Moreover, since most cloud storage services do not provide a locking service, the best effort is to use some basic file operations to simulate a lock, but distributed locking is known to be a hard problem and it is unclear how reliable restic's lock implementation is. A faulty implementation may cause a prune operation to accidentally delete data still in use, resulting in unrecoverable data loss. This is the exact problem that we avoided by taking the lock-free approach.

duplicacy/duplicacy_main.go

@@ -147,6 +147,10 @@ func setGlobalOptions(context *cli.Context) {
duplicacy.SetLoggingLevel(duplicacy.DEBUG)
}
if context.GlobalBool("print-memory-usage") {
go duplicacy.PrintMemoryUsage()
}
ScriptEnabled = true
if context.GlobalBool("no-script") {
ScriptEnabled = false
@@ -212,15 +216,20 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
return true
}
func loadRSAPrivateKey(keyFile string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
func loadRSAPrivateKey(keyFile string, passphrase string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
if keyFile == "" {
return
}
prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
passphrase := duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
if passphrase == "" {
passphrase = duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
} else {
backupManager.LoadRSAPrivateKey(keyFile, passphrase)
}
}
func initRepository(context *cli.Context) {
@@ -269,6 +278,13 @@ func configRepository(context *cli.Context, init bool) {
}
}
snapshotIDRegex := regexp.MustCompile(`^[A-Za-z0-9_\-]+$`)
matched := snapshotIDRegex.FindStringSubmatch(snapshotID)
if matched == nil {
duplicacy.LOG_ERROR("PREFERENCE_INVALID", "'%s' is an invalid snapshot id", snapshotID)
return
}
var repository string
var err error
@@ -453,8 +469,26 @@ func configRepository(context *cli.Context, init bool) {
if iterations == 0 {
iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
}
dataShards := 0
parityShards := 0
shards := context.String("erasure-coding")
if shards != "" {
shardsRegex := regexp.MustCompile(`^([0-9]+):([0-9]+)$`)
matched := shardsRegex.FindStringSubmatch(shards)
if matched == nil {
duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
} else {
dataShards, _ = strconv.Atoi(matched[1])
parityShards, _ = strconv.Atoi(matched[2])
if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
}
}
}
duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"))
minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
}
duplicacy.Preferences = append(duplicacy.Preferences, preference)
@@ -560,6 +594,11 @@ func setPreference(context *cli.Context) {
newPreference.FiltersFile = context.String("filters")
}
triBool = context.Generic("exclude-by-attribute").(*TriBool)
if triBool.IsSet() {
newPreference.ExcludeByAttribute = triBool.IsTrue()
}
key := context.String("key")
value := context.String("value")
@@ -741,12 +780,15 @@ func backupRepository(context *cli.Context) {
uploadRateLimit := context.Int("limit-rate")
enumOnly := context.Bool("enum-only")
storage.SetRateLimits(0, uploadRateLimit)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SetDryRun(dryRun)
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly)
metadataChunkSize := context.Int("metadata-chunk-size")
maximumInMemoryEntries := context.Int("max-in-memory-entries")
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly, metadataChunkSize, maximumInMemoryEntries)
runScript(context, preference.Name, "post")
}
@@ -794,6 +836,7 @@ func restoreRepository(context *cli.Context) {
setOwner := !context.Bool("ignore-owner")
showStatistics := context.Bool("stats")
persist := context.Bool("persist")
var patterns []string
for _, pattern := range context.Args() {
@@ -818,13 +861,17 @@ func restoreRepository(context *cli.Context) {
duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
storage.SetRateLimits(context.Int("limit-rate"), 0)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
failed := backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns, persist)
if failed > 0 {
duplicacy.LOG_ERROR("RESTORE_FAIL", "%d file(s) were not restored correctly", failed)
return
}
runScript(context, preference.Name, "post")
}
@@ -860,7 +907,7 @@ func listSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", preference.ExcludeByAttribute)
duplicacy.SavePassword(*preference, "password", password)
id := preference.SnapshotID
@@ -874,7 +921,7 @@ func listSnapshots(context *cli.Context) {
showChunks := context.Bool("chunks")
// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
loadRSAPrivateKey(context.String("key"), preference, backupManager, resetPassword)
loadRSAPrivateKey(context.String("key"), "", preference, backupManager, resetPassword)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
@@ -916,10 +963,10 @@ func checkSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
id := preference.SnapshotID
if context.Bool("all") {
@@ -934,9 +981,10 @@ func checkSnapshots(context *cli.Context) {
checkChunks := context.Bool("chunks")
searchFossils := context.Bool("fossils")
resurrect := context.Bool("resurrect")
persist := context.Bool("persist")
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads, persist)
runScript(context, preference.Name, "post")
}
@@ -974,10 +1022,11 @@ func printFile(context *cli.Context) {
snapshotID = context.String("id")
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1032,13 +1081,13 @@ func diff(context *cli.Context) {
}
compareByHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile, preference.ExcludeByAttribute)
runScript(context, preference.Name, "post")
}
@@ -1077,7 +1126,7 @@ func showHistory(context *cli.Context) {
revisions := getRevisions(context)
showLocalHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1140,7 +1189,7 @@ func pruneSnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "", false)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1160,9 +1209,14 @@ func copySnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
threads := context.Int("threads")
if threads < 1 {
threads = 1
uploadingThreads := context.Int("threads")
if uploadingThreads < 1 {
uploadingThreads = 1
}
downloadingThreads := context.Int("download-threads")
if downloadingThreads < 1 {
downloadingThreads = 1
}
repository, source := getRepositoryPreference(context, context.String("from"))
@@ -1170,7 +1224,7 @@ func copySnapshots(context *cli.Context) {
runScript(context, source.Name, "pre")
duplicacy.LOG_INFO("STORAGE_SET", "Source storage set to %s", source.StorageURL)
sourceStorage := duplicacy.CreateStorage(*source, false, threads)
sourceStorage := duplicacy.CreateStorage(*source, false, downloadingThreads)
if sourceStorage == nil {
return
}
@@ -1180,11 +1234,11 @@ func copySnapshots(context *cli.Context) {
sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
}
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "")
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "", false)
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
loadRSAPrivateKey(context.String("key"), source, sourceManager, false)
loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), source, sourceManager, false)
_, destination := getRepositoryPreference(context, context.String("to"))
@@ -1200,7 +1254,7 @@ func copySnapshots(context *cli.Context) {
}
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
destinationStorage := duplicacy.CreateStorage(*destination, false, uploadingThreads)
if destinationStorage == nil {
return
}
@@ -1215,7 +1269,7 @@ func copySnapshots(context *cli.Context) {
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword, "", "")
destinationPassword, "", "", false)
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(destination.Name)
@@ -1225,7 +1279,7 @@ func copySnapshots(context *cli.Context) {
snapshotID = context.String("id")
}
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, threads)
sourceManager.CopySnapshots(destinationManager, snapshotID, revisions, uploadingThreads, downloadingThreads)
runScript(context, source.Name, "post")
}
@@ -1398,6 +1452,11 @@ func main() {
Usage: "the RSA public key to encrypt file chunks",
Argument: "<public key>",
},
cli.StringFlag{
Name: "erasure-coding",
Usage: "enable erasure coding to protect against storage corruption",
Argument: "<data shards>:<parity shards>",
},
},
Usage: "Initialize the storage if necessary and the current directory as the repository",
ArgsUsage: "<snapshot id> <storage url>",
@@ -1454,6 +1513,19 @@ func main() {
Name: "enum-only",
Usage: "enumerate the repository recursively and then exit",
},
cli.IntFlag{
Name: "metadata-chunk-size",
Value: 1024 * 1024,
Usage: "the average size of metadata chunks (defaults to 1M)",
Argument: "<size>",
},
cli.IntFlag{
Name: "max-in-memory-entries",
Value: 1024 * 1024,
Usage: "the maximum number of entries kept in memory (defaults to 1M)",
Argument: "<number>",
},
},
Usage: "Save a snapshot of the repository to the storage",
ArgsUsage: " ",
@@ -1510,6 +1582,15 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.BoolFlag{
Name: "persist",
Usage: "continue processing despite chunk errors or existing files (without -overwrite), reporting any affected files",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Restore the repository to a previously saved snapshot",
ArgsUsage: "[--] [pattern] ...",
@@ -1621,12 +1702,21 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
cli.IntFlag{
Name: "threads",
Value: 1,
Usage: "number of threads used to verify chunks",
Argument: "<n>",
},
cli.BoolFlag{
Name: "persist",
Usage: "continue processing despite chunk errors, reporting any affected (corrupted) files",
},
},
Usage: "Check the integrity of snapshots",
ArgsUsage: " ",
@@ -1655,6 +1745,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Print to stdout the specified file, or the snapshot content if no file is specified",
ArgsUsage: "[<file>]",
@@ -1688,6 +1783,11 @@ func main() {
Usage: "the RSA private key to decrypt file chunks",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Compare two snapshots or two revisions of a file",
ArgsUsage: "[<file>]",
@@ -1857,6 +1957,11 @@ func main() {
Usage: "the RSA public key to encrypt file chunks",
Argument: "<public key>",
},
cli.StringFlag{
Name: "erasure-coding",
Usage: "enable erasure coding to protect against storage corruption",
Argument: "<data shards>:<parity shards>",
},
},
Usage: "Add an additional storage to be used for the existing repository",
ArgsUsage: "<storage name> <snapshot id> <storage url>",
@@ -1896,6 +2001,12 @@ func main() {
Argument: "<file name>",
Value: "",
},
cli.GenericFlag{
Name: "exclude-by-attribute",
Usage: "Exclude files based on file attributes. (macOS only, com_apple_backup_excludeItem)",
Value: &TriBool{},
Arg: "true",
},
cli.StringFlag{
Name: "key",
Usage: "add a key/password whose value is supplied by the -value option",
@@ -1960,11 +2071,22 @@ func main() {
Usage: "number of uploading threads",
Argument: "<n>",
},
cli.IntFlag{
Name: "download-threads",
Value: 1,
Usage: "number of downloading threads",
Argument: "<n>",
},
cli.StringFlag{
Name: "key",
Usage: "the RSA private key to decrypt file chunks from the source storage",
Argument: "<private key>",
},
cli.StringFlag{
Name: "key-passphrase",
Usage: "the passphrase to decrypt the RSA private key",
Argument: "<private key passphrase>",
},
},
Usage: "Copy snapshots between compatible storages",
ArgsUsage: " ",
@@ -2078,13 +2200,23 @@ func main() {
Usage: "suppress logs with the specified id",
Argument: "<id>",
},
cli.BoolFlag{
Name: "print-memory-usage",
Usage: "print memory usage every second",
},
}
app.HideVersion = true
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.5.2" + " (" + GitCommit + ")"
app.Version = "3.0.1" + " (" + GitCommit + ")"
// Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) {
fmt.Fprintf(context.App.Writer, "Invalid command: %s\n", command)
os.Exit(2)
}
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

duplicacy_paper.pdf (new binary file, not shown)

go.mod (new file, 68 lines)

@@ -0,0 +1,68 @@
module github.com/gilbertchen/duplicacy
go 1.16
require (
cloud.google.com/go v0.38.0
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a
github.com/aws/aws-sdk-go v1.30.7
github.com/bkaradzic/go-lz4 v1.0.0
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9
github.com/gilbertchen/go-dropbox v0.0.0-20221004154447-61204091e804
github.com/gilbertchen/go-ole v1.2.0
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01
github.com/klauspost/reedsolomon v1.9.9
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/minio/highwayhash v1.0.1
github.com/ncw/swift/v2 v2.0.1
github.com/pkg/sftp v1.11.0
github.com/pkg/xattr v0.4.1
github.com/vmihailenco/msgpack v4.0.4+incompatible
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
google.golang.org/api v0.21.0
storj.io/uplink v1.9.0
)
require (
github.com/Azure/go-autorest v10.15.5+incompatible // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/dnaeon/go-vcr v1.2.0 // indirect
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 // indirect
github.com/godbus/dbus v4.1.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/marstr/guid v1.1.0 // indirect
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/segmentio/go-env v1.1.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec // indirect
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect
github.com/zeebo/errs v1.3.0 // indirect
go.opencensus.io v0.22.3 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.1 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d // indirect
google.golang.org/grpc v1.28.1 // indirect
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf // indirect
storj.io/drpc v0.0.30 // indirect
)

go.sum (new file, 459 lines)

@@ -0,0 +1,459 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/go-autorest v10.15.5+incompatible h1:vdxx6wM1rVkKt/3niByPVjguoLWkWImOcJNvEykgBzY=
github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a h1:pv34s756C4pEXnjgPfGYgdhg/ZdajGhyOvzx8k+23nw=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-sdk-go v1.30.7 h1:IaXfqtioP6p9SFAnNfsqdNczbR5UNbYqvcZUSsCAdTY=
github.com/aws/aws-sdk-go v1.30.7/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo=
github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible h1:2fZxTUw5D9uGWnYTsU/obVavn+1qTF+TsVok3U8uN2Q=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible/go.mod h1:qsVRCpBUm2l0eMUeI9wZ47yzra2+lv2YkGhMZpzBVUc=
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9 h1:uMgtTp4sRJ7kMQMF3xEKeFntf3XatwkLNL/byj8v97g=
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9/go.mod h1:WOnN3JdZiZwUaYtLH2DRxe5PpD43wuOIvc/Wem/39M0=
github.com/gilbertchen/go-dropbox v0.0.0-20221004154447-61204091e804 h1:JZ0P02xoeaITbKLFAdBfiH8SNNvKGE2Y/RLdYtWoEVE=
github.com/gilbertchen/go-dropbox v0.0.0-20221004154447-61204091e804/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
github.com/gilbertchen/go-ole v1.2.0 h1:ay65uwxo6w8UVOxN0+fuCqUXGaXxbmkGs5m4uY6e1Zw=
github.com/gilbertchen/go-ole v1.2.0/go.mod h1:NNiozp7QxhyGmHxxNdFKIcVaINvJFTAjBJ2gYzh8fsg=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2 h1:VDPwi3huqeJBtymgLOvPAP4S2gbSSK/UrWVwRbRAmnw=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2/go.mod h1:AoxJeh8meXUrSWBLiq9BJvYMd9RAAGgEUU0gSkNedRY=
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:0SR0aXvil/eQReU0olxp/j04B+Y/47fjDMotIxaAgKo=
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:HDsXH7AAfDsfYYX0te4zsNbnwVvZ2RtLEOCjN4y84jw=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508 h1:SqTyk5KkNXp7zTdTttIZSDcTrL5uau4K/2OpKvgBZVI=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508/go.mod h1:w/pisxUZezf2XzU9Ewjphcf6q1mZtOzKPHhJiuc8cag=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01 h1:LqwS9qL6SrDkp0g0iwUkETrDdtB9gTKaIbSn9imUq5o=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01/go.mod h1:TMlibuxKfkdtHyltooAw7+DHqRpaXs9nxaffk00Sh1Q=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 h1:G1U0vew/vA/1/hBmf1XNeyIzJJbPFVv+kb+HPl6rj6c=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8/go.mod h1:/Ya1YZsqLQp17bDgHdyE9/XBR1uIH1HKasTvLxcoM/A=
github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20211108044417-e9b028704de0 h1:rsq1yB2xiFLDYYaYdlGBsSkwVzsCo500wMhxvW5A/bk=
github.com/google/pprof v0.0.0-20211108044417-e9b028704de0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/klauspost/reedsolomon v1.9.9 h1:qCL7LZlv17xMixl55nq2/Oa1Y86nfO8EqDfv2GHND54=
github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lucas-clemente/quic-go v0.25.0/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8=
github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 h1:ULR/QWMgcgRiZLUjSSJMU+fW+RDMstRdmnDWj9Q+AsA=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/go-env v1.1.0 h1:AGJ7OnCx9M5NWpkYPGYELS6III/pFSnAs1GvKWStiEo=
github.com/segmentio/go-env v1.1.0/go.mod h1:pEKO2ieHe8zF098OMaAHw21SajMuONlnI/vJNB3pB7I=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.17 h1:rqIuLhRUr2UtS3WNVbPY/BwvjlwKVvSOVY5p0QVocxE=
github.com/spacemonkeygo/monkit/v3 v3.0.17/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/zeebo/admission/v3 v3.0.3/go.mod h1:2OWyAS5yo0Xvj2AEUosOjTUHxaY0oIIiCrXGKCYzWpo=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 h1:71vQrMauZZhcTVK6KdYM+rklehEEwb3E+ZhaE5jrPrE=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.21.0 h1:zS+Q/CJJnVlXpXQVIz+lH0ZT2lBuT2ac7XD8Y/3w6hY=
google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d h1:I7Vuu5Ejagca+VcgfBINHke3xwjCTYnIG4Q57fv0wYY=
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k=
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf h1:D5xZTDOlTTQWdAWeKKm2pFLcz1sceH+f/pVAcYB9jL8=
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf/go.mod h1:LBJrpAqL4MNSrhGEwc8SJ+tIVtgfCtFEZqDy6/0j67A=
storj.io/drpc v0.0.30 h1:jqPe4T9KEu3CDBI05A2hCMgMSHLtd/E0N0yTF9QreIE=
storj.io/drpc v0.0.30/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/uplink v1.9.0 h1:Zg1kX1VqOQIKm0yAukteKpLuT68Be3euyNRML612ERM=
storj.io/uplink v1.9.0/go.mod h1:f6D8306j5mnRHnPDKWCiwtPM6ukyGg77to9LaAY9l6k=

File diff suppressed because it is too large.


@@ -169,6 +169,12 @@ func getFileHash(path string) (hash string) {
return hex.EncodeToString(hasher.Sum(nil))
}
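// assertRestoreFailures fails the test when the number of files that could not be restored differs from the expected count.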
func assertRestoreFailures(t *testing.T, failedFiles int, expectedFailedFiles int) {
if failedFiles != expectedFailedFiles {
t.Errorf("Failed to restore %d instead of %d file(s)", failedFiles, expectedFailedFiles)
}
}
func TestBackupManager(t *testing.T) {
rand.Seed(time.Now().UnixNano())
@@ -226,12 +232,20 @@ func TestBackupManager(t *testing.T) {
cleanStorage(storage)
time.Sleep(time.Duration(delay) * time.Second)
if testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
dataShards := 0
parityShards := 0
if *testErasureCoding {
dataShards = 5
parityShards = 2
}
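// The two new trailing ConfigStorage arguments (dataShards, parityShards) enable erasure coding:
// 5 data and 2 parity shards when the testErasureCoding flag is set, 0/0 (disabled) otherwise.
// (The erasure coding presumably builds on the klauspost/reedsolomon dependency listed in go.mod.)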
if *testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
t.Errorf("Failed to initialize the storage")
}
} else {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "", dataShards, parityShards) {
t.Errorf("Failed to initialize the storage")
}
}
@@ -239,15 +253,16 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "")
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
assertRestoreFailures(t, failedFiles, 0)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
@@ -267,11 +282,12 @@ func TestBackupManager(t *testing.T) {
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
assertRestoreFailures(t, failedFiles, 0)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -287,7 +303,7 @@ func TestBackupManager(t *testing.T) {
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
os.Mkdir(testDir+"/repository1/dir4", 0700)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
// Create some directories and files under repository2 that will be deleted during restore
@@ -298,8 +314,9 @@ func TestBackupManager(t *testing.T) {
createRandomFile(testDir+"/repository2/dir5/file5", 100)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
failedFiles = backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
assertRestoreFailures(t, failedFiles, 0)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -325,8 +342,9 @@ func TestBackupManager(t *testing.T) {
os.Remove(testDir + "/repository1/file2")
os.Remove(testDir + "/repository1/dir1/file3")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
failedFiles = backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"} /*allowFailures=*/, false)
assertRestoreFailures(t, failedFiles, 0)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -341,7 +359,7 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -349,8 +367,8 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false, 1024, 1024)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -358,9 +376,348 @@ func TestBackupManager(t *testing.T) {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
/*buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
fmt.Printf("%s", buf)*/
}
// Create a file filled with random content derived from the given seed
func createRandomFileSeeded(path string, maxSize int, seed int64) {
rand.Seed(seed)
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
return
}
defer file.Close()
size := maxSize/2 + rand.Int()%(maxSize/2)
buffer := make([]byte, 32*1024)
for size > 0 {
bytes := size
if bytes > cap(buffer) {
bytes = cap(buffer)
}
rand.Read(buffer[:bytes])
bytes, err = file.Write(buffer[:bytes])
if err != nil {
LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
return
}
size -= bytes
}
}
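// corruptFile overwrites length bytes of the file at the given start offset with pseudorandom data, simulating corruption of a stored chunk.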
func corruptFile(path string, start int, length int, seed int64) {
rand.Seed(seed)
file, err := os.OpenFile(path, os.O_WRONLY, 0644)
if err != nil {
LOG_ERROR("CORRUPT_FILE", "Can't open %s for writing: %v", path, err)
return
}
defer func() {
if file != nil {
file.Close()
}
}()
_, err = file.Seek(int64(start), 0)
if err != nil {
LOG_ERROR("CORRUPT_FILE", "Can't seek to the offset %d: %v", start, err)
return
}
buffer := make([]byte, length)
rand.Read(buffer)
_, err = file.Write(buffer)
if err != nil {
LOG_ERROR("CORRUPT_FILE", "Failed to write to %s: %v", path, err)
return
}
}
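// TestPersistRestore exercises restore behavior when chunks in the storage are missing or corrupted, using seeded random data so the affected files are predictable.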
func TestPersistRestore(t *testing.T) {
// We want deterministic output here so we can verify that the expected files are corrupted by missing or corrupt chunks.
// Therefore we use rand functions with a fixed seed, and known keys.
setTestingT(t)
SetLoggingLevel(INFO)
defer func() {
if r := recover(); r != nil {
switch e := r.(type) {
case Exception:
t.Errorf("%s %s", e.LogID, e.Message)
debug.PrintStack()
default:
t.Errorf("%v", e)
debug.PrintStack()
}
}
}()
testDir := path.Join(os.TempDir(), "duplicacy_test")
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
os.Mkdir(testDir+"/repository1", 0700)
os.Mkdir(testDir+"/repository1/dir1", 0700)
os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
os.Mkdir(testDir+"/repository2", 0700)
os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
os.Mkdir(testDir+"/repository3", 0700)
os.Mkdir(testDir+"/repository3/.duplicacy", 0700)
maxFileSize := 1000000
//maxFileSize := 200000
createRandomFileSeeded(testDir+"/repository1/file1", maxFileSize, 1)
createRandomFileSeeded(testDir+"/repository1/file2", maxFileSize, 2)
createRandomFileSeeded(testDir+"/repository1/dir1/file3", maxFileSize, 3)
threads := 1
password := "duplicacy"
// We want deterministic output, plus the ability to test encrypted storage.
// So create unencrypted storage with default keys, and encrypted storage as a bit-identical copy of it, just protected by a password.
unencStorage, err := loadStorage(testDir+"/unenc_storage", threads)
if err != nil {
t.Errorf("Failed to create storage: %v", err)
return
}
delay := 0
if _, ok := unencStorage.(*ACDStorage); ok {
delay = 1
}
if _, ok := unencStorage.(*OneDriveStorage); ok {
delay = 5
}
time.Sleep(time.Duration(delay) * time.Second)
cleanStorage(unencStorage)
if !ConfigStorage(unencStorage, 16384, 100, 64*1024, 256*1024, 16*1024, "", nil, false, "", 0, 0) {
t.Errorf("Failed to initialize the unencrypted storage")
}
time.Sleep(time.Duration(delay) * time.Second)
unencConfig, _, err := DownloadConfig(unencStorage, "")
if err != nil {
t.Errorf("Failed to download storage config: %v", err)
return
}
// Make encrypted storage
storage, err := loadStorage(testDir+"/enc_storage", threads)
if err != nil {
t.Errorf("Failed to create encrypted storage: %v", err)
return
}
time.Sleep(time.Duration(delay) * time.Second)
cleanStorage(storage)
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, unencConfig, true, "", 0, 0) {
t.Errorf("Failed to initialize the encrypted storage")
}
time.Sleep(time.Duration(delay) * time.Second)
// do unencrypted backup
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
unencBackupManager := CreateBackupManager("host1", unencStorage, testDir, "", "", "", false)
unencBackupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
// do encrypted backup
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
encBackupManager := CreateBackupManager("host1", storage, testDir, password, "", "", false)
encBackupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
// check snapshots
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
// check functions
checkAllUncorrupted := func(cmpRepository string) {
for _, f := range []string{"file1", "file2", "dir1/file3"} {
if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
t.Errorf("File %s does not exist", f)
continue
}
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
}
checkMissingFile := func(cmpRepository string, expectMissing string) {
for _, f := range []string{"file1", "file2", "dir1/file3"} {
_, err := os.Stat(testDir + cmpRepository + "/" + f)
if err == nil {
if f == expectMissing {
t.Errorf("File %s exists, expected to be missing", f)
}
continue
}
if os.IsNotExist(err) {
if f != expectMissing {
t.Errorf("File %s does not exist", f)
}
continue
}
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
}
checkCorruptedFile := func(cmpRepository string, expectCorrupted string) {
for _, f := range []string{"file1", "file2", "dir1/file3"} {
if _, err := os.Stat(testDir + cmpRepository + "/" + f); os.IsNotExist(err) {
t.Errorf("File %s does not exist", f)
continue
}
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + cmpRepository + "/" + f)
if f == expectCorrupted {
if hash1 == hash2 {
t.Errorf("File %s has same hashes, expected to be corrupted: %s vs %s", f, hash1, hash2)
}
} else {
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
}
}
// test restore all uncorrupted to repository3
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
assertRestoreFailures(t, failedFiles, 0)
checkAllUncorrupted("/repository3")
// test for corrupt files and -persist
// corrupt a chunk
chunkToCorrupt1 := "/4d/538e5dfd2b08e782bfeb56d1360fb5d7eb9d8c4b2531cc2fca79efbaec910c"
// this should affect file1
chunkToCorrupt2 := "/2b/f953a766d0196ce026ae259e76e3c186a0e4bcd3ce10f1571d17f86f0a5497"
// this should affect dir1/file3
for i := 0; i < 2; i++ {
if i == 0 {
// test corrupt chunks
corruptFile(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1, 128, 128, 4)
corruptFile(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2, 128, 128, 4)
} else {
// test missing chunks
os.Remove(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1)
os.Remove(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2)
}
// check snapshots with --persist (allowFailures == true)
// this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
// test restore corrupted, inPlace = true, corrupted files will have hash failures
os.RemoveAll(testDir+"/repository2")
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 1)
// check restore, expect file1 to be corrupted
checkCorruptedFile("/repository2", "file1")
os.RemoveAll(testDir+"/repository2")
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 1)
// check restore, expect file3 to be corrupted
checkCorruptedFile("/repository2", "dir1/file3")
//SetLoggingLevel(DEBUG)
// test restore corrupted, inPlace = false, corrupted files will be missing
os.RemoveAll(testDir+"/repository2")
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 1)
// check restore, expect file1 to be corrupted
checkMissingFile("/repository2", "file1")
os.RemoveAll(testDir+"/repository2")
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 1)
// check restore, expect file3 to be corrupted
checkMissingFile("/repository2", "dir1/file3")
// test restore corrupted files from different backups, inPlace = true
// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
// the latter will not touch the existing file3 with correct hash
os.RemoveAll(testDir+"/repository2")
failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 1)
failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 0)
checkAllUncorrupted("/repository2")
// restore to repository3, with overwrite and allowFailures (true/false), quickMode = false (use hashes)
// should always succeed as uncorrupted files already exist with correct hash, so these will be ignored
SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
assertRestoreFailures(t, failedFiles, 0)
checkAllUncorrupted("/repository3")
failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
assertRestoreFailures(t, failedFiles, 0)
checkAllUncorrupted("/repository3")
}
}

View File

@@ -29,29 +29,29 @@ func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compres
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
maker := CreateChunkMaker(config, false)
maker := CreateFileChunkMaker(config, false)
startTime := float64(time.Now().UnixNano()) / 1e9
numberOfChunks := 0
reader.Seek(0, os.SEEK_SET)
maker.ForEachChunk(reader,
func(chunk *Chunk, final bool) {
if compression {
key := ""
if encryption {
key = "0123456789abcdef0123456789abcdef"
}
err := chunk.Encrypt([]byte(key), "", false)
if err != nil {
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
}
chunkFunc := func(chunk *Chunk) {
if compression {
key := ""
if encryption {
key = "0123456789abcdef0123456789abcdef"
}
config.PutChunk(chunk)
numberOfChunks++
},
func(size int64, hash string) (io.Reader, bool) {
return nil, false
})
err := chunk.Encrypt([]byte(key), "", false)
if err != nil {
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
}
}
config.PutChunk(chunk)
numberOfChunks++
}
maker.AddData(reader, chunkFunc)
maker.AddData(nil, chunkFunc)
runningTime := float64(time.Now().UnixNano())/1e9 - startTime
speed := int64(float64(fileSize) / runningTime)

View File

@@ -22,6 +22,8 @@ import (
"runtime"
"github.com/bkaradzic/go-lz4"
"github.com/minio/highwayhash"
"github.com/klauspost/reedsolomon"
)
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
@@ -63,16 +65,20 @@ type Chunk struct {
config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
// by the config
isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
// encryption, where a snapshot chunk is not encrypted by RSA
isMetadata bool // Indicates if the chunk is a metadata chunk (instead of a file chunk). This is primarily used by RSA
// encryption, where a metadata chunk is not encrypted by RSA
isBroken bool // Indicates the chunk did not download correctly. This is only used for -persist (allowFailures) mode
}
// Magic word to identify a duplicacy format encrypted file, plus a version number.
var ENCRYPTION_HEADER = "duplicacy\000"
var ENCRYPTION_BANNER = "duplicacy\000"
// RSA encrypted chunks start with "duplicacy\002"
var ENCRYPTION_VERSION_RSA byte = 2
var ERASURE_CODING_BANNER = "duplicacy\003"
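The three banners make the chunk format self-describing: the first nine bytes spell "duplicacy" and the tenth byte acts as a version/format tag. A minimal sketch (not part of this diff) of classifying a chunk file by its banner:
func classifyChunkFormat(data []byte) string {
	if len(data) < 10 || string(data[:9]) != "duplicacy" {
		return "no banner; likely an unencrypted chunk"
	}
	switch data[9] {
	case 0: // ENCRYPTION_BANNER, "duplicacy\000"
		return "AES-GCM encrypted"
	case ENCRYPTION_VERSION_RSA: // "duplicacy\002"
		return "RSA encrypted"
	case 3: // ERASURE_CODING_BANNER, "duplicacy\003"
		return "erasure coded"
	}
	return "unsupported version"
}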
// CreateChunk creates a new chunk.
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
@@ -121,7 +127,8 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
chunk.hash = nil
chunk.id = ""
chunk.size = 0
chunk.isSnapshot = false
chunk.isMetadata = false
chunk.isBroken = false
}
// Write implements the Writer interface.
@@ -179,7 +186,7 @@ func (chunk *Chunk) VerifyID() {
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetadata bool) (err error) {
var aesBlock cipher.Block
var gcm cipher.AEAD
@@ -196,8 +203,8 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
key := encryptionKey
usingRSA := false
// Enable RSA encryption only when the chunk is not a snapshot chunk
if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
// Enable RSA encryption only when the chunk is not a metadata chunk
if chunk.config.rsaPublicKey != nil && !isMetadata && !chunk.isMetadata {
randomKey := make([]byte, 32)
_, err := rand.Read(randomKey)
if err != nil {
@@ -224,7 +231,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
// Start with the magic number and the version number.
if usingRSA {
// RSA encryption starts "duplicacy\002"
encryptedBuffer.Write([]byte(ENCRYPTION_HEADER)[:len(ENCRYPTION_HEADER) - 1])
encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
// Then the encrypted key
@@ -235,7 +242,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
encryptedBuffer.Write(encryptedKey)
} else {
encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
encryptedBuffer.Write([]byte(ENCRYPTION_BANNER))
}
// Followed by the nonce
@@ -248,7 +255,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
offset = encryptedBuffer.Len()
}
// offset is either 0 or the length of header + nonce
// offset is either 0 or the length of banner + nonce
if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
@@ -273,26 +280,79 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
}
if len(encryptionKey) == 0 {
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
return nil
if len(encryptionKey) > 0 {
// PKCS7 is used. The sizes of compressed chunks leak information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := 256 - dataLength%256
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
// The encrypted data will be appended to the duplicacy banner and the nonce.
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
encryptedBuffer.Truncate(len(encryptedBytes))
}
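// Worked example of the padding arithmetic above (illustrative values,
// not from this diff): dataLength = 1000 gives
//   paddingLength = 256 - 1000%256 = 24, so the padded length is 1024.
// When dataLength is already a multiple of 256, a full 256-byte padding
// block is appended, so the padding size is always in [1, 256].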
// PKCS7 is used. Compressed chunk sizes leak information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := 256 - dataLength%256
if chunk.config.DataShards == 0 || chunk.config.ParityShards == 0 {
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
return
}
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
// Start erasure coding
encoder, err := reedsolomon.New(chunk.config.DataShards, chunk.config.ParityShards)
if err != nil {
return err
}
chunkSize := len(encryptedBuffer.Bytes())
shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
// Append zeros to make the last shard the same size as the others
encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
// Grow the buffer for parity shards
encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
// Now create one slice for each shard, reusing the data in the buffer
data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
}
// This populates the parity shards
encoder.Encode(data)
// The encrypted data will be appended to the duplicacy header and the nonce.
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
// Prepare the chunk to be uploaded
chunk.buffer.Reset()
// First the banner
chunk.buffer.Write([]byte(ERASURE_CODING_BANNER))
// Then the header, which includes the chunk size, the data/parity shard counts, and a 2-byte checksum
header := make([]byte, 14)
binary.LittleEndian.PutUint64(header[0:], uint64(chunkSize))
binary.LittleEndian.PutUint16(header[8:], uint16(chunk.config.DataShards))
binary.LittleEndian.PutUint16(header[10:], uint16(chunk.config.ParityShards))
header[12] = header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10]
header[13] = header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11]
chunk.buffer.Write(header)
// Calculate the highway hash for each shard
hashKey := make([]byte, 32)
for _, part := range data {
hasher, err := highwayhash.New(hashKey)
if err != nil {
return err
}
_, err = hasher.Write(part)
if err != nil {
return err
}
chunk.buffer.Write(hasher.Sum(nil))
}
encryptedBuffer.Truncate(len(encryptedBytes))
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
// Copy the data
for _, part := range data {
chunk.buffer.Write(part)
}
// Append the header again for redundancy
chunk.buffer.Write(header)
return nil
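Taken together, the erasure-coded chunk layout is: 10-byte banner, 14-byte header, one 32-byte HighwayHash per shard, the data and parity shards, and a redundant copy of the header. A small sketch (an assumption-level helper, not in this diff) validating the header's XOR checksum as defined above:
func validErasureHeader(header []byte) bool {
	if len(header) != 14 {
		return false
	}
	return header[12] == header[0]^header[2]^header[4]^header[6]^header[8]^header[10] &&
		header[13] == header[1]^header[3]^header[5]^header[7]^header[9]^header[11]
}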
@@ -322,7 +382,122 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
}()
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
headerLength := len(ENCRYPTION_HEADER)
bannerLength := len(ENCRYPTION_BANNER)
if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {
// The chunk was encoded with erasure coding
if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
}
// Check the header checksum
header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
return fmt.Errorf("Erasure coding header corrupted (%x)", header)
}
// Read the parameters
chunkSize := int(binary.LittleEndian.Uint64(header[0:8]))
dataShards := int(binary.LittleEndian.Uint16(header[8:10]))
parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
shardSize := (chunkSize + dataShards - 1) / dataShards
// This is the length the chunk file should have
expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
// The minimum length from which the chunk can still be recovered
minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
if len(encryptedBuffer.Bytes()) > expectedLength {
LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
} else if len(encryptedBuffer.Bytes()) == expectedLength {
// Correct size; fall through
} else if len(encryptedBuffer.Bytes()) > minimumLength {
LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
} else {
return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
}
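// Illustrative arithmetic (not from this diff) for the 5+2 configuration
// used by the tests, with chunkSize = 1000:
//   shardSize      = ceil(1000/5)           = 200
//   expectedLength = 10 + 2*14 + 7*(200+32) = 1662
//   minimumLength  = 10 + 14 + 7*32 + 5*200 = 1248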
// Where the hashes start
hashOffset := bannerLength + len(header)
// Where the data start
dataOffset := hashOffset + (dataShards + parityShards) * 32
data := make([][]byte, dataShards + parityShards)
recoveryNeeded := false
hashKey := make([]byte, 32)
availableShards := 0
for i := 0; i < dataShards + parityShards; i++ {
start := dataOffset + i * shardSize
if start + shardSize > len(encryptedBuffer.Bytes()) {
// the current shard is incomplete
break
}
// Now verify the hash
hasher, err := highwayhash.New(hashKey)
if err != nil {
return err
}
_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
if err != nil {
return err
}
if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
if i < dataShards {
recoveryNeeded = true
}
} else {
// The shard is good
data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
availableShards++
if availableShards >= dataShards {
// We have enough shards to recover; skip the remaining shards
break
}
}
}
if !recoveryNeeded {
// Remove the padding zeros from the last shard
encryptedBuffer.Truncate(dataOffset + chunkSize)
// Skip the header and hashes
encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
} else {
if availableShards < dataShards {
return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
}
// Show the validity of shards using a string of * and -
slots := ""
for _, part := range data {
if len(part) != 0 {
slots += "*"
} else {
slots += "-"
}
}
LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
encoder, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return err
}
err = encoder.Reconstruct(data)
if err != nil {
return err
}
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
buffer := AllocateChunkBuffer()
buffer.Reset()
for i := 0; i < dataShards; i++ {
buffer.Write(data[i])
}
buffer.Truncate(chunkSize)
ReleaseChunkBuffer(encryptedBuffer)
encryptedBuffer = buffer
}
}
if len(encryptionKey) > 0 {
@@ -340,15 +515,15 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
key = hasher.Sum(nil)
}
if len(encryptedBuffer.Bytes()) < headerLength + 12 {
if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
return fmt.Errorf("The storage doesn't seem to be encrypted")
}
encryptionVersion := encryptedBuffer.Bytes()[headerLength-1]
encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
}
@@ -359,14 +534,14 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
return fmt.Errorf("An RSA private key is required to decrypt the chunk")
}
encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[headerLength:headerLength+2])
encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
if len(encryptedBuffer.Bytes()) < headerLength + 14 + int(encryptedKeyLength) {
if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
encryptedKey := encryptedBuffer.Bytes()[headerLength + 2:headerLength + 2 + int(encryptedKeyLength)]
headerLength += 2 + int(encryptedKeyLength)
encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
bannerLength += 2 + int(encryptedKeyLength)
decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
if err != nil {
@@ -385,8 +560,8 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
return err
}
offset = headerLength + gcm.NonceSize()
nonce := encryptedBuffer.Bytes()[headerLength:offset]
offset = bannerLength + gcm.NonceSize()
nonce := encryptedBuffer.Bytes()[bannerLength:offset]
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset:], nil)

View File

@@ -12,7 +12,46 @@ import (
"testing"
)
func TestChunk(t *testing.T) {
func TestErasureCoding(t *testing.T) {
key := []byte("duplicacydefault")
config := CreateConfig()
config.HashKey = key
config.IDKey = key
config.MinimumChunkSize = 100
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
config.DataShards = 5
config.ParityShards = 2
chunk := CreateChunk(config, true)
chunk.Reset(true)
data := make([]byte, 100)
for i := 0; i < len(data); i++ {
data[i] = byte(i)
}
chunk.Write(data)
err := chunk.Encrypt([]byte(""), "", false)
if err != nil {
t.Errorf("Failed to encrypt the test data: %v", err)
return
}
encryptedData := make([]byte, chunk.GetLength())
copy(encryptedData, chunk.GetBytes())
crypto_rand.Read(encryptedData[280:300])
chunk.Reset(false)
chunk.Write(encryptedData)
err = chunk.Decrypt([]byte(""), "")
if err != nil {
t.Errorf("Failed to decrypt the data: %v", err)
return
}
return
}
func TestChunkBasic(t *testing.T) {
key := []byte("duplicacydefault")
@@ -23,7 +62,7 @@ func TestChunk(t *testing.T) {
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
maxSize := 1000000
if testRSAEncryption {
if *testRSAEncryption {
privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
if err != nil {
t.Errorf("Failed to generate a random private key: %v", err)
@@ -32,7 +71,10 @@ func TestChunk(t *testing.T) {
config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
}
remainderLength := -1
if *testErasureCoding {
config.DataShards = 5
config.ParityShards = 2
}
for i := 0; i < 500; i++ {
@@ -56,10 +98,14 @@ func TestChunk(t *testing.T) {
encryptedData := make([]byte, chunk.GetLength())
copy(encryptedData, chunk.GetBytes())
if remainderLength == -1 {
remainderLength = len(encryptedData) % 256
} else if len(encryptedData)%256 != remainderLength {
t.Errorf("Incorrect padding size")
if *testErasureCoding {
offset := 24 + 32 * 7
start := rand.Int() % (len(encryptedData) - offset) + offset
length := (len(encryptedData) - offset) / 7
if start + length > len(encryptedData) {
length = len(encryptedData) - start
}
crypto_rand.Read(encryptedData[start: start+length])
}
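// Where the offset above comes from, assuming the 5+2 shard configuration
// set earlier in this test: 10-byte banner + 14-byte header = 24, plus one
// 32-byte HighwayHash per shard for 5+2 = 7 shards. Corrupting only past
// this prefix confines the damage to shard data, which Reed-Solomon can
// recover.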
chunk.Reset(false)

View File

@@ -5,7 +5,6 @@
package duplicacy
import (
"io"
"sync/atomic"
"time"
)
@@ -20,73 +19,47 @@ type ChunkDownloadTask struct {
isDownloading bool // 'true' means the chunk has been downloaded or is being downloaded
}
// ChunkDownloadCompletion represents the notification when a chunk has been downloaded.
type ChunkDownloadCompletion struct {
chunkIndex int // The index of this chunk in the chunk list
chunk *Chunk // The chunk that has been downloaded
chunk *Chunk
chunkIndex int
}
// ChunkDownloader is capable of performing multi-threaded downloading. Chunks to be downloaded are first organized
// ChunkDownloader is a wrapper of ChunkOperator and is only used by the restore procedure.
// as a list of ChunkDownloadTasks, with only the chunkHash field initialized. When a chunk is needed, the
// corresponding ChunkDownloadTask is sent to the downloading goroutine. Once a chunk is downloaded, it will be
// inserted in the completed task list.
type ChunkDownloader struct {
config *Config // Associated config
storage Storage // Download from this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
showStatistics bool // Show a stats log for each chunk if true
threads int // Number of threads
operator *ChunkOperator
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
taskList []ChunkDownloadTask // The list of chunks to be downloaded
completedTasks map[int]bool // Store downloaded chunks
lastChunkIndex int // a monotonically increasing number indicating the last chunk to be downloaded
taskQueue chan ChunkDownloadTask // Downloading goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop the downloading goroutines
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
startTime int64 // The time it starts downloading
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
numberOfDownloadedChunks int // The number of chunks that have been downloaded
numberOfDownloadingChunks int // The number of chunks still being downloaded
numberOfActiveChunks int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
}
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
func CreateChunkDownloader(operator *ChunkOperator) *ChunkDownloader {
downloader := &ChunkDownloader{
config: config,
storage: storage,
snapshotCache: snapshotCache,
showStatistics: showStatistics,
threads: threads,
operator: operator,
taskList: nil,
completedTasks: make(map[int]bool),
lastChunkIndex: 0,
taskQueue: make(chan ChunkDownloadTask, threads),
stopChannel: make(chan bool),
completionChannel: make(chan ChunkDownloadCompletion),
startTime: time.Now().Unix(),
}
// Start the downloading goroutines
for i := 0; i < downloader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <-downloader.taskQueue:
downloader.Download(threadIndex, task)
case <-downloader.stopChannel:
return
}
}
}(i)
}
return downloader
}
@@ -122,26 +95,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
maximumChunks = file.EndChunk - file.StartChunk
}
}
}
// AddChunk adds a single chunk to the download list.
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
task := ChunkDownloadTask{
chunkIndex: len(downloader.taskList),
chunkHash: chunkHash,
chunkLength: 0,
needed: true,
isDownloading: false,
}
downloader.taskList = append(downloader.taskList, task)
if downloader.numberOfActiveChunks < downloader.threads {
downloader.taskQueue <- task
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.taskList[len(downloader.taskList)-1].isDownloading = true
}
return len(downloader.taskList) - 1
downloader.operator.totalChunkSize = downloader.totalChunkSize
}
// Prefetch adds up to 'threads' chunks needed by a file to the download list
@@ -154,20 +108,22 @@ func (downloader *ChunkDownloader) Prefetch(file *Entry) {
task := &downloader.taskList[i]
if task.needed {
if !task.isDownloading {
if downloader.numberOfActiveChunks >= downloader.threads {
if downloader.numberOfActiveChunks >= downloader.operator.threads {
return
}
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
downloader.operator.config.GetChunkIDFromHash(task.chunkHash))
downloader.operator.DownloadAsync(task.chunkHash, i, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
} else {
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.operator.config.GetChunkIDFromHash(task.chunkHash))
}
}
}
@@ -181,7 +137,7 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
for i := range downloader.completedTasks {
if i < chunkIndex && downloader.taskList[i].chunk != nil {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.operator.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
delete(downloader.completedTasks, i)
downloader.numberOfActiveChunks--
@@ -217,8 +173,10 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
// If we haven't started download the specified chunk, download it now
if !downloader.taskList[chunkIndex].isDownloading {
LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
downloader.taskQueue <- downloader.taskList[chunkIndex]
downloader.operator.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
downloader.operator.DownloadAsync(downloader.taskList[chunkIndex].chunkHash, chunkIndex, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
downloader.taskList[chunkIndex].isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
@@ -226,7 +184,7 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
// We also need to look ahead and prefetch other chunks as many as permitted by the number of threads
for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
if downloader.numberOfActiveChunks >= downloader.threads {
if downloader.numberOfActiveChunks >= downloader.operator.threads {
break
}
task := &downloader.taskList[i]
@@ -235,8 +193,10 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
}
if !task.isDownloading {
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.operator.config.GetChunkIDFromHash(task.chunkHash))
downloader.operator.DownloadAsync(task.chunkHash, task.chunkIndex, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
@@ -273,7 +233,7 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
// Wait for a completion event first
if downloader.numberOfActiveChunks > 0 {
completion := <-downloader.completionChannel
downloader.config.PutChunk(completion.chunk)
downloader.operator.config.PutChunk(completion.chunk)
downloader.numberOfActiveChunks--
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
@@ -286,204 +246,13 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
downloader.lastChunkIndex++
continue
}
downloader.taskQueue <- *task
downloader.operator.DownloadAsync(task.chunkHash, task.chunkIndex, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.lastChunkIndex++
}
}
}
// Stop terminates all downloading goroutines
func (downloader *ChunkDownloader) Stop() {
for downloader.numberOfDownloadingChunks > 0 {
completion := <-downloader.completionChannel
downloader.completedTasks[completion.chunkIndex] = true
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
for i := range downloader.completedTasks {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
downloader.numberOfActiveChunks--
}
for i := 0; i < downloader.threads; i++ {
downloader.stopChannel <- true
}
}
// Download downloads a chunk from the storage.
func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadTask) bool {
cachedPath := ""
chunk := downloader.config.GetChunk()
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
var exist bool
var err error
// Reset the chunk with a hasher -- we're reading from the cache where chunks are not encrypted or compressed
chunk.Reset(true)
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
} else {
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
return false
}
}
}
}
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
// will be set up before the encryption
chunk.Reset(false)
const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
// No chunk is found. Have to find it in the fossil pool again.
fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
retry := false
// Retry for Hubic or WebDAV as it may return 404 even when the chunk exists
if _, ok := downloader.storage.(*HubicStorage); ok {
retry = true
}
if _, ok := downloader.storage.(*WebDAVStorage); ok {
retry = true
}
if retry && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
continue
}
// A chunk is not found. This is a serious error and hopefully it will never happen.
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return false
}
// We can't download the fossil directly. We have to turn it back into a regular chunk and try
// downloading again.
err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
return false
}
LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
continue
}
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
_, isHubic := downloader.storage.(*HubicStorage)
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
continue
} else {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
return false
}
}
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
if err != nil {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
continue
} else {
LOG_ERROR("DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
return false
}
}
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "The chunk %s has a hash id of %s; retrying", chunkID, actualChunkID)
chunk.Reset(false)
continue
} else {
LOG_FATAL("DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
return false
}
}
break
}
if len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}
}
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
now := time.Now().Unix()
if now <= downloader.startTime {
now = downloader.startTime + 1
}
speed := downloadedChunkSize / (now - downloader.startTime)
remainingTime := int64(0)
if speed > 0 {
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
}
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
task.chunkIndex+1, chunk.GetLength(),
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
} else {
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
}
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
return true
}
}

View File

@@ -25,15 +25,20 @@ type ChunkMaker struct {
bufferSize int
bufferStart int
minimumReached bool
hashSum uint64
chunk *Chunk
config *Config
hashOnly bool
hashOnlyChunk *Chunk
}
// CreateChunkMaker creates a chunk maker. 'randomSeed' is used to generate the character-to-integer table needed by
// buzhash.
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
func CreateFileChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
size := 1
for size*2 <= config.AverageChunkSize {
size *= 2
@@ -67,6 +72,33 @@ func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
}
maker.buffer = make([]byte, 2*config.MinimumChunkSize)
maker.bufferStart = 0
maker.bufferSize = 0
maker.startNewChunk()
return maker
}
// CreateMetaDataChunkMaker creates a chunk maker that always uses the variable-sized chunking algorithm
func CreateMetaDataChunkMaker(config *Config, chunkSize int) *ChunkMaker {
size := 1
for size*2 <= chunkSize {
size *= 2
}
if size != chunkSize {
LOG_FATAL("CHUNK_SIZE", "Invalid metadata chunk size: %d is not a power of 2", chunkSize)
return nil
}
maker := CreateFileChunkMaker(config, false)
maker.hashMask = uint64(chunkSize - 1)
maker.maximumChunkSize = chunkSize * 4
maker.minimumChunkSize = chunkSize / 4
maker.bufferCapacity = 2 * maker.minimumChunkSize
maker.buffer = make([]byte, maker.bufferCapacity)
return maker
}
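As a worked example of the derived parameters (illustrative, not from this diff): a metadata chunk size of 64*1024 yields a hash mask of 0xFFFF, a maximum chunk size of 256 KiB, and a minimum of 16 KiB:
maker := CreateMetaDataChunkMaker(config, 64*1024) // hypothetical call
// maker.hashMask         == 64*1024 - 1
// maker.maximumChunkSize == 4 * 64*1024
// maker.minimumChunkSize == 64*1024 / 4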
@@ -90,62 +122,50 @@ func (maker *ChunkMaker) buzhashUpdate(sum uint64, out byte, in byte, length int
return rotateLeftByOne(sum) ^ rotateLeft(maker.randomTable[out], uint(length)) ^ maker.randomTable[in]
}
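The update rule above is what makes the hash "rolling": advancing the window by one byte only removes the outgoing byte's contribution and mixes in the incoming byte. A hypothetical standalone sketch (initialBuzhash, randomTable, hashMask, and w stand in for the maker's fields and methods):
sum := initialBuzhash(data[:w]) // hash of the first w-byte window
for i := w; i < len(data); i++ {
	out, in := data[i-w], data[i]
	sum = rotateLeftByOne(sum) ^ rotateLeft(randomTable[out], uint(w)) ^ randomTable[in]
	if sum&hashMask == 0 {
		// a chunk boundary falls right after data[i]
	}
}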
// ForEachChunk reads data from 'reader'. If EOF is encountered, it will call 'nextReader' to ask for the next file. If
// 'nextReader' returns false, it will process remaining data in the buffer and then quit. When a chunk is identified,
// it will call 'endOfChunk' to return the chunk size and a boolean flag indicating if it is the last chunk.
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
nextReader func(size int64, hash string) (io.Reader, bool)) {
func (maker *ChunkMaker) startNewChunk() (chunk *Chunk) {
maker.hashSum = 0
maker.minimumReached = false
if maker.hashOnly {
maker.chunk = maker.hashOnlyChunk
maker.chunk.Reset(true)
} else {
maker.chunk = maker.config.GetChunk()
maker.chunk.Reset(true)
}
return
}
maker.bufferStart = 0
maker.bufferSize = 0
var minimumReached bool
var hashSum uint64
var chunk *Chunk
func (maker *ChunkMaker) AddData(reader io.Reader, sendChunk func(*Chunk)) (int64, string) {
isEOF := false
fileSize := int64(0)
fileHasher := maker.config.NewFileHasher()
// Start a new chunk.
startNewChunk := func() {
hashSum = 0
minimumReached = false
if maker.hashOnly {
chunk = maker.hashOnlyChunk
chunk.Reset(true)
} else {
chunk = maker.config.GetChunk()
chunk.Reset(true)
}
}
// Move data from the buffer to the chunk.
fill := func(count int) {
if maker.bufferStart+count < maker.bufferCapacity {
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
maker.chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
maker.bufferStart += count
maker.bufferSize -= count
} else {
chunk.Write(maker.buffer[maker.bufferStart:])
chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
maker.chunk.Write(maker.buffer[maker.bufferStart:])
maker.chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
maker.bufferSize -= count
}
}
startNewChunk()
var err error
isEOF := false
if maker.minimumChunkSize == maker.maximumChunkSize {
if maker.bufferCapacity < maker.minimumChunkSize {
maker.buffer = make([]byte, maker.minimumChunkSize)
if reader == nil {
return 0, ""
}
for {
maker.startNewChunk()
maker.bufferStart = 0
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
count, err := reader.Read(maker.buffer[maker.bufferStart:maker.minimumChunkSize])
@@ -153,7 +173,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
if err != nil {
if err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
return 0, ""
} else {
isEOF = true
}
@@ -161,26 +181,15 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
maker.bufferStart += count
}
fileHasher.Write(maker.buffer[:maker.bufferStart])
fileSize += int64(maker.bufferStart)
chunk.Write(maker.buffer[:maker.bufferStart])
if maker.bufferStart > 0 {
fileHasher.Write(maker.buffer[:maker.bufferStart])
fileSize += int64(maker.bufferStart)
maker.chunk.Write(maker.buffer[:maker.bufferStart])
sendChunk(maker.chunk)
}
if isEOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
endOfChunk(chunk, true)
return
} else {
endOfChunk(chunk, false)
startNewChunk()
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
} else {
endOfChunk(chunk, false)
startNewChunk()
return fileSize, hex.EncodeToString(fileHasher.Sum(nil))
}
}
@@ -189,7 +198,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
for {
// If the buffer still has some space left and EOF is not seen, read more data.
for maker.bufferSize < maker.bufferCapacity && !isEOF {
for maker.bufferSize < maker.bufferCapacity && !isEOF && reader != nil {
start := maker.bufferStart + maker.bufferSize
count := maker.bufferCapacity - start
if start >= maker.bufferCapacity {
@@ -201,7 +210,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
if err != nil && err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
return 0, ""
}
maker.bufferSize += count
@@ -210,54 +219,55 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
// if EOF is seen, try to switch to the next file and continue
if err == io.EOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
isEOF = true
} else {
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
isEOF = true
break
}
}
// Not enough data to meet the minimum chunk size requirement, so just return it as a chunk.
if maker.bufferSize < maker.minimumChunkSize {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
if reader == nil {
fill(maker.bufferSize)
if maker.chunk.GetLength() > 0 {
sendChunk(maker.chunk)
}
return 0, ""
} else if isEOF {
return fileSize, hex.EncodeToString(fileHasher.Sum(nil))
} else {
continue
}
}
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
if !minimumReached {
if !maker.minimumReached {
bytes := maker.minimumChunkSize
if maker.bufferStart+bytes < maker.bufferCapacity {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
maker.hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
} else {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
hashSum = maker.buzhashSum(hashSum,
maker.hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
maker.hashSum = maker.buzhashSum(maker.hashSum,
maker.buffer[:bytes-(maker.bufferCapacity-maker.bufferStart)])
}
if (hashSum & maker.hashMask) == 0 {
if (maker.hashSum & maker.hashMask) == 0 {
// This is a minimum size chunk
fill(bytes)
endOfChunk(chunk, false)
startNewChunk()
sendChunk(maker.chunk)
maker.startNewChunk()
continue
}
minimumReached = true
maker.minimumReached = true
}
// Now check the buzhash of the data in the buffer, shifting one byte at a time.
bytes := maker.bufferSize - maker.minimumChunkSize
isEOC := false
maxSize := maker.maximumChunkSize - chunk.GetLength()
for i := 0; i < maker.bufferSize-maker.minimumChunkSize; i++ {
isEOC := false // becomes true when a chunk boundary is found
maxSize := maker.maximumChunkSize - maker.chunk.GetLength()
for i := 0; i < bytes; i++ {
out := maker.bufferStart + i
if out >= maker.bufferCapacity {
out -= maker.bufferCapacity
@@ -267,8 +277,8 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
in -= maker.bufferCapacity
}
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
if (hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
maker.hashSum = maker.buzhashUpdate(maker.hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
if (maker.hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
// A chunk is completed.
bytes = i + 1 + maker.minimumChunkSize
isEOC = true
@@ -277,21 +287,20 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
}
fill(bytes)
if isEOC {
if isEOF && maker.bufferSize == 0 {
endOfChunk(chunk, true)
return
sendChunk(maker.chunk)
maker.startNewChunk()
} else {
if reader == nil {
fill(maker.minimumChunkSize)
sendChunk(maker.chunk)
maker.startNewChunk()
return 0, ""
}
endOfChunk(chunk, false)
startNewChunk()
continue
}
if isEOF {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
return fileSize, hex.EncodeToString(fileHasher.Sum(nil))
}
}
}
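The new AddData API inverts the old callback-driven ForEachChunk flow: the caller streams one reader per file and finishes with a nil reader to flush whatever remains in the buffer. A usage sketch consistent with the benchmark and test changes in this diff (readers is a hypothetical slice of io.Reader):
maker := CreateFileChunkMaker(config, false)
emit := func(chunk *Chunk) {
	// consume chunk.GetHash() / chunk.GetLength(), then recycle the chunk
	config.PutChunk(chunk)
}
for _, reader := range readers {
	fileSize, fileHash := maker.AddData(reader, emit) // per-file size and hash
	_, _ = fileSize, fileHash
}
maker.AddData(nil, emit) // nil reader: emit the final buffered chunk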

View File

@@ -7,14 +7,12 @@ package duplicacy
import (
"bytes"
crypto_rand "crypto/rand"
"io"
"math/rand"
"sort"
"testing"
)
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
bufferCapacity int) ([]string, int) {
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize int) ([]string, int) {
config := CreateConfig()
@@ -27,14 +25,12 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
maker := CreateChunkMaker(config, false)
maker := CreateFileChunkMaker(config, false)
var chunks []string
totalChunkSize := 0
totalFileSize := int64(0)
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
buffers := make([]*bytes.Buffer, n)
sizes := make([]int, n)
sizes[0] = 0
@@ -42,7 +38,7 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
same := true
for same {
same = false
sizes[i] = rand.Int() % n
sizes[i] = rand.Int() % len(content)
for j := 0; j < i; j++ {
if sizes[i] == sizes[j] {
same = true
@@ -59,22 +55,17 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
}
buffers[n-1] = bytes.NewBuffer(content[sizes[n-1]:])
i := 0
chunkFunc := func(chunk *Chunk) {
chunks = append(chunks, chunk.GetHash())
totalChunkSize += chunk.GetLength()
config.PutChunk(chunk)
}
maker.ForEachChunk(buffers[0],
func(chunk *Chunk, final bool) {
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
chunks = append(chunks, chunk.GetHash())
totalChunkSize += chunk.GetLength()
},
func(size int64, hash string) (io.Reader, bool) {
totalFileSize += size
i++
if i >= len(buffers) {
return nil, false
}
return buffers[i], true
})
for _, buffer := range buffers {
fileSize, _ := maker.AddData(buffer, chunkFunc)
totalFileSize += fileSize
}
maker.AddData(nil, chunkFunc)
if totalFileSize != int64(totalChunkSize) {
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
@@ -96,35 +87,28 @@ func TestChunkMaker(t *testing.T) {
continue
}
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16)
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
32, 48, 64, 128, 256, 512, 1024, 2048}
//capacities := [...]int { 32 }
for _, n := range [...]int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} {
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16)
for _, capacity := range capacities {
if totalSize1 != totalSize2 {
t.Errorf("[size %d] total size is %d instead of %d",
size, totalSize2, totalSize1)
}
for _, n := range [...]int{6, 7, 8, 9, 10} {
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
if totalSize1 != totalSize2 {
t.Errorf("[size %d, capacity %d] total size is %d instead of %d",
size, capacity, totalSize2, totalSize1)
}
if len(chunkArray1) != len(chunkArray2) {
t.Errorf("[size %d, capacity %d] number of chunks is %d instead of %d",
size, capacity, len(chunkArray2), len(chunkArray1))
} else {
for i := 0; i < len(chunkArray1); i++ {
if chunkArray1[i] != chunkArray2[i] {
t.Errorf("[size %d, capacity %d, chunk %d] chunk is different", size, capacity, i)
}
if len(chunkArray1) != len(chunkArray2) {
t.Errorf("[size %d] number of chunks is %d instead of %d",
size, len(chunkArray2), len(chunkArray1))
} else {
for i := 0; i < len(chunkArray1); i++ {
if chunkArray1[i] != chunkArray2[i] {
t.Errorf("[size %d, chunk %d] chunk is different", size, i)
}
}
}
}
}

View File

@@ -5,6 +5,7 @@
package duplicacy
import (
"io"
"sync"
"sync/atomic"
"time"
@@ -12,42 +13,69 @@ import (
// These are operations that ChunkOperator will perform.
const (
ChunkOperationFind = 0
ChunkOperationDelete = 1
ChunkOperationFossilize = 2
ChunkOperationResurrect = 3
ChunkOperationDownload = 0
ChunkOperationUpload = 1
ChunkOperationDelete = 2
ChunkOperationFossilize = 3
ChunkOperationResurrect = 4
ChunkOperationFind = 5
)
// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
type ChunkOperatorTask struct {
operation int // The type of operation
chunkID string // The chunk id
filePath string // The path of the chunk file; it may be empty
// ChunkTask is used to pass parameters for different kinds of chunk operations.
type ChunkTask struct {
operation int // The type of operation
chunkID string // The chunk id
chunkHash string // The chunk hash
chunkIndex int // The chunk index
filePath string // The path of the chunk file; it may be empty
isMetadata bool
chunk *Chunk
completionFunc func(chunk *Chunk, chunkIndex int)
}
// ChunkOperator is capable of performing multi-threaded operations on chunks.
type ChunkOperator struct {
numberOfActiveTasks int64 // The number of chunks that are being operated on
storage Storage // This storage
threads int // Number of threads
taskQueue chan ChunkOperatorTask // Operating goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop all the goroutines
config *Config // Associated config
storage Storage // This storage
snapshotCache *FileStorage
showStatistics bool
threads int // Number of threads
taskQueue chan ChunkTask // Operating goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop all the goroutines
fossils []string // For fossilize operation, the paths of the fossils are stored in this slice
fossilsLock *sync.Mutex // The lock for 'fossils'
numberOfActiveTasks int64 // The number of chunks that are being operated on
fossils []string // For fossilize operation, the paths of the fossils are stored in this slice
collectionLock *sync.Mutex // The lock for accessing 'fossils'
startTime int64 // The time it starts downloading
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
allowFailures bool // Whether to fail on download error, or continue
NumberOfFailedChunks int64 // The number of chunks that can't be downloaded
UploadCompletionFunc func(chunk *Chunk, chunkIndex int, inCache bool, chunkSize int, uploadSize int)
}
// CreateChunkOperator creates a new ChunkOperator.
func CreateChunkOperator(storage Storage, threads int) *ChunkOperator {
func CreateChunkOperator(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkOperator {
operator := &ChunkOperator{
config: config,
storage: storage,
snapshotCache: snapshotCache,
showStatistics: showStatistics,
threads: threads,
taskQueue: make(chan ChunkOperatorTask, threads*4),
taskQueue: make(chan ChunkTask, threads),
stopChannel: make(chan bool),
fossils: make([]string, 0),
fossilsLock: &sync.Mutex{},
collectionLock: &sync.Mutex{},
startTime: time.Now().Unix(),
allowFailures: allowFailures,
}
// Start the operator goroutines
@@ -84,38 +112,78 @@ func (operator *ChunkOperator) Stop() {
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
}
func (operator *ChunkOperator) AddTask(operation int, chunkID string, filePath string) {
func (operator *ChunkOperator) WaitForCompletion() {
task := ChunkOperatorTask{
operation: operation,
chunkID: chunkID,
filePath: filePath,
for atomic.LoadInt64(&operator.numberOfActiveTasks) > 0 {
time.Sleep(100 * time.Millisecond)
}
operator.taskQueue <- task
atomic.AddInt64(&operator.numberOfActiveTasks, int64(1))
}
func (operator *ChunkOperator) Find(chunkID string) {
operator.AddTask(ChunkOperationFind, chunkID, "")
func (operator *ChunkOperator) AddTask(operation int, chunkID string, chunkHash string, filePath string, chunkIndex int, chunk *Chunk, isMetadata bool, completionFunc func(*Chunk, int)) {
task := ChunkTask {
operation: operation,
chunkID: chunkID,
chunkHash: chunkHash,
chunkIndex: chunkIndex,
filePath: filePath,
chunk: chunk,
isMetadata: isMetadata,
completionFunc: completionFunc,
}
operator.taskQueue <- task
atomic.AddInt64(&operator.numberOfActiveTasks, int64(1))
return
}
func (operator *ChunkOperator) Download(chunkHash string, chunkIndex int, isMetadata bool) *Chunk {
chunkID := operator.config.GetChunkIDFromHash(chunkHash)
completionChannel := make(chan *Chunk)
completionFunc := func(chunk *Chunk, chunkIndex int) {
completionChannel <- chunk
}
operator.AddTask(ChunkOperationDownload, chunkID, chunkHash, "", chunkIndex, nil, isMetadata, completionFunc)
return <- completionChannel
}
func (operator *ChunkOperator) DownloadAsync(chunkHash string, chunkIndex int, isMetadata bool, completionFunc func(*Chunk, int)) {
chunkID := operator.config.GetChunkIDFromHash(chunkHash)
operator.AddTask(ChunkOperationDownload, chunkID, chunkHash, "", chunkIndex, nil, isMetadata, completionFunc)
}
func (operator *ChunkOperator) Upload(chunk *Chunk, chunkIndex int, isMetadata bool) {
chunkHash := chunk.GetHash()
chunkID := operator.config.GetChunkIDFromHash(chunkHash)
operator.AddTask(ChunkOperationUpload, chunkID, chunkHash, "", chunkIndex, chunk, isMetadata, nil)
}
func (operator *ChunkOperator) Delete(chunkID string, filePath string) {
operator.AddTask(ChunkOperationDelete, chunkID, filePath)
operator.AddTask(ChunkOperationDelete, chunkID, "", filePath, 0, nil, false, nil)
}
func (operator *ChunkOperator) Fossilize(chunkID string, filePath string) {
operator.AddTask(ChunkOperationFossilize, chunkID, filePath)
operator.AddTask(ChunkOperationFossilize, chunkID, "", filePath, 0, nil, false, nil)
}
func (operator *ChunkOperator) Resurrect(chunkID string, filePath string) {
operator.AddTask(ChunkOperationResurrect, chunkID, filePath)
operator.AddTask(ChunkOperationResurrect, chunkID, "", filePath, 0, nil, false, nil)
}
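Taken together, these wrappers make ChunkOperator the single entry point for all chunk I/O. A minimal usage sketch, assuming config, storage, a chunk, and its chunkHash are already in scope (illustrative values, error handling elided; not part of the diff):
operator := CreateChunkOperator(config, storage, nil, false, 4, false)
operator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, inCache bool, chunkSize int, uploadSize int) {
	// Called once per chunk, whether uploaded or deduplicated by name.
}
operator.Upload(chunk, 0, false)                     // queued to a worker goroutine; returns immediately
operator.WaitForCompletion()                         // polls until numberOfActiveTasks drops to zero
downloaded := operator.Download(chunkHash, 0, false) // blocks on a completion channel
operator.Stop()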
func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
func (operator *ChunkOperator) Run(threadIndex int, task ChunkTask) {
defer func() {
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
}()
if task.operation == ChunkOperationDownload {
operator.DownloadChunk(threadIndex, task)
return
} else if task.operation == ChunkOperationUpload {
operator.UploadChunk(threadIndex, task)
return
}
// task.filePath may be empty. If so, find the chunk first.
if task.operation == ChunkOperationDelete || task.operation == ChunkOperationFossilize {
if task.filePath == "" {
@@ -132,9 +200,9 @@ func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
fossilPath, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true)
if exist {
LOG_WARN("CHUNK_FOSSILIZE", "Chunk %s is already a fossil", task.chunkID)
operator.fossilsLock.Lock()
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
operator.collectionLock.Unlock()
} else {
LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
}
@@ -175,17 +243,17 @@ func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
if err == nil {
LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s as the fossil already exists", task.chunkID)
}
operator.fossilsLock.Lock()
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
operator.collectionLock.Unlock()
} else {
LOG_ERROR("CHUNK_DELETE", "Failed to fossilize the chunk %s: %v", task.chunkID, err)
}
} else {
LOG_TRACE("CHUNK_FOSSILIZE", "The chunk %s has been marked as a fossil", task.chunkID)
operator.fossilsLock.Lock()
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
operator.collectionLock.Unlock()
}
} else if task.operation == ChunkOperationResurrect {
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
@@ -207,3 +275,267 @@ func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
}
}
}
// DownloadChunk downloads a chunk from the storage.
func (operator *ChunkOperator) DownloadChunk(threadIndex int, task ChunkTask) {
cachedPath := ""
chunk := operator.config.GetChunk()
chunk.isMetadata = task.isMetadata
chunkID := task.chunkID
defer func() {
if chunk != nil {
operator.config.PutChunk(chunk)
}
} ()
if task.isMetadata && operator.snapshotCache != nil {
var exist bool
var err error
// Reset the chunk with a hasher -- we're reading from the cache where chunks are not encrypted or compressed
chunk.Reset(true)
cachedPath, exist, _, err = operator.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
err = operator.snapshotCache.DownloadFile(0, cachedPath, chunk)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
} else {
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
task.completionFunc(chunk, task.chunkIndex)
chunk = nil
return
}
}
}
}
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
// will be set up before the encryption
chunk.Reset(false)
chunk.isMetadata = task.isMetadata
// If failures are allowed, complete the task properly
completeFailedChunk := func() {
atomic.AddInt64(&operator.NumberOfFailedChunks, 1)
if operator.allowFailures {
task.completionFunc(chunk, task.chunkIndex)
}
}
const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
// Find the chunk by ID first.
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return
}
if !exist {
// No chunk is found. Have to find it in the fossil pool again.
fossilPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return
}
if !exist {
retry := false
// Retry for Hubic or WebDAV as they may return 404 even when the chunk exists
if _, ok := operator.storage.(*HubicStorage); ok {
retry = true
}
if _, ok := operator.storage.(*WebDAVStorage); ok {
retry = true
}
if retry && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
continue
}
// The chunk can't be found anywhere. This is a serious error that hopefully will never happen.
completeFailedChunk()
if err != nil {
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return
}
// We can't download the fossil directly. We have to turn it back into a regular chunk and try
// downloading again.
err = operator.storage.MoveFile(threadIndex, fossilPath, chunkPath)
if err != nil {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
return
}
LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
continue
}
err = operator.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
_, isHubic := operator.storage.(*HubicStorage)
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
chunk.isMetadata = task.isMetadata
continue
} else {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
return
}
}
err = chunk.Decrypt(operator.config.ChunkKey, task.chunkHash)
if err != nil {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
chunk.isMetadata = task.isMetadata
continue
} else {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
return
}
}
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "The chunk %s has a hash id of %s; retrying", chunkID, actualChunkID)
chunk.Reset(false)
chunk.isMetadata = task.isMetadata
continue
} else {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
return
}
}
break
}
if chunk.isMetadata && len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err := operator.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}
}
downloadedChunkSize := atomic.AddInt64(&operator.downloadedChunkSize, int64(chunk.GetLength()))
if (operator.showStatistics || IsTracing()) && operator.totalChunkSize > 0 {
now := time.Now().Unix()
if now <= operator.startTime {
now = operator.startTime + 1
}
speed := downloadedChunkSize / (now - operator.startTime)
remainingTime := int64(0)
if speed > 0 {
remainingTime = (operator.totalChunkSize-downloadedChunkSize)/speed + 1
}
percentage := float32(downloadedChunkSize * 1000 / operator.totalChunkSize)
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
task.chunkIndex+1, chunk.GetLength(),
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
} else {
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
}
task.completionFunc(chunk, task.chunkIndex)
chunk = nil
return
}
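To make the integer arithmetic above concrete (illustrative numbers only): with a totalChunkSize of 1 GiB, a downloadedChunkSize of 256 MiB, and 32 seconds elapsed, speed is 8 MiB/s, remainingTime is (1024 - 256) MiB / 8 MiB/s + 1 = 97 seconds, and percentage is 256 * 1000 / 1024 = 250, which the format string prints as 25.0% by dividing by 10.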
// UploadChunk is called by the task goroutines to perform the actual uploading
func (operator *ChunkOperator) UploadChunk(threadIndex int, task ChunkTask) bool {
chunk := task.chunk
chunkID := task.chunkID
chunkSize := chunk.GetLength()
// For a snapshot chunk, verify that its chunk id is correct
if task.isMetadata {
chunk.VerifyID()
}
if task.isMetadata && operator.snapshotCache != nil && operator.storage.IsCacheNeeded() {
// Save a copy to the local snapshot cache.
chunkPath, exist, _, err := operator.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
} else if err = operator.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
}
}
// This returns the path the chunk file should be at.
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
return false
}
if exist {
// Chunk deduplication by name in effect here.
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
operator.UploadCompletionFunc(chunk, task.chunkIndex, false, chunkSize, 0)
return false
}
// Encrypt the chunk only after we know that it must be uploaded.
err = chunk.Encrypt(operator.config.ChunkKey, chunk.GetHash(), task.isMetadata)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
return false
}
if !operator.config.dryRun {
err = operator.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
return false
}
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
} else {
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
}
operator.UploadCompletionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
return true
}

View File

@@ -15,11 +15,11 @@ import (
"math/rand"
)
func TestUploaderAndDownloader(t *testing.T) {
func TestChunkOperator(t *testing.T) {
rand.Seed(time.Now().UnixNano())
setTestingT(t)
SetLoggingLevel(INFO)
SetLoggingLevel(DEBUG)
defer func() {
if r := recover(); r != nil {
@@ -38,7 +38,7 @@ func TestUploaderAndDownloader(t *testing.T) {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
t.Logf("storage: %s", testStorageName)
t.Logf("storage: %s", *testStorageName)
storage, err := loadStorage(testDir, 1)
if err != nil {
@@ -46,7 +46,7 @@ func TestUploaderAndDownloader(t *testing.T) {
return
}
storage.EnableTestMode()
storage.SetRateLimits(testRateLimit, testRateLimit)
storage.SetRateLimits(*testRateLimit, *testRateLimit)
for _, dir := range []string{"chunks", "snapshots"} {
err = storage.CreateDirectory(0, dir)
@@ -59,7 +59,7 @@ func TestUploaderAndDownloader(t *testing.T) {
numberOfChunks := 100
maxChunkSize := 64 * 1024
if testQuickMode {
if *testQuickMode {
numberOfChunks = 10
}
@@ -87,35 +87,25 @@ func TestUploaderAndDownloader(t *testing.T) {
totalFileSize += chunk.GetLength()
}
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
chunkOperator := CreateChunkOperator(config, storage, nil, false, *testThreads, false)
chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
}
chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
for i, chunk := range chunks {
chunkUploader.StartChunk(chunk, i)
chunkOperator.Upload(chunk, i, false)
}
chunkUploader.Stop()
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
chunkDownloader.totalChunkSize = int64(totalFileSize)
for _, chunk := range chunks {
chunkDownloader.AddChunk(chunk.GetHash())
}
chunkOperator.WaitForCompletion()
for i, chunk := range chunks {
downloaded := chunkDownloader.WaitForChunk(i)
downloaded := chunkOperator.Download(chunk.GetHash(), i, false)
if downloaded.GetID() != chunk.GetID() {
t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
}
}
chunkDownloader.Stop()
chunkOperator.Stop()
for _, file := range listChunks(storage) {
err = storage.DeleteFile(0, "chunks/"+file)

View File

@@ -1,151 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"sync/atomic"
"time"
)
// ChunkUploadTask represents a chunk to be uploaded.
type ChunkUploadTask struct {
chunk *Chunk
chunkIndex int
}
// ChunkUploader uploads chunks to the storage using one or more uploading goroutines. Chunks are added
// by calls to StartChunk() and then passed to the uploading goroutines. The completion function is
// called when the uploading is completed. Note that ChunkUploader does not release chunks to the
// chunk pool; instead, the completion function is expected to do so.
type ChunkUploader struct {
config *Config // Associated config
storage Storage // Download from this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for uploading snapshot chunks
threads int // Number of uploading goroutines
taskQueue chan ChunkUploadTask // Uploading goroutines are listening on this channel for upload jobs
stopChannel chan bool // Used to terminate uploading goroutines
numberOfUploadingTasks int32 // The number of uploading tasks
// Uploading goroutines call this function after having uploaded chunks
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)
}
// CreateChunkUploader creates a chunk uploader.
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
uploader := &ChunkUploader{
config: config,
storage: storage,
snapshotCache: snapshotCache,
threads: threads,
taskQueue: make(chan ChunkUploadTask, 1),
stopChannel: make(chan bool),
completionFunc: completionFunc,
}
return uploader
}
// Start starts the uploading goroutines.
func (uploader *ChunkUploader) Start() {
for i := 0; i < uploader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <-uploader.taskQueue:
uploader.Upload(threadIndex, task)
case <-uploader.stopChannel:
return
}
}
}(i)
}
}
// StartChunk sends a chunk to be uploaded to a waiting uploading goroutine. It may block if all uploading goroutines are busy.
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
uploader.taskQueue <- ChunkUploadTask{
chunk: chunk,
chunkIndex: chunkIndex,
}
}
// Stop stops all uploading goroutines.
func (uploader *ChunkUploader) Stop() {
for atomic.LoadInt32(&uploader.numberOfUploadingTasks) > 0 {
time.Sleep(100 * time.Millisecond)
}
for i := 0; i < uploader.threads; i++ {
uploader.stopChannel <- false
}
}
// Upload is called by the uploading goroutines to perform the actual uploading
func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) bool {
chunk := task.chunk
chunkSize := chunk.GetLength()
chunkID := chunk.GetID()
// For a snapshot chunk, verify that its chunk id is correct
if uploader.snapshotCache != nil {
chunk.VerifyID()
}
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
// Save a copy to the local snapshot cache.
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
}
}
// This returns the path the chunk file should be at.
chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
return false
}
if exist {
// Chunk deduplication by name in effect here.
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return false
}
// Encrypt the chunk only after we know that it must be uploaded.
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
return false
}
if !uploader.config.dryRun {
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
return false
}
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
} else {
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
}
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return true
}

View File

@@ -18,6 +18,7 @@ import (
"fmt"
"hash"
"os"
"strings"
"runtime"
"runtime/debug"
"sync/atomic"
@@ -34,8 +35,8 @@ var DEFAULT_KEY = []byte("duplicacy")
// standard zlib levels of -1 to 9.
var DEFAULT_COMPRESSION_LEVEL = 100
// The new header of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_HEADER = "duplicacy\001"
// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_BANNER = "duplicacy\001"
// The length of the salt used in the new format
var CONFIG_SALT_LENGTH = 32
@@ -70,6 +71,10 @@ type Config struct {
// for encrypting a non-chunk file
FileKey []byte `json:"-"`
// for erasure coding
DataShards int `json:"data-shards"`
ParityShards int `json:"parity-shards"`
// for RSA encryption
rsaPrivateKey *rsa.PrivateKey
rsaPublicKey *rsa.PublicKey
@@ -180,6 +185,10 @@ func (config *Config) Print() {
LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
}
if config.DataShards != 0 && config.ParityShards != 0 {
LOG_TRACE("CONFIG_INFO", "Data shards: %d, parity shards: %d", config.DataShards, config.ParityShards)
}
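As a note on the shard parameters (a general property of Reed-Solomon erasure coding, not spelled out in this diff): each chunk is encoded into DataShards data shards plus ParityShards parity shards, and any DataShards of those pieces suffice to reconstruct the chunk, so for example data-shards=5 with parity-shards=2 tolerates the loss of any two shards.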
if config.rsaPublicKey != nil {
pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
@@ -386,11 +395,11 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
return nil, false, err
}
if len(configFile.GetBytes()) < len(ENCRYPTION_HEADER) {
if len(configFile.GetBytes()) < len(ENCRYPTION_BANNER) {
return nil, false, fmt.Errorf("The storage has an invalid config file")
}
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)-1]) == ENCRYPTION_HEADER[:len(ENCRYPTION_HEADER)-1] && len(password) == 0 {
if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)-1]) == ENCRYPTION_BANNER[:len(ENCRYPTION_BANNER)-1] && len(password) == 0 {
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
}
@@ -398,23 +407,23 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
if len(password) > 0 {
if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)]) == ENCRYPTION_HEADER {
if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)]) == ENCRYPTION_BANNER {
// This is the old config format with a static salt and a fixed number of iterations
masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
} else if string(configFile.GetBytes()[:len(CONFIG_HEADER)]) == CONFIG_HEADER {
} else if string(configFile.GetBytes()[:len(CONFIG_BANNER)]) == CONFIG_BANNER {
// This is the new config format with a random salt and a configurable number of iterations
encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4
// Extract the salt and the number of iterations
saltStart := configFile.GetBytes()[len(CONFIG_HEADER):]
saltStart := configFile.GetBytes()[len(CONFIG_BANNER):]
iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))
// Copy to a temporary buffer to replace the header and remove the salt and the number of iterations
// Copy to a temporary buffer to replace the banner and remove the salt and the number of iterations
var encrypted bytes.Buffer
encrypted.Write([]byte(ENCRYPTION_HEADER))
encrypted.Write([]byte(ENCRYPTION_BANNER))
encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])
configFile.Reset(false)
@@ -423,7 +432,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
}
} else {
return nil, true, fmt.Errorf("The config file has an invalid header")
return nil, true, fmt.Errorf("The config file has an invalid banner")
}
// Decrypt the config file. masterKey == nil means no encryption.
@@ -487,15 +496,15 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
return false
}
// The new encrypted format for config is CONFIG_HEADER + salt + #iterations + encrypted content
// The new encrypted format for config is CONFIG_BANNER + salt + #iterations + encrypted content
encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4
// Copy to a temporary buffer to replace the header and add the salt and the number of iterations
// Copy to a temporary buffer to replace the banner and add the salt and the number of iterations
var encrypted bytes.Buffer
encrypted.Write([]byte(CONFIG_HEADER))
encrypted.Write([]byte(CONFIG_BANNER))
encrypted.Write(salt)
binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_HEADER):])
encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_BANNER):])
chunk.Reset(false)
chunk.Write(encrypted.Bytes())
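The resulting on-disk layout of the config file in the new format is therefore:
CONFIG_BANNER ("duplicacy\x01") | 32-byte random salt | 4-byte little-endian iteration count | encrypted payload with its ENCRYPTION_BANNER stripped
and DownloadConfig above reverses the transformation by re-prepending ENCRYPTION_BANNER before decryption.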
@@ -528,7 +537,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
// is enabled.
func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string) bool {
minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string, dataShards int, parityShards int) bool {
exist, _, _, err := storage.GetFileInfo(0, "config")
if err != nil {
@@ -550,14 +559,24 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
if keyFile != "" {
config.loadRSAPublicKey(keyFile)
}
config.DataShards = dataShards
config.ParityShards = parityShards
return UploadConfig(storage, config, password, iterations)
}
func (config *Config) loadRSAPublicKey(keyFile string) {
encodedKey, err := ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
return
encodedKey := []byte(keyFile)
var err error
// keyFile may actually be the key itself, in which case we don't need to read from a file
if !strings.Contains(keyFile, "-----BEGIN") {
encodedKey, err = ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
return
}
}
decodedKey, _ := pem.Decode(encodedKey)
@@ -593,10 +612,16 @@ func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {
return
}
encodedKey, err := ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
return
encodedKey := []byte(keyFile)
var err error
// keyFile may actually be the key itself, in which case we don't need to read from a file
if !strings.Contains(keyFile, "-----BEGIN") {
encodedKey, err = ioutil.ReadFile(keyFile)
if err != nil {
LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
return
}
}
decodedKey, _ := pem.Decode(encodedKey)
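Both key loaders now accept either a filesystem path or the PEM text itself: a caller can pass, for example (hypothetical values), "/home/user/public.pem" or a string beginning with "-----BEGIN PUBLIC KEY-----" pulled from an environment variable; the "-----BEGIN" check above decides which case applies.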

View File

@@ -21,11 +21,11 @@ type DropboxStorage struct {
}
// CreateDropboxStorage creates a dropbox storage object.
func CreateDropboxStorage(accessToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
func CreateDropboxStorage(refreshToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
var clients []*dropbox.Files
for i := 0; i < threads; i++ {
client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
client := dropbox.NewFiles(dropbox.NewConfig("", refreshToken, "https://duplicacy.com/dropbox_refresh"))
clients = append(clients, client)
}
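For illustration (hypothetical arguments): CreateDropboxStorage(refreshToken, "backups", 0, 4) builds one dropbox.Files client per thread, and each client presumably exchanges the long-lived refresh token for short-lived access tokens via the https://duplicacy.com/dropbox_refresh endpoint passed above.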

View File

@@ -16,6 +16,11 @@ import (
"strconv"
"strings"
"time"
"bytes"
"crypto/sha256"
"github.com/vmihailenco/msgpack"
)
// This is the hidden directory in the repository for storing various files.
@@ -45,7 +50,7 @@ type Entry struct {
EndChunk int
EndOffset int
Attributes map[string][]byte
Attributes *map[string][]byte
}
// CreateEntry creates an entry from file properties.
@@ -93,6 +98,27 @@ func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {
return entry
}
func (entry *Entry) Copy() *Entry {
return &Entry{
Path: entry.Path,
Size: entry.Size,
Time: entry.Time,
Mode: entry.Mode,
Link: entry.Link,
Hash: entry.Hash,
UID: entry.UID,
GID: entry.GID,
StartChunk: entry.StartChunk,
StartOffset: entry.StartOffset,
EndChunk: entry.EndChunk,
EndOffset: entry.EndOffset,
Attributes: entry.Attributes,
}
}
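// Note that Copy is shallow with respect to Attributes: the returned Entry shares the
// same attribute map pointer with the original rather than deep-copying the map.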
// CreateEntryFromJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
@@ -175,17 +201,17 @@ func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
if attributes, ok := value.(map[string]interface{}); !ok {
return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
} else {
entry.Attributes = make(map[string][]byte)
entry.Attributes = &map[string][]byte{}
for name, object := range attributes {
if object == nil {
entry.Attributes[name] = []byte("")
(*entry.Attributes)[name] = []byte("")
} else if attributeInBase64, ok := object.(string); !ok {
return fmt.Errorf("Attribute '%s' is invalid for file '%s' in the snapshot", name, entry.Path)
} else if attribute, err := base64.StdEncoding.DecodeString(attributeInBase64); err != nil {
return fmt.Errorf("Failed to decode attribute '%s' for file '%s' in the snapshot: %v",
name, entry.Path, err)
} else {
entry.Attributes[name] = attribute
(*entry.Attributes)[name] = attribute
}
}
}
@@ -244,7 +270,7 @@ func (entry *Entry) convertToObject(encodeName bool) map[string]interface{} {
object["gid"] = entry.GID
}
if len(entry.Attributes) > 0 {
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
object["attributes"] = entry.Attributes
}
@@ -259,6 +285,197 @@ func (entry *Entry) MarshalJSON() ([]byte, error) {
return description, err
}
var _ msgpack.CustomEncoder = (*Entry)(nil)
var _ msgpack.CustomDecoder = (*Entry)(nil)
func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
err := encoder.EncodeString(entry.Path)
if err != nil {
return err
}
err = encoder.EncodeInt(entry.Size)
if err != nil {
return err
}
err = encoder.EncodeInt(entry.Time)
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.Mode))
if err != nil {
return err
}
err = encoder.EncodeString(entry.Link)
if err != nil {
return err
}
err = encoder.EncodeString(entry.Hash)
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.StartChunk))
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.StartOffset))
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.EndChunk))
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.EndOffset))
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.UID))
if err != nil {
return err
}
err = encoder.EncodeInt(int64(entry.GID))
if err != nil {
return err
}
var numberOfAttributes int64
if entry.Attributes != nil {
numberOfAttributes = int64(len(*entry.Attributes))
}
err = encoder.EncodeInt(numberOfAttributes)
if err != nil {
return err
}
if entry.Attributes != nil {
attributes := make([]string, numberOfAttributes)
i := 0
for attribute := range *entry.Attributes {
attributes[i] = attribute
i++
}
sort.Strings(attributes)
for _, attribute := range attributes {
err = encoder.EncodeString(attribute)
if err != nil {
return err
}
err = encoder.EncodeString(string((*entry.Attributes)[attribute]))
if err != nil {
return err
}
}
}
return nil
}
func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error {
var err error
entry.Path, err = decoder.DecodeString()
if err != nil {
return err
}
entry.Size, err = decoder.DecodeInt64()
if err != nil {
return err
}
entry.Time, err = decoder.DecodeInt64()
if err != nil {
return err
}
mode, err := decoder.DecodeInt64()
if err != nil {
return err
}
entry.Mode = uint32(mode)
entry.Link, err = decoder.DecodeString()
if err != nil {
return err
}
entry.Hash, err = decoder.DecodeString()
if err != nil {
return err
}
startChunk, err := decoder.DecodeInt()
if err != nil {
return err
}
entry.StartChunk = int(startChunk)
startOffset, err := decoder.DecodeInt()
if err != nil {
return err
}
entry.StartOffset = int(startOffset)
endChunk, err := decoder.DecodeInt()
if err != nil {
return err
}
entry.EndChunk = int(endChunk)
endOffset, err := decoder.DecodeInt()
if err != nil {
return err
}
entry.EndOffset = int(endOffset)
uid, err := decoder.DecodeInt()
if err != nil {
return err
}
entry.UID = int(uid)
gid, err := decoder.DecodeInt()
if err != nil {
return err
}
entry.GID = int(gid)
numberOfAttributes, err := decoder.DecodeInt()
if err != nil {
return err
}
if numberOfAttributes > 0 {
entry.Attributes = &map[string][]byte{}
for i := 0; i < numberOfAttributes; i++ {
attribute, err := decoder.DecodeString()
if err != nil {
return err
}
value, err := decoder.DecodeString()
if err != nil {
return err
}
(*entry.Attributes)[attribute] = []byte(value)
}
}
return nil
}
func (entry *Entry) IsFile() bool {
return entry.Mode&uint32(os.ModeType) == 0
}
@@ -271,10 +488,27 @@ func (entry *Entry) IsLink() bool {
return entry.Mode&uint32(os.ModeSymlink) != 0
}
func (entry *Entry) IsComplete() bool {
return entry.Size >= 0
}
func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode) & fileModeMask
}
func (entry *Entry) GetParent() string {
path := entry.Path
if path != "" && path[len(path) - 1] == '/' {
path = path[:len(path) - 1]
}
i := strings.LastIndex(path, "/")
if i == -1 {
return ""
} else {
return path[:i]
}
}
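For example, GetParent("ab/c/") and GetParent("ab/c") both return "ab", while a top-level name such as "file" returns the empty string.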
func (entry *Entry) IsSameAs(other *Entry) bool {
return entry.Size == other.Size && entry.Time <= other.Time+1 && entry.Time >= other.Time-1
}
@@ -326,7 +560,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
}
if len(entry.Attributes) > 0 {
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
@@ -335,47 +569,62 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
// Files are always arranged before subdirectories under the same parent directory.
func (left *Entry) Compare(right *Entry) int {
path1 := left.Path
path2 := right.Path
func ComparePaths(left string, right string) int {
p := 0
for ; p < len(path1) && p < len(path2); p++ {
if path1[p] != path2[p] {
for ; p < len(left) && p < len(right); p++ {
if left[p] != right[p] {
break
}
}
// c1, c2 is the first byte that differs
// c1, c2 are the first bytes that differ
var c1, c2 byte
if p < len(path1) {
c1 = path1[p]
if p < len(left) {
c1 = left[p]
}
if p < len(path2) {
c2 = path2[p]
if p < len(right) {
c2 = right[p]
}
// c3, c4 indicates how the current component ends
// c3 == '/': the current component is a directory
// c3 != '/': the current component is the last one
// c3, c4 indicate how the current component ends
// c3 == '/': the current component is a directory; c3 != '/': the current component is the last one
c3 := c1
for i := p; c3 != '/' && i < len(path1); i++ {
c3 = path1[i]
// last1, last2 indicate whether the current component is the last one
last1 := true
for i := p; i < len(left); i++ {
c3 = left[i]
if c3 == '/' {
last1 = i == len(left) - 1
break
}
}
c4 := c2
for i := p; c4 != '/' && i < len(path2); i++ {
c4 = path2[i]
last2 := true
for i := p; i < len(right); i++ {
c4 = right[i]
if c4 == '/' {
last2 = i == len(right) - 1
break
}
}
if last1 != last2 {
if last1 {
return -1
} else {
return 1
}
}
if c3 == '/' {
if c4 == '/' {
// We are comparing two directory components
if c1 == '/' {
// left is shorter
// Note that c2 maybe smaller than c1 but c1 is '/' which is counted
// as 0
// left is shorter; note that c2 may be smaller than c1, but c1 should be treated as 0, therefore
// this is a special case that must be handled separately
return -1
} else if c2 == '/' {
// right is shorter
@@ -397,6 +646,10 @@ func (left *Entry) Compare(right *Entry) int {
}
}
func (left *Entry) Compare(right *Entry) int {
return ComparePaths(left.Path, right.Path)
}
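The last1/last2 comparison is what keeps files ahead of subdirectories under the same parent: for instance, ComparePaths("ab+/c1", "ab+/c-/d") returns -1 even though '-' sorts before '1' byte-wise, because the left path's final component is a leaf while the right path descends into a subdirectory.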
// This is used to sort entries by their names.
type ByName []*Entry
@@ -443,7 +696,7 @@ func (files FileInfoCompare) Less(i, j int) bool {
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, nobackupFile string, discardAttributes bool) (directoryList []*Entry,
func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool, listingChannel chan *Entry) (directoryList []*Entry,
skippedFiles []string, err error) {
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
@@ -478,8 +731,6 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
sort.Sort(FileInfoCompare(files))
entries := make([]*Entry, 0, 4)
for _, f := range files {
if f.Name() == DUPLICACY_DIRECTORY {
continue
@@ -520,8 +771,11 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
}
}
if !discardAttributes {
entry.ReadAttributes(top)
entry.ReadAttributes(top)
if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute", entry.Path)
continue
}
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
@@ -530,20 +784,20 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
continue
}
entries = append(entries, entry)
if entry.IsDir() {
directoryList = append(directoryList, entry)
} else {
listingChannel <- entry
}
}
// For top level directory we need to sort again because symlinks may have been changed
if path == "" {
sort.Sort(ByName(entries))
sort.Sort(ByName(directoryList))
}
for _, entry := range entries {
if entry.IsDir() {
directoryList = append(directoryList, entry)
} else {
*fileList = append(*fileList, entry)
}
for _, entry := range directoryList {
listingChannel <- entry
}
for i, j := 0, len(directoryList)-1; i < j; i, j = i+1, j-1 {
@@ -592,3 +846,100 @@ func (entry *Entry) Diff(chunkHashes []string, chunkLengths []int,
return modifiedLength
}
func (entry *Entry) EncodeWithHash(encoder *msgpack.Encoder) error {
entryBytes, err := msgpack.Marshal(entry)
if err != nil {
return err
}
hash := sha256.Sum256(entryBytes)
err = encoder.EncodeBytes(entryBytes)
if err != nil {
return err
}
err = encoder.EncodeBytes(hash[:])
if err != nil {
return err
}
return nil
}
func DecodeEntryWithHash(decoder *msgpack.Decoder) (*Entry, error) {
entryBytes, err := decoder.DecodeBytes()
if err != nil {
return nil, err
}
hashBytes, err := decoder.DecodeBytes()
if err != nil {
return nil, err
}
expectedHash := sha256.Sum256(entryBytes)
if bytes.Compare(expectedHash[:], hashBytes) != 0 {
return nil, fmt.Errorf("corrupted file metadata")
}
var entry Entry
err = msgpack.Unmarshal(entryBytes, &entry)
if err != nil {
return nil, err
}
return &entry, nil
}
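A minimal round-trip sketch of this hashed framing (illustrative only; error handling abbreviated):
var buffer bytes.Buffer
encoder := msgpack.NewEncoder(&buffer)
if err := entry.EncodeWithHash(encoder); err != nil {
	return err
}
decoder := msgpack.NewDecoder(&buffer)
decoded, err := DecodeEntryWithHash(decoder) // fails with "corrupted file metadata" if the bytes were altered
if err != nil {
	return err
}
LOG_DEBUG("ENTRY_ROUNDTRIP", "Decoded entry %s", decoded.Path)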
func (entry *Entry) check(chunkLengths []int) error {
if entry.Size < 0 {
return fmt.Errorf("The file %s hash an invalid size (%d)", entry.Path, entry.Size)
}
if !entry.IsFile() || entry.Size == 0 {
return nil
}
if entry.StartChunk < 0 {
return fmt.Errorf("The file %s starts at chunk %d", entry.Path, entry.StartChunk)
}
if entry.EndChunk >= len(chunkLengths) {
return fmt.Errorf("The file %s ends at chunk %d while the number of chunks is %d",
entry.Path, entry.EndChunk, len(chunkLengths))
}
if entry.EndChunk < entry.StartChunk {
return fmt.Errorf("The file %s starts at chunk %d and ends at chunk %d",
entry.Path, entry.StartChunk, entry.EndChunk)
}
if entry.StartOffset >= chunkLengths[entry.StartChunk] {
return fmt.Errorf("The file %s starts at offset %d of chunk %d of length %d",
entry.Path, entry.StartOffset, entry.StartChunk, chunkLengths[entry.StartChunk])
}
if entry.EndOffset > chunkLengths[entry.EndChunk] {
return fmt.Errorf("The file %s ends at offset %d of chunk %d of length %d",
entry.Path, entry.EndOffset, entry.EndChunk, chunkLengths[entry.EndChunk])
}
fileSize := int64(0)
for i := entry.StartChunk; i <= entry.EndChunk; i++ {
start := 0
if i == entry.StartChunk {
start = entry.StartOffset
}
end := chunkLengths[i]
if i == entry.EndChunk {
end = entry.EndOffset
}
fileSize += int64(end - start)
}
if entry.Size != fileSize {
return fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
entry.Path, entry.Size, fileSize)
}
return nil
}

View File

@@ -9,8 +9,15 @@ import (
"math/rand"
"os"
"path/filepath"
"runtime"
"sort"
"strings"
"testing"
"bytes"
"encoding/json"
"github.com/gilbertchen/xattr"
"github.com/vmihailenco/msgpack"
)
func TestEntrySort(t *testing.T) {
@@ -23,19 +30,19 @@ func TestEntrySort(t *testing.T) {
"\xBB\xDDfile",
"\xFF\xDDfile",
"ab/",
"ab-/",
"ab0/",
"ab1/",
"ab/c",
"ab+/c-",
"ab+/c0",
"ab+/c/",
"ab+/c/d",
"ab+/c+/",
"ab+/c+/d",
"ab+/c0/",
"ab+/c/d",
"ab+/c+/d",
"ab+/c0/d",
"ab-/",
"ab-/c",
"ab0/",
"ab1/",
"ab1/c",
"ab1/\xBB\xDDfile",
"ab1/\xFF\xDDfile",
@@ -82,7 +89,7 @@ func TestEntrySort(t *testing.T) {
}
}
func TestEntryList(t *testing.T) {
func TestEntryOrder(t *testing.T) {
testDir := filepath.Join(os.TempDir(), "duplicacy_test")
os.RemoveAll(testDir)
@@ -94,16 +101,16 @@ func TestEntryList(t *testing.T) {
"ab0",
"ab1",
"ab+/",
"ab2/",
"ab3/",
"ab+/c",
"ab+/c+",
"ab+/c1",
"ab+/c-/",
"ab+/c-/d",
"ab+/c0/",
"ab+/c-/d",
"ab+/c0/d",
"ab2/",
"ab2/c",
"ab3/",
"ab3/c",
}
@@ -168,18 +175,24 @@ func TestEntryList(t *testing.T) {
directories = append(directories, CreateEntry("", 0, 0, 0))
entries := make([]*Entry, 0, 4)
entryChannel := make(chan *Entry, 1024)
entries = append(entries, CreateEntry("", 0, 0, 0))
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
entries = append(entries, directory)
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false)
subdirectories, _, err := ListEntries(testDir, directory.Path, nil, "", false, entryChannel)
if err != nil {
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
}
directories = append(directories, subdirectories...)
}
close(entryChannel)
for entry := range entryChannel {
entries = append(entries, entry)
}
entries = entries[1:]
for _, entry := range entries {
@@ -216,3 +229,147 @@ func TestEntryList(t *testing.T) {
}
}
// TestEntryExcludeByAttribute tests the excludeByAttribute parameter to the ListEntries function
func TestEntryExcludeByAttribute(t *testing.T) {
if !(runtime.GOOS == "darwin" || runtime.GOOS == "linux") {
t.Skip("skipping test not darwin or linux")
}
testDir := filepath.Join(os.TempDir(), "duplicacy_test")
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
// Files or folders named with "exclude" below will have the exclusion attribute set on them
// When ListEntries is called with excludeByAttribute true, they should be excluded.
DATA := [...]string{
"excludefile",
"includefile",
"excludedir/",
"excludedir/file",
"includedir/",
"includedir/includefile",
"includedir/excludefile",
}
for _, file := range DATA {
fullPath := filepath.Join(testDir, file)
if file[len(file)-1] == '/' {
err := os.Mkdir(fullPath, 0700)
if err != nil {
t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
}
continue
}
err := ioutil.WriteFile(fullPath, []byte(file), 0700)
if err != nil {
t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
}
}
for _, file := range DATA {
fullPath := filepath.Join(testDir, file)
if strings.Contains(file, "exclude") {
xattr.Setxattr(fullPath, "com.apple.metadata:com_apple_backup_excludeItem", []byte("com.apple.backupd"))
}
}
for _, excludeByAttribute := range [2]bool{true, false} {
t.Logf("testing excludeByAttribute: %t", excludeByAttribute)
directories := make([]*Entry, 0, 4)
directories = append(directories, CreateEntry("", 0, 0, 0))
entries := make([]*Entry, 0, 4)
entryChannel := make(chan *Entry, 1024)
entries = append(entries, CreateEntry("", 0, 0, 0))
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
subdirectories, _, err := ListEntries(testDir, directory.Path, nil, "", excludeByAttribute, entryChannel)
if err != nil {
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
}
directories = append(directories, subdirectories...)
}
close(entryChannel)
for entry := range entryChannel {
entries = append(entries, entry)
}
entries = entries[1:]
for _, entry := range entries {
t.Logf("entry: %s", entry.Path)
}
i := 0
for _, file := range DATA {
entryFound := false
var entry *Entry
for _, entry = range entries {
if entry.Path == file {
entryFound = true
break
}
}
if excludeByAttribute && strings.Contains(file, "exclude") {
if entryFound {
t.Errorf("file: %s, expected to be excluded but wasn't. attributes: %v", file, entry.Attributes)
i++
} else {
t.Logf("file: %s, excluded", file)
}
} else {
if entryFound {
t.Logf("file: %s, included. attributes: %v", file, entry.Attributes)
i++
} else {
t.Errorf("file: %s, expected to be included but wasn't", file)
}
}
}
}
if !t.Failed() {
os.RemoveAll(testDir)
}
}
func TestEntryEncoding(t *testing.T) {
buffer := new(bytes.Buffer)
encoder := msgpack.NewEncoder(buffer)
entry1 := CreateEntry("abcd", 1, 2, 0700)
err := encoder.Encode(entry1)
if err != nil {
t.Errorf("Failed to encode the entry: %v", err)
return
}
t.Logf("msgpack size: %d\n", len(buffer.Bytes()))
decoder := msgpack.NewDecoder(buffer)
description, _ := json.Marshal(entry1)
t.Logf("json size: %d\n", len(description))
var entry2 Entry
err = decoder.Decode(&entry2)
if err != nil {
t.Errorf("Failed to decode the entry: %v", err)
return
}
if entry1.Path != entry2.Path || entry1.Size != entry2.Size || entry1.Time != entry2.Time {
t.Error("Decoded entry is different than the original one")
}
}

src/duplicacy_entrylist.go (new file, 574 lines)
View File

@@ -0,0 +1,574 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"encoding/hex"
"encoding/binary"
"fmt"
"os"
"io"
"path"
"crypto/sha256"
"crypto/rand"
"sync"
"github.com/vmihailenco/msgpack"
)
// This struct stores information about a file entry that has been modified
type ModifiedEntry struct {
Path string
Size int64
Hash string
}
// EntryList is basically a list of entries, which can be kept in memory or serialized to a disk file,
// depending on whether maximumInMemoryEntries is reached.
//
// The idea behind the on-disk entry list is that entries are written to a disk file as they are coming in.
// Entries that have been modified and thus need to be uploaded will have their Incomplete bit set (i.e.,
// with a size of -1). When the limit is reached, entries are moved to a disk file but ModifiedEntries and
// UploadedChunks are still kept in memory. When later entries are read from the entry list, incomplete
// entries are back-annotated with info from ModifiedEntries and UploadedChunk* before sending them out.
type EntryList struct {
onDiskFile *os.File // the file to store entries
encoder *msgpack.Encoder // msgpack encoder for entry serialization
entries []*Entry // in-memory entry list
SnapshotID string // the snapshot id
Token string // this unique random token makes sure we read/write
// the same entry list
ModifiedEntries []ModifiedEntry // entries that will be uploaded
UploadedChunkHashes []string // chunks from entries that have been uploaded
UploadedChunkLengths []int // chunk lengths from entries that have been uploaded
uploadedChunkLock sync.Mutex // lock for UploadedChunkHashes and UploadedChunkLengths
PreservedChunkHashes []string // chunks from entries not changed
PreservedChunkLengths []int // chunk lengths from entries not changed
Checksum string // checksum of all entries to detect disk corruption
maximumInMemoryEntries int // max in-memory entries
NumberOfEntries int64 // number of entries (not including directories and links)
cachePath string // the directory for the on-disk file
// These 3 variables are used in entry information back-annotation
modifiedEntryIndex int // points to the current modified entry
uploadedChunkIndex int // counter for upload chunks
uploadedChunkOffset int // the start offset for the current modified entry
}
// Create a new entry list
func CreateEntryList(snapshotID string, cachePath string, maximumInMemoryEntries int) (*EntryList, error) {
token := make([]byte, 16)
_, err := rand.Read(token)
if err != nil {
return nil, fmt.Errorf("Failed to create a random token: %v", err)
}
entryList := &EntryList {
SnapshotID: snapshotID,
maximumInMemoryEntries: maximumInMemoryEntries,
cachePath: cachePath,
Token: string(token),
}
return entryList, nil
}
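A sketch of the intended lifecycle, assuming a hypothetical snapshot id, cache path, and in-memory limit, with entry, chunkHash, and chunkSize in scope (error handling elided):
entryList, _ := CreateEntryList("host-backup", "/tmp/duplicacy-cache", 1024)
entryList.AddEntry(entry) // spills to the on-disk 'incomplete_files' file past 1024 entries
entryList.AddUploadedChunk(0, chunkHash, chunkSize)
entryList.ReadEntries(func(entry *Entry) error {
	// each entry arrives with StartChunk/StartOffset/EndChunk/EndOffset back-annotated
	return nil
})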
// Create the on-disk entry list file
func (entryList *EntryList)createOnDiskFile() error {
file, err := os.OpenFile(path.Join(entryList.cachePath, "incomplete_files"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return fmt.Errorf("Failed to create on disk entry list: %v", err)
}
entryList.onDiskFile = file
entryList.encoder = msgpack.NewEncoder(file)
err = entryList.encoder.EncodeString(entryList.Token)
if err != nil {
return fmt.Errorf("Failed to create on disk entry list: %v", err)
}
for _, entry := range entryList.entries {
err = entry.EncodeWithHash(entryList.encoder)
if err != nil {
return err
}
}
return nil
}
// Add an entry to the entry list
func (entryList *EntryList)AddEntry(entry *Entry) error {
if !entry.IsDir() && !entry.IsLink() {
entryList.NumberOfEntries++
}
if !entry.IsComplete() {
if entry.IsDir() || entry.IsLink() {
entry.Size = 0
} else {
modifiedEntry := ModifiedEntry {
Path: entry.Path,
Size: -1,
}
entryList.ModifiedEntries = append(entryList.ModifiedEntries, modifiedEntry)
}
}
if entryList.onDiskFile != nil {
return entry.EncodeWithHash(entryList.encoder)
} else {
entryList.entries = append(entryList.entries, entry)
if entryList.maximumInMemoryEntries >= 0 && len(entryList.entries) > entryList.maximumInMemoryEntries {
err := entryList.createOnDiskFile()
if err != nil {
return err
}
}
}
return nil
}
// Add a preserved chunk that belongs to files that have not been modified
func (entryList *EntryList)AddPreservedChunk(chunkHash string, chunkSize int) {
entryList.PreservedChunkHashes = append(entryList.PreservedChunkHashes, chunkHash)
entryList.PreservedChunkLengths = append(entryList.PreservedChunkLengths, chunkSize)
}
// Add a chunk just uploaded (that belongs to files that have been modified)
func (entryList *EntryList)AddUploadedChunk(chunkIndex int, chunkHash string, chunkSize int) {
entryList.uploadedChunkLock.Lock()
for len(entryList.UploadedChunkHashes) <= chunkIndex {
entryList.UploadedChunkHashes = append(entryList.UploadedChunkHashes, "")
}
for len(entryList.UploadedChunkLengths) <= chunkIndex {
entryList.UploadedChunkLengths = append(entryList.UploadedChunkLengths, 0)
}
entryList.UploadedChunkHashes[chunkIndex] = chunkHash
entryList.UploadedChunkLengths[chunkIndex] = chunkSize
entryList.uploadedChunkLock.Unlock()
}
// Close the on-disk file
func (entryList *EntryList) CloseOnDiskFile() error {
if entryList.onDiskFile == nil {
return nil
}
err := entryList.onDiskFile.Sync()
if err != nil {
return err
}
err = entryList.onDiskFile.Close()
if err != nil {
return err
}
entryList.onDiskFile = nil
return nil
}
// Return the length of the `index`th chunk
func (entryList *EntryList) getChunkLength(index int) int {
if index < len(entryList.PreservedChunkLengths) {
return entryList.PreservedChunkLengths[index]
} else {
return entryList.UploadedChunkLengths[index - len(entryList.PreservedChunkLengths)]
}
}
// Sanity check for each entry
func (entryList *EntryList) checkEntry(entry *Entry) error {
if entry.Size < 0 {
return fmt.Errorf("the file %s hash an invalid size (%d)", entry.Path, entry.Size)
}
if !entry.IsFile() || entry.Size == 0 {
return nil
}
numberOfChunks := len(entryList.PreservedChunkLengths) + len(entryList.UploadedChunkLengths)
if entry.StartChunk < 0 {
return fmt.Errorf("the file %s starts at chunk %d", entry.Path, entry.StartChunk)
}
if entry.EndChunk >= numberOfChunks {
return fmt.Errorf("the file %s ends at chunk %d while the number of chunks is %d",
entry.Path, entry.EndChunk, numberOfChunks)
}
if entry.EndChunk < entry.StartChunk {
return fmt.Errorf("the file %s starts at chunk %d and ends at chunk %d",
entry.Path, entry.StartChunk, entry.EndChunk)
}
if entry.StartOffset >= entryList.getChunkLength(entry.StartChunk) {
return fmt.Errorf("the file %s starts at offset %d of chunk %d with a length of %d",
entry.Path, entry.StartOffset, entry.StartChunk, entryList.getChunkLength(entry.StartChunk))
}
if entry.EndOffset > entryList.getChunkLength(entry.EndChunk) {
return fmt.Errorf("the file %s ends at offset %d of chunk %d with a length of %d",
entry.Path, entry.EndOffset, entry.EndChunk, entryList.getChunkLength(entry.EndChunk))
}
fileSize := int64(0)
for i := entry.StartChunk; i <= entry.EndChunk; i++ {
start := 0
if i == entry.StartChunk {
start = entry.StartOffset
}
end := entryList.getChunkLength(i)
if i == entry.EndChunk {
end = entry.EndOffset
}
fileSize += int64(end - start)
}
if entry.Size != fileSize {
return fmt.Errorf("the file %s has a size of %d but the total size of chunks is %d",
entry.Path, entry.Size, fileSize)
}
return nil
}
// An incomplete entry (with a size of -1) does not have 'startChunk', 'startOffset', 'endChunk', or 'endOffset' set. This function
// fills in this information before sending the entry out.
func (entryList *EntryList) fillAndSendEntry(entry *Entry, entryOut func(*Entry)error) (skipped bool, err error) {
if entry.IsComplete() {
err := entryList.checkEntry(entry)
if err != nil {
return false, err
}
return false, entryOut(entry)
}
if entryList.modifiedEntryIndex >= len(entryList.ModifiedEntries) {
return false, fmt.Errorf("Unexpected file index %d (%d modified files)", entryList.modifiedEntryIndex, len(entryList.ModifiedEntries))
}
modifiedEntry := &entryList.ModifiedEntries[entryList.modifiedEntryIndex]
entryList.modifiedEntryIndex++
if modifiedEntry.Path != entry.Path {
return false, fmt.Errorf("Unexpected file path %s when expecting %s", modifiedEntry.Path, entry.Path)
}
if modifiedEntry.Size <= 0 {
return true, nil
}
entry.Size = modifiedEntry.Size
entry.Hash = modifiedEntry.Hash
entry.StartChunk = entryList.uploadedChunkIndex + len(entryList.PreservedChunkHashes)
entry.StartOffset = entryList.uploadedChunkOffset
entry.EndChunk = entry.StartChunk
endOffset := int64(entry.StartOffset) + entry.Size
for entryList.uploadedChunkIndex < len(entryList.UploadedChunkLengths) && endOffset > int64(entryList.UploadedChunkLengths[entryList.uploadedChunkIndex]) {
endOffset -= int64(entryList.UploadedChunkLengths[entryList.uploadedChunkIndex])
entry.EndChunk++
entryList.uploadedChunkIndex++
}
if entryList.uploadedChunkIndex >= len(entryList.UploadedChunkLengths) {
return false, fmt.Errorf("File %s has not been completely uploaded", entry.Path)
}
entry.EndOffset = int(endOffset)
entryList.uploadedChunkOffset = entry.EndOffset
if entry.EndOffset == entryList.UploadedChunkLengths[entryList.uploadedChunkIndex] {
entryList.uploadedChunkIndex++
entryList.uploadedChunkOffset = 0
}
err = entryList.checkEntry(entry)
if err != nil {
return false, err
}
return false, entryOut(entry)
}
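// A worked example of the offset arithmetic above (illustrative numbers): with no
// preserved chunks and UploadedChunkLengths = [100, 100], a modified entry of
// size 150 filled at uploadedChunkIndex 0 and offset 0 gets StartChunk=0,
// StartOffset=0, EndChunk=1, EndOffset=50; the next modified entry then starts
// at chunk 1, offset 50.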
// Iterate through the entries in this entry list
func (entryList *EntryList) ReadEntries(entryOut func(*Entry) error) error {
entryList.modifiedEntryIndex = 0
entryList.uploadedChunkIndex = 0
entryList.uploadedChunkOffset = 0
if entryList.onDiskFile == nil {
for _, entry := range entryList.entries {
skipped, err := entryList.fillAndSendEntry(entry.Copy(), entryOut)
if err != nil {
return err
}
if skipped {
continue
}
}
} else {
_, err := entryList.onDiskFile.Seek(0, os.SEEK_SET)
if err != nil {
return err
}
decoder := msgpack.NewDecoder(entryList.onDiskFile)
_, err = decoder.DecodeString()
if err != nil {
return err
}
for _, err = decoder.PeekCode(); err == nil; _, err = decoder.PeekCode() {
entry, err := DecodeEntryWithHash(decoder)
if err != nil {
return err
}
skipped, err := entryList.fillAndSendEntry(entry, entryOut)
if err != nil {
return err
}
if skipped {
continue
}
}
if err != io.EOF {
return err
}
}
return nil
}
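// A minimal usage sketch (not part of the original file): count the regular
// files in a populated entry list.
func countFiles(entryList *EntryList) (count int, err error) {
    err = entryList.ReadEntries(func(entry *Entry) error {
        if entry.IsFile() {
            count++
        }
        return nil
    })
    return count, err
}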
// When saving an incomplete snapshot, the on-disk entry list ('incomplete_files') is renamed to
// 'incomplete_snapshot', and this EntryList struct is saved as 'incomplete_chunks'.
func (entryList *EntryList) SaveIncompleteSnapshot() {
entryList.uploadedChunkLock.Lock()
defer entryList.uploadedChunkLock.Unlock()
if entryList.onDiskFile == nil {
err := entryList.createOnDiskFile()
if err != nil {
LOG_WARN("INCOMPLETE_SAVE", "Failed to create the incomplete snapshot file: %v", err)
return
}
for _, entry := range entryList.entries {
err = entry.EncodeWithHash(entryList.encoder)
if err != nil {
LOG_WARN("INCOMPLETE_SAVE", "Failed to save the entry %s: %v", entry.Path, err)
return
}
}
}
err := entryList.onDiskFile.Close()
if err != nil {
LOG_WARN("INCOMPLETE_SAVE", "Failed to close the on-disk file: %v", err)
return
}
filePath := path.Join(entryList.cachePath, "incomplete_snapshot")
if _, err := os.Stat(filePath); err == nil {
err = os.Remove(filePath)
if err != nil {
LOG_WARN("INCOMPLETE_REMOVE", "Failed to remove previous incomplete snapshot: %v", err)
}
}
err = os.Rename(path.Join(entryList.cachePath, "incomplete_files"), filePath)
if err != nil {
LOG_WARN("INCOMPLETE_SAVE", "Failed to rename the incomplete snapshot file: %v", err)
return
}
chunkFile := path.Join(entryList.cachePath, "incomplete_chunks")
file, err := os.OpenFile(chunkFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
LOG_WARN("INCOMPLETE_SAVE", "Failed to create the incomplete chunk file: %v", err)
return
}
defer file.Close()
encoder := msgpack.NewEncoder(file)
entryList.Checksum = entryList.CalculateChecksum()
err = encoder.Encode(entryList)
if err != nil {
LOG_WARN("INCOMPLETE_SAVE", "Failed to save the incomplete snapshot: %v", err)
return
}
LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", filePath)
}
// Calculate a checksum for this entry list
func (entryList *EntryList) CalculateChecksum() string {
hasher := sha256.New()
for _, s := range entryList.UploadedChunkHashes {
hasher.Write([]byte(s))
}
buffer := make([]byte, 8)
for _, i := range entryList.UploadedChunkLengths {
binary.LittleEndian.PutUint64(buffer, uint64(i))
hasher.Write(buffer)
}
for _, s := range entryList.PreservedChunkHashes {
hasher.Write([]byte(s))
}
for _, i := range entryList.PreservedChunkLengths {
binary.LittleEndian.PutUint64(buffer, uint64(i))
hasher.Write(buffer)
}
for _, entry := range entryList.ModifiedEntries {
binary.LittleEndian.PutUint64(buffer, uint64(entry.Size))
hasher.Write(buffer)
hasher.Write([]byte(entry.Hash))
}
return hex.EncodeToString(hasher.Sum(nil))
}
// Check if all chunks exist in 'chunkCache'
func (entryList *EntryList) CheckChunks(config *Config, chunkCache map[string]bool) bool {
for _, chunkHash := range entryList.UploadedChunkHashes {
chunkID := config.GetChunkIDFromHash(chunkHash)
if _, ok := chunkCache[chunkID]; !ok {
return false
}
}
for _, chunkHash := range entryList.PreservedChunkHashes {
chunkID := config.GetChunkIDFromHash(chunkHash)
if _, ok := chunkCache[chunkID]; !ok {
return false
}
}
return true
}
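// A minimal usage sketch (not part of the original file): build the set of chunk
// IDs known to exist on the storage ('knownChunkIDs' is hypothetical) and ask
// whether all chunks referenced by the entry list are among them.
func canResume(config *Config, entryList *EntryList, knownChunkIDs []string) bool {
    chunkCache := make(map[string]bool)
    for _, chunkID := range knownChunkIDs {
        chunkCache[chunkID] = true
    }
    return entryList.CheckChunks(config, chunkCache)
}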
// Recover the on disk file from 'incomplete_snapshot', and restore the EntryList struct
// from 'incomplete_chunks'
func loadIncompleteSnapshot(snapshotID string, cachePath string) *EntryList {
onDiskFilePath := path.Join(cachePath, "incomplete_snapshot")
entryListFilePath := path.Join(cachePath, "incomplete_chunks")
if _, err := os.Stat(onDiskFilePath); os.IsNotExist(err) {
return nil
}
if _, err := os.Stat(entryListFilePath); os.IsNotExist(err) {
return nil
}
entryList := &EntryList {}
entryListFile, err := os.OpenFile(entryListFilePath, os.O_RDONLY, 0600)
if err != nil {
LOG_WARN("INCOMPLETE_LOAD", "Failed to open the incomplete snapshot: %v", err)
return nil
}
defer entryListFile.Close()
decoder := msgpack.NewDecoder(entryListFile)
err = decoder.Decode(&entryList)
if err != nil {
LOG_WARN("INCOMPLETE_LOAD", "Failed to load the incomplete snapshot: %v", err)
return nil
}
checksum := entryList.CalculateChecksum()
if checksum != entryList.Checksum {
LOG_WARN("INCOMPLETE_LOAD", "Failed to load the incomplete snapshot: checksum mismatched")
return nil
}
onDiskFile, err := os.OpenFile(onDiskFilePath, os.O_RDONLY, 0600)
if err != nil {
LOG_WARN("INCOMPLETE_LOAD", "Failed to open the on disk file for the incomplete snapshot: %v", err)
return nil
}
decoder = msgpack.NewDecoder(onDiskFile)
token, err := decoder.DecodeString()
if err != nil {
LOG_WARN("INCOMPLETE_LOAD", "Failed to read the token for the incomplete snapshot: %v", err)
onDiskFile.Close()
return nil
}
if token != entryList.Token {
LOG_WARN("INCOMPLETE_LOAD", "Mismatched tokens in the incomplete snapshot")
onDiskFile.Close()
return nil
}
entryList.onDiskFile = onDiskFile
for i, hash := range entryList.UploadedChunkHashes {
if len(hash) == 0 {
// An empty hash means the chunk has not been uploaded in previous run
entryList.UploadedChunkHashes = entryList.UploadedChunkHashes[0:i]
entryList.UploadedChunkLengths = entryList.UploadedChunkLengths[0:i]
break
}
}
LOG_INFO("INCOMPLETE_LOAD", "Previous incomlete backup contains %d files and %d chunks",
entryList.NumberOfEntries, len(entryList.PreservedChunkLengths) + len(entryList.UploadedChunkHashes))
return entryList
}
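// A resume-flow sketch (not part of the original file): load the incomplete
// snapshot, verify that every chunk it references is still present, and discard
// it otherwise; 'chunkCache' is assumed to be built from a storage listing.
func tryResume(config *Config, snapshotID string, cachePath string, chunkCache map[string]bool) *EntryList {
    entryList := loadIncompleteSnapshot(snapshotID, cachePath)
    if entryList == nil {
        return nil
    }
    if !entryList.CheckChunks(config, chunkCache) {
        deleteIncompleteSnapshot(cachePath)
        return nil
    }
    return entryList
}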
// Delete the two incomplete files.
func deleteIncompleteSnapshot(cachePath string) {
for _, file := range []string{"incomplete_snapshot", "incomplete_chunks"} {
filePath := path.Join(cachePath, file)
if _, err := os.Stat(filePath); err == nil {
err = os.Remove(filePath)
if err != nil {
LOG_WARN("INCOMPLETE_REMOVE", "Failed to remove the incomplete snapshot: %v", err)
return
}
}
}
}

View File

@@ -0,0 +1,179 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"os"
"path"
"time"
"testing"
"math/rand"
)
func generateRandomString(length int) string {
var letters = []rune("abcdefghijklmnopqrstuvwxyz")
b := make([]rune, length)
for i := range b {
b[i] = letters[rand.Intn(len(letters))]
}
return string(b)
}
var fileSizeGenerator = rand.NewZipf(rand.New(rand.NewSource(time.Now().UnixNano())), 1.2, 1.0, 1024)
func generateRandomFileSize() int64 {
return int64(fileSizeGenerator.Uint64() + 1)
}
func generateRandomChunks(totalFileSize int64) (chunks []string, lengths []int) {
totalChunkSize := int64(0)
for totalChunkSize < totalFileSize {
chunks = append(chunks, generateRandomString(64))
chunkSize := int64(1 + (rand.Int() % 64))
if chunkSize + totalChunkSize > totalFileSize {
chunkSize = totalFileSize - totalChunkSize
}
lengths = append(lengths, int(chunkSize))
totalChunkSize += chunkSize
}
return chunks, lengths
}
func getPreservedChunks(entries []*Entry, chunks []string, lengths []int) (preservedChunks []string, preservedChunkLengths []int) {
lastPreservedChunk := -1
for i := range entries {
if entries[i].Size < 0 {
continue
}
if lastPreservedChunk != entries[i].StartChunk {
lastPreservedChunk = entries[i].StartChunk
preservedChunks = append(preservedChunks, chunks[entries[i].StartChunk])
preservedChunkLengths = append(preservedChunkLengths, lengths[entries[i].StartChunk])
}
for j := entries[i].StartChunk + 1; j <= entries[i].EndChunk; j++ {
preservedChunks = append(preservedChunks, chunks[j])
preservedChunkLengths = append(preservedChunkLengths, lengths[j])
lastPreservedChunk = j
}
}
return
}
func testEntryList(t *testing.T, numberOfEntries int, maximumInMemoryEntries int) {
entries := make([]*Entry, 0, numberOfEntries)
entrySizes := make([]int64, 0)
for i := 0; i < numberOfEntries; i++ {
entry := CreateEntry(generateRandomString(16), -1, 0, 0700)
entries = append(entries, entry)
entrySizes = append(entrySizes, generateRandomFileSize())
}
totalFileSize := int64(0)
for _, size := range entrySizes {
totalFileSize += size
}
testDir := path.Join(os.TempDir(), "duplicacy_test")
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
os.MkdirAll(testDir + "/list1", 0700)
os.MkdirAll(testDir + "/list2", 0700)
os.MkdirAll(testDir + "/list3", 0700)
os.MkdirAll(testDir + "/list1", 0700)
// For the first entry list, all entries are new
entryList, _ := CreateEntryList("test", testDir + "/list1", maximumInMemoryEntries)
for _, entry := range entries {
entryList.AddEntry(entry)
}
uploadedChunks, uploadedChunksLengths := generateRandomChunks(totalFileSize)
for i, chunk := range uploadedChunks {
entryList.AddUploadedChunk(i, chunk, uploadedChunksLengths[i])
}
for i := range entryList.ModifiedEntries {
entryList.ModifiedEntries[i].Size = entrySizes[i]
}
totalEntries := 0
err := entryList.ReadEntries(func(entry *Entry) error {
totalEntries++
return nil
})
if err != nil {
t.Errorf("ReadEntries returned an error: %s", err)
return
}
if totalEntries != numberOfEntries {
t.Errorf("EntryList contains %d entries instead of %d", totalEntries, numberOfEntries)
return
}
// For the second entry list, half of the entries are new
for i := range entries {
if rand.Int() % 2 == 0 {
entries[i].Size = -1
} else {
entries[i].Size = entrySizes[i]
}
}
preservedChunks, preservedChunkLengths := getPreservedChunks(entries, uploadedChunks, uploadedChunksLengths)
entryList, _ = CreateEntryList("test", testDir + "/list2", maximumInMemoryEntries)
for _, entry := range entries {
entryList.AddEntry(entry)
}
for i, chunk := range preservedChunks {
entryList.AddPreservedChunk(chunk, preservedChunkLengths[i])
}
totalFileSize = 0
for i := range entryList.ModifiedEntries {
fileSize := generateRandomFileSize()
entryList.ModifiedEntries[i].Size = fileSize
totalFileSize += fileSize
}
uploadedChunks, uploadedChunksLengths = generateRandomChunks(totalFileSize)
for i, chunk := range uploadedChunks {
entryList.AddUploadedChunk(i, chunk, uploadedChunksLengths[i])
}
totalEntries = 0
err = entryList.ReadEntries(func(entry *Entry) error {
totalEntries++
return nil
})
if err != nil {
t.Errorf("ReadEntries returned an error: %s", err)
return
}
if totalEntries != numberOfEntries {
t.Errorf("EntryList contains %d entries instead of %d", totalEntries, numberOfEntries)
return
}
}
func TestEntryList(t *testing.T) {
testEntryList(t, 1024, 1024)
testEntryList(t, 1024, 512)
testEntryList(t, 1024, 0)
}
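// The three calls above presumably cover the fully in-memory case, a mixed case,
// and the fully on-disk case, since 'maximumInMemoryEntries' controls when an
// EntryList spills its entries to the on-disk file.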

View File

@@ -0,0 +1,618 @@
// Copyright (c) Storage Made Easy. All rights reserved.
//
// This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
// Duplicacy and its derivative works.
//
package duplicacy
import (
"io"
"fmt"
"time"
"sync"
"bytes"
"errors"
"strings"
"net/url"
"net/http"
"math/rand"
"io/ioutil"
"encoding/xml"
"path/filepath"
"mime/multipart"
)
// The XML element representing a file returned by the File Fabric server
type FileFabricFile struct {
XMLName xml.Name
ID string `xml:"fi_id"`
Path string `xml:"path"`
Size int64 `xml:"fi_size"`
Type int `xml:"fi_type"`
}
// The XML element representing a file list returned by the server
type FileFabricFileList struct {
XMLName xml.Name `xml:"files"`
Files []FileFabricFile `xml:",any"`
}
type FileFabricStorage struct {
StorageBase
endpoint string // the server
authToken string // the authentication token
accessToken string // the access token (as returned by getTokenByAuthToken)
storageDir string // the path of the storage directory
storageDirID string // the id of 'storageDir'
client *http.Client // the default http client
threads int // number of threads
maxRetries int // maximum number of retries
directoryCache map[string]string // stores ids for directories known to this backend
directoryCacheLock sync.Mutex // lock for accessing directoryCache
isAuthorized bool
testMode bool
}
var (
errFileFabricAuthorizationFailure = errors.New("Authentication failure")
errFileFabricDirectoryExists = errors.New("Directory exists")
)
// The general server response
type FileFabricResponse struct {
Status string `xml:"status"`
Message string `xml:"statusmessage"`
}
// Check the server response and return an error representing the error message it contains
func checkFileFabricResponse(response FileFabricResponse, actionFormat string, actionArguments ...interface{}) error {
action := fmt.Sprintf(actionFormat, actionArguments...)
if response.Status == "ok" && response.Message == "Success" {
return nil
} else if response.Status == "error_data" {
if response.Message == "Folder with same name already exists." {
return errFileFabricDirectoryExists
}
}
return fmt.Errorf("Failed to %s (status: %s, message: %s)", action, response.Status, response.Message)
}
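// For illustration, a successful response decoded by the struct above might look
// like the following (the exact payload is an assumption; only the 'status' and
// 'statusmessage' elements are read):
//
//   <response>
//     <status>ok</status>
//     <statusmessage>Success</statusmessage>
//   </response>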
// Create a File Fabric storage backend
func CreateFileFabricStorage(endpoint string, token string, storageDir string, threads int) (storage *FileFabricStorage, err error) {
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}
storage = &FileFabricStorage{
endpoint: endpoint,
authToken: token,
client: http.DefaultClient,
threads: threads,
directoryCache: make(map[string]string),
maxRetries: 12,
}
err = storage.getAccessToken()
if err != nil {
return nil, err
}
storageDirID, isDir, _, err := storage.getFileInfo(0, storageDir)
if err != nil {
return nil, err
}
if storageDirID == "" {
return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
}
if !isDir {
return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
}
storage.storageDir = storageDir
storage.storageDirID = storageDirID
for _, dir := range []string{"snapshots", "chunks"} {
storage.CreateDirectory(0, dir)
}
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{0}, 0)
return storage, nil
}
// Retrieve the access token using an auth token
func (storage *FileFabricStorage) getAccessToken() (error) {
formData := url.Values { "authtoken": {storage.authToken},}
readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
if err != nil {
return err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output struct {
FileFabricResponse
Token string `xml:"token"`
}
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return err
}
err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
if err != nil {
return err
}
storage.accessToken = output.Token
return nil
}
// Determine if we should retry based on the number of retries given by 'retry'; if so, sleep for a delay computed with exponential backoff and jitter
func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, messageArguments ...interface{}) bool {
message := fmt.Sprintf(messageFormat, messageArguments...)
if retry >= storage.maxRetries {
LOG_WARN("FILEFABRIC_REQUEST", "%s", message)
return false
}
backoff := 1 << uint(retry)
if backoff > 60 {
backoff = 60
}
delay := rand.Intn(backoff*500) + backoff*500
LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay) / 1000.0)
time.Sleep(time.Duration(delay) * time.Millisecond)
return true
}
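// A worked example of the schedule above: retry 0 gives backoff 1 and a delay in
// [500, 1000) ms; retry 3 gives backoff 8 and a delay in [4000, 8000) ms; from
// retry 6 onward backoff is capped at 60, for a delay in [30000, 60000) ms.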
// Send a request to the server
func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) ( io.ReadCloser, http.Header, int64, error) {
var response *http.Response
for retries := 0; ; retries++ {
var inputReader io.Reader
switch input.(type) {
case url.Values:
values := input.(url.Values)
inputReader = strings.NewReader(values.Encode())
if requestHeaders == nil {
requestHeaders = make(map[string]string)
}
requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
case *RateLimitedReader:
rateLimitedReader := input.(*RateLimitedReader)
rateLimitedReader.Reset()
inputReader = rateLimitedReader
default:
LOG_FATAL("FILEFABRIC_REQUEST", "Input type is not supported")
return nil, nil, 0, fmt.Errorf("Input type is not supported")
}
request, err := http.NewRequest(method, requestURL, inputReader)
if err != nil {
return nil, nil, 0, err
}
if requestHeaders != nil {
for key, value := range requestHeaders {
request.Header.Set(key, value)
}
}
if _, ok := input.(*RateLimitedReader); ok {
request.ContentLength = input.(*RateLimitedReader).Length()
}
response, err = storage.client.Do(request)
if err != nil {
if !storage.shouldRetry(retries, "[%d] %s %s returned an error: %v", threadIndex, method, requestURL, err) {
return nil, nil, 0, err
}
continue
}
if response.StatusCode < 300 {
return response.Body, response.Header, response.ContentLength, nil
}
defer response.Body.Close()
defer io.Copy(ioutil.Discard, response.Body)
var output struct {
Status string `xml:"status"`
Message string `xml:"statusmessage"`
}
err = xml.NewDecoder(response.Body).Decode(&output)
if err != nil {
if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) {
return nil, nil, 0, err
}
continue
}
if !storage.shouldRetry(retries, "[%d] %s %s returned status: %s, message: %s", threadIndex, method, requestURL, output.Status, output.Message) {
return nil, nil, 0, err
}
}
}
func (storage *FileFabricStorage) getAPIURL(function string) string {
if storage.accessToken == "" {
return "https://" + storage.endpoint + "/api/*/" + function + "/"
} else {
return "https://" + storage.endpoint + "/api/" + storage.accessToken + "/" + function + "/"
}
}
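// For example (hypothetical endpoint): before authentication,
// getAPIURL("getTokenByAuthToken") yields
// "https://files.example.com/api/*/getTokenByAuthToken/"; once the access token
// is set, getAPIURL("getListOfFiles") yields
// "https://files.example.com/api/<accessToken>/getListOfFiles/".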
// ListFiles returns the list of files and subdirectories under 'dir'. Each subdirectory returned must have a trailing '/' and
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recursively or non-recursively.
func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
if dir != "" && dir[len(dir)-1] != '/' {
dir += "/"
}
dirID, _, _, err := storage.getFileInfo(threadIndex, dir)
if err != nil {
return nil, nil, err
}
if dirID == "" {
return nil, nil, nil
}
lastID := ""
for {
formData := url.Values { "marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid" : {dirID}}
if dir == "snapshots/" {
formData["includefolders"] = []string{"y"}
}
if storage.testMode {
formData["limit"] = []string{"5"}
}
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getListOfFiles"), nil, formData)
if err != nil {
return nil, nil, err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output struct {
FileFabricResponse
FileList FileFabricFileList `xml:"files"`
Truncated int `xml:"truncated"`
}
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return nil, nil, err
}
err = checkFileFabricResponse(output.FileFabricResponse, "list the storage directory '%s'", dir)
if err != nil {
return nil, nil, err
}
if dir == "snapshots/" {
for _, file := range output.FileList.Files {
if file.Type == 1 {
files = append(files, file.Path + "/")
}
lastID = file.ID
}
} else {
for _, file := range output.FileList.Files {
if file.Type == 0 {
files = append(files, file.Path)
sizes = append(sizes, file.Size)
}
lastID = file.ID
}
}
if output.Truncated != 1 {
break
}
}
return files, sizes, nil
}
// getFileInfo returns the information about the file or directory at 'filePath'.
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) {
formData := url.Values { "path" : {storage.storageDir + filePath}}
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData)
if err != nil {
return "", false, 0, err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output struct {
FileFabricResponse
File FileFabricFile `xml:"file"`
Exists string `xml:"exists"`
}
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return "", false, 0, err
}
err = checkFileFabricResponse(output.FileFabricResponse, "get the info on '%s'", filePath)
if err != nil {
return "", false, 0, err
}
if output.Exists != "y" {
return "", false, 0, nil
} else {
if output.File.Type == 1 {
for filePath != "" && filePath[len(filePath)-1] == '/' {
filePath = filePath[:len(filePath)-1]
}
storage.directoryCacheLock.Lock()
storage.directoryCache[filePath] = output.File.ID
storage.directoryCacheLock.Unlock()
}
return output.File.ID, output.File.Type == 1, output.File.Size, nil
}
}
// GetFileInfo returns the information about the file or directory at 'filePath'. This is a function required by the Storage interface.
func (storage *FileFabricStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
fileID := ""
fileID, isDir, size, err = storage.getFileInfo(threadIndex, filePath)
return fileID != "", isDir, size, err
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) (err error) {
fileID, _, _, _ := storage.getFileInfo(threadIndex, filePath)
if fileID == "" {
return nil
}
formData := url.Values { "fi_id" : {fileID}}
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData)
if err != nil {
return err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output FileFabricResponse
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return err
}
err = checkFileFabricResponse(output, "delete file '%s'", filePath)
if err != nil {
return err
}
return nil
}
// MoveFile renames the file.
func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to string) (err error) {
fileID, _, _, _ := storage.getFileInfo(threadIndex, from)
if fileID == "" {
return nil
}
formData := url.Values { "fi_id" : {fileID}, "fi_name": {filepath.Base(to)},}
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData)
if err != nil {
return err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output FileFabricResponse
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return err
}
err = checkFileFabricResponse(output, "rename file '%s' to '%s'", from, to)
if err != nil {
return err
}
return nil
}
// createParentDirectory returns the ID of the parent directory of 'dir', creating it first if it doesn't exist.
func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir string) (parentID string, err error) {
found := strings.LastIndex(dir, "/")
if found == -1 {
return storage.storageDirID, nil
}
parent := dir[:found]
storage.directoryCacheLock.Lock()
parentID = storage.directoryCache[parent]
storage.directoryCacheLock.Unlock()
if parentID != "" {
return parentID, nil
}
parentID, err = storage.createDirectory(threadIndex, parent)
if err != nil {
if err == errFileFabricDirectoryExists {
var isDir bool
parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent)
if err != nil {
return "", err
}
if !isDir {
return "", fmt.Errorf("'%s' in the storage is a file", parent)
}
storage.directoryCacheLock.Lock()
storage.directoryCache[parent] = parentID
storage.directoryCacheLock.Unlock()
return parentID, nil
} else {
return "", err
}
}
return parentID, nil
}
// createDirectory creates a new directory.
func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) (dirID string, err error) {
for dir != "" && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
parentID, err := storage.createParentDirectory(threadIndex, dir)
if err != nil {
return "", err
}
formData := url.Values { "fi_name": {filepath.Base(dir)}, "fi_pid" : {parentID}}
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData)
if err != nil {
return "", err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output struct {
FileFabricResponse
File FileFabricFile `xml:"file"`
}
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return "", err
}
err = checkFileFabricResponse(output.FileFabricResponse, "create directory '%s'", dir)
if err != nil {
return "", err
}
storage.directoryCacheLock.Lock()
storage.directoryCache[dir] = output.File.ID
storage.directoryCacheLock.Unlock()
return output.File.ID, nil
}
func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) (err error) {
_, err = storage.createDirectory(threadIndex, dir)
if err == errFileFabricDirectoryExists {
return nil
}
return err
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
formData := url.Values { "fi_id" : {storage.storageDir + filePath}}
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData)
if err != nil {
return err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
parentID, err := storage.createParentDirectory(threadIndex, filePath)
if err != nil {
return err
}
fileName := filepath.Base(filePath)
requestBody := &bytes.Buffer{}
writer := multipart.NewWriter(requestBody)
part, _ := writer.CreateFormFile("file_1", fileName)
part.Write(content)
writer.WriteField("file_name1", fileName)
writer.WriteField("fi_pid", parentID)
writer.WriteField("fi_structtype", "g")
writer.Close()
headers := make(map[string]string)
headers["Content-Type"] = writer.FormDataContentType()
rateLimitedReader := CreateRateLimitedReader(requestBody.Bytes(), storage.UploadRateLimit/storage.threads)
readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader)
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
var output FileFabricResponse
err = xml.NewDecoder(readCloser).Decode(&output)
if err != nil {
return err
}
err = checkFileFabricResponse(output, "upload file '%s'", filePath)
if err != nil {
return err
}
return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileFabricStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *FileFabricStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *FileFabricStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of files names.
func (storage *FileFabricStorage) IsFastListing() bool { return false }
// Enable the test mode.
func (storage *FileFabricStorage) EnableTestMode() { storage.testMode = true }
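// A minimal construction sketch (hypothetical endpoint, auth token, and storage
// path; not part of the original file):
func exampleFileFabricUsage() ([]string, error) {
    storage, err := CreateFileFabricStorage("files.example.com", "my-auth-token", "backups/duplicacy", 4)
    if err != nil {
        return nil, err
    }
    // List the per-repository snapshot directories.
    files, _, err := storage.ListFiles(0, "snapshots/")
    return files, err
}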

View File

@@ -79,7 +79,7 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri
for _, f := range list {
name := f.Name()
if f.IsDir() && name[len(name)-1] != '/' {
if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
name += "/"
}
files = append(files, name)
@@ -165,8 +165,8 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
return err
}
} else {
if !stat.IsDir() {
return fmt.Errorf("The path %s is not a directory", dir)
if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 {
return fmt.Errorf("The path %s is not a directory or symlink", dir)
}
}
}

View File

@@ -41,6 +41,7 @@ type GCDStorage struct {
backoffs []int // desired backoff time in seconds for each thread
attempts []int // number of failed attempts since last success for each thread
driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive
spaces string // 'appDataFolder' if scope is drive.appdata; 'drive' otherwise
createDirectoryLock sync.Mutex
isConnected bool
@@ -86,6 +87,10 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
// Request timeout
message = e.Message
retry = true
} else if e.Code == 400 && strings.Contains(e.Message, "failedPrecondition") {
// Daily quota exceeded
message = e.Message
retry = true
} else if e.Code == 401 {
// Only retry on authorization error when storage has been connected before
if storage.isConnected {
@@ -195,7 +200,7 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
var err error
for {
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Spaces(storage.spaces)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
@@ -227,7 +232,7 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
for {
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Spaces(storage.spaces)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
@@ -251,6 +256,29 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
return file.Id, file.MimeType == GCDDirectoryMimeType, file.Size, nil
}
// Returns the id of the shared folder with the given name if it exists
func (storage *GCDStorage) findSharedFolder(threadIndex int, name string) (string, error) {
query := "name = '" + name + "' and sharedWithMe and trashed = false and mimeType = 'application/vnd.google-apps.folder'"
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Spaces(storage.spaces)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
fileList, err := q.Do()
if err != nil {
return "", err
}
if len(fileList.Files) == 0 {
return "", nil
}
file := fileList.Files[0]
return file.Id, nil
}
// getIDFromPath returns the id of the given path. If 'createDirectories' is true, create the given path and all its
// parent directories if they don't exist. Note that if 'createDirectories' is false, it may return an empty 'fileID'
// if the file doesn't exist.
@@ -340,11 +368,23 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
var tokenSource oauth2.TokenSource
scope := drive.DriveScope
if isServiceAccount {
config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
if newScope, ok := object["scope"]; ok {
scope = newScope.(string)
}
config, err := google.JWTConfigFromJSON(description, scope)
if err != nil {
return nil, err
}
if subject, ok := object["subject"]; ok {
config.Subject = subject.(string)
}
tokenSource = config.TokenSource(ctx)
} else {
gcdConfig := &GCDConfig{}
@@ -394,6 +434,7 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
backoffs: make([]int, threads),
attempts: make([]int, threads),
driveID: driveID,
spaces: "drive",
}
for i := range storage.backoffs {
@@ -401,10 +442,29 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
storage.attempts[i] = 0
}
storage.savePathID("", driveID)
storagePathID, err := storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
if scope == drive.DriveAppdataScope {
storage.spaces = "appDataFolder"
storage.savePathID("", "appDataFolder")
} else {
storage.savePathID("", driveID)
}
storagePathID := ""
// When using a service account, check if storagePath is a shared folder, which takes priority over regular folders.
if isServiceAccount && !strings.Contains(storagePath, "/") {
storagePathID, err = storage.findSharedFolder(0, storagePath)
if err != nil {
LOG_WARN("GCD_STORAGE", "Failed to check if %s is a shared folder: %v", storagePath, err)
}
}
if storagePathID == "" {
storagePathID, err = storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
}
}
// Reset the id cache and start with 'storagePathID' as the root
@@ -476,39 +536,76 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
}
return files, nil, nil
} else {
files := []string{}
sizes := []int64{}
lock := sync.Mutex {}
allFiles := []string{}
allSizes := []int64{}
errorChannel := make(chan error)
directoryChannel := make(chan string)
activeWorkers := 0
parents := []string{"chunks", "fossils"}
for i := 0; i < len(parents); i++ {
parent := parents[i]
pathID, ok := storage.findPathID(parent)
if !ok {
continue
}
entries, err := storage.listFiles(threadIndex, pathID, true, true)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
if entry.MimeType != GCDDirectoryMimeType {
name := entry.Name
if strings.HasPrefix(parent, "fossils") {
name = parent + "/" + name + ".fsl"
name = name[len("fossils/"):]
} else {
name = parent + "/" + name
name = name[len("chunks/"):]
for len(parents) > 0 || activeWorkers > 0 {
if len(parents) > 0 && activeWorkers < storage.numberOfThreads {
parent := parents[0]
parents = parents[1:]
activeWorkers++
go func(parent string) {
pathID, ok := storage.findPathID(parent)
if !ok {
return
}
entries, err := storage.listFiles(threadIndex, pathID, true, true)
if err != nil {
errorChannel <- err
return
}
LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries))
files := []string {}
sizes := []int64 {}
for _, entry := range entries {
if entry.MimeType != GCDDirectoryMimeType {
name := entry.Name
if strings.HasPrefix(parent, "fossils") {
name = parent + "/" + name + ".fsl"
name = name[len("fossils/"):]
} else {
name = parent + "/" + name
name = name[len("chunks/"):]
}
files = append(files, name)
sizes = append(sizes, entry.Size)
} else {
directoryChannel <- parent+"/"+entry.Name
storage.savePathID(parent+"/"+entry.Name, entry.Id)
}
}
lock.Lock()
allFiles = append(allFiles, files...)
allSizes = append(allSizes, sizes...)
lock.Unlock()
directoryChannel <- ""
} (parent)
}
if activeWorkers > 0 {
select {
case err := <- errorChannel:
return nil, nil, err
case directory := <- directoryChannel:
if directory == "" {
activeWorkers--
} else {
parents = append(parents, directory)
}
files = append(files, name)
sizes = append(sizes, entry.Size)
} else {
parents = append(parents, parent+"/"+entry.Name)
storage.savePathID(parent+"/"+entry.Name, entry.Id)
}
}
}
return files, sizes, nil
return allFiles, allSizes, nil
}
}
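// The listing above drains a queue of directories with a bounded pool of
// goroutines. A stripped-down sketch of the same pattern (not part of the
// original file; 'list' is a caller-supplied lister and maxWorkers must be >= 1):
func walkDirs(roots []string, maxWorkers int, list func(dir string) (subdirs []string, err error)) error {
    pending := append([]string{}, roots...)
    results := make(chan []string)
    errs := make(chan error)
    active := 0
    for len(pending) > 0 || active > 0 {
        if len(pending) > 0 && active < maxWorkers {
            dir := pending[0]
            pending = pending[1:]
            active++
            go func(dir string) {
                subdirs, err := list(dir)
                if err != nil {
                    errs <- err
                    return
                }
                results <- subdirs
            }(dir)
            continue
        }
        select {
        case err := <-errs:
            // Returning here leaks any still-running workers, mirroring the
            // simplification in the code above.
            return err
        case subdirs := <-results:
            active--
            pending = append(pending, subdirs...)
        }
    }
    return nil
}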

View File

@@ -107,6 +107,15 @@ func LOG_ERROR(logID string, format string, v ...interface{}) {
logf(ERROR, logID, format, v...)
}
func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) {
if isWarning {
logf(WARN, logID, format, v...)
} else {
logf(ERROR, logID, format, v...)
}
}
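// A usage sketch (hypothetical call site): demote an error to a warning when
// failures are tolerated, e.g.
//   LOG_WERROR(allowFailures, "DOWNLOAD_CHUNK", "chunk %s can't be found", chunkID)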
func LOG_FATAL(logID string, format string, v ...interface{}) {
logf(FATAL, logID, format, v...)
}

View File

@@ -13,6 +13,7 @@ import (
"math/rand"
"net/http"
"strings"
"strconv"
"sync"
"time"
"path/filepath"
@@ -86,7 +87,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
var response *http.Response
backoff := 1
for i := 0; i < 8; i++ {
for i := 0; i < 12; i++ {
LOG_DEBUG("ONEDRIVE_CALL", "%s %s", method, url)
@@ -129,6 +130,8 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
request.Header.Set("Content-Type", contentType)
}
request.Header.Set("User-Agent", "ISV|Acrosync|Duplicacy/2.0")
response, err = client.HTTPClient.Do(request)
if err != nil {
if client.IsConnected {
@@ -145,6 +148,9 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
time.Sleep(retryAfter * time.Millisecond)
}
backoff *= 2
if backoff > 256 {
backoff = 256
}
continue
}
return nil, 0, err
@@ -176,10 +182,20 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
} else if response.StatusCode == 409 {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
} else if response.StatusCode > 401 && response.StatusCode != 404 {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
delay := int((rand.Float32() * 0.5 + 0.5) * 1000.0 * float32(backoff))
if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
retryAfter, _ := strconv.Atoi(backoffList[0])
if retryAfter * 1000 > delay {
delay = retryAfter * 1000
}
}
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, delay)
time.Sleep(time.Duration(delay) * time.Millisecond)
backoff *= 2
if backoff > 256 {
backoff = 256
}
continue
} else {
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
@@ -314,7 +330,7 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit
// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
// is smaller than 4MB for OneDrive Business
if !client.IsBusiness || len(content) < 4 * 1024 * 1024 || (client.TestMode && rand.Int() % 2 == 0) {
if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) {
url := client.APIURL + "/drive/root:/" + path + ":/content"
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")

View File

@@ -25,7 +25,8 @@ type Preference struct {
DoNotSavePassword bool `json:"no_save_password"`
NobackupFile string `json:"nobackup_file"`
Keys map[string]string `json:"keys"`
FiltersFile string `json:"filters"`
FiltersFile string `json:"filters"`
ExcludeByAttribute bool `json:"exclude_by_attribute"`
}
var preferencePath string

View File

@@ -43,10 +43,10 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
return nil
}
return CreateSFTPStorage(server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
return CreateSFTPStorage(false, server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
}
func CreateSFTPStorage(server string, port int, username string, storageDir string, minimumNesting int,
func CreateSFTPStorage(compatibilityMode bool, server string, port int, username string, storageDir string, minimumNesting int,
authMethods []ssh.AuthMethod,
hostKeyCallback func(hostname string, remote net.Addr,
key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {
@@ -57,8 +57,21 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
HostKeyCallback: hostKeyCallback,
}
if server == "sftp.hidrive.strato.com" {
sftpConfig.Ciphers = []string{"aes128-ctr", "aes256-ctr"}
if compatibilityMode {
sftpConfig.Ciphers = []string{
"aes128-ctr", "aes192-ctr", "aes256-ctr",
"aes128-gcm@openssh.com",
"chacha20-poly1305@openssh.com",
"arcfour256", "arcfour128", "arcfour",
"aes128-cbc",
"3des-cbc",
}
sftpConfig.KeyExchanges = [] string {
"curve25519-sha256@libssh.org",
"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
"diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
"diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
}
}
serverAddress := fmt.Sprintf("%s:%d", server, port)
@@ -305,7 +318,11 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
file.Close()
return err
}
file.Close()
err = file.Close()
if err != nil {
return err
}
err = storage.getSFTPClient().Rename(temporaryFile, fullPath)
if err != nil {

View File

@@ -14,7 +14,6 @@ import (
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"time"
)
@@ -77,19 +76,19 @@ func DeleteShadowCopy() {
err := exec.Command("/sbin/umount", "-f", snapshotPath).Run()
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while unmounting snapshot")
LOG_WARN("VSS_DELETE", "Error while unmounting snapshot: %v", err)
return
}
err = exec.Command("tmutil", "deletelocalsnapshots", snapshotDate).Run()
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while deleting local snapshot")
LOG_WARN("VSS_DELETE", "Error while deleting local snapshot: %v", err)
return
}
err = os.RemoveAll(snapshotPath)
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while deleting temporary mount directory")
LOG_WARN("VSS_DELETE", "Error while deleting temporary mount directory: %v", err)
return
}
@@ -150,12 +149,13 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
return top
}
colonPos := strings.IndexByte(tmutilOutput, ':')
if colonPos < 0 {
snapshotDateRegex := regexp.MustCompile(`:\s+([0-9\-]+)`)
matched := snapshotDateRegex.FindStringSubmatch(tmutilOutput)
if matched == nil {
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
return top
}
snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
snapshotDate = matched[1]
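// For illustration, 'tmutil localsnapshot' typically prints a line like
// "Created local snapshot with date: 2021-10-24-153012" (the exact wording is an
// assumption); the regex above extracts the trailing date token.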
tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
if err != nil {
@@ -164,17 +164,17 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
}
snapshotName := "com.apple.TimeMachine." + snapshotDate
r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
snapshotNames := r.FindStringSubmatch(tmutilOutput)
if len(snapshotNames) > 0 {
snapshotName = snapshotNames[0]
snapshotNameRegex := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
matched = snapshotNameRegex.FindStringSubmatch(tmutilOutput)
if len(matched) > 0 {
snapshotName = matched[0]
} else {
LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
LOG_INFO("VSS_CREATE", "Can't find the snapshot name with 'tmutil listlocalsnapshots'; fallback to %s", snapshotName)
}
// Mount snapshot as readonly and hide from GUI i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/System/Volumes/Data", snapshotPath)
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
return top

View File

@@ -8,17 +8,22 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"sort"
"bytes"
"github.com/vmihailenco/msgpack"
)
// Snapshot represents a backup of the repository.
type Snapshot struct {
Version int
ID string // the snapshot id; must be different for different repositories
Revision int // the revision number
Options string // options used to create this snapshot (some not included)
@@ -37,14 +42,11 @@ type Snapshot struct {
// A sequence of chunks whose aggregated content is the json representation of 'ChunkLengths'.
LengthSequence []string
Files []*Entry // list of files and subdirectories
ChunkHashes []string // a sequence of chunks representing the file content
ChunkLengths []int // the length of each chunk
Flag bool // used to mark certain snapshots for deletion or copy
discardAttributes bool
}
// CreateEmptySnapshot creates an empty snapshot.
@@ -56,16 +58,14 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
}
}
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
type DirectoryListing struct {
directory string
files *[]Entry
}
snapshot = &Snapshot{
ID: id,
Revision: 0,
StartTime: time.Now().Unix(),
}
func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
filtersFile string, excludeByAttribute bool, listingChannel chan *Entry,
skippedDirectories *[]string, skippedFiles *[]string) {
var patterns []string
@@ -77,45 +77,128 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, fil
directories := make([]*Entry, 0, 256)
directories = append(directories, CreateEntry("", 0, 0, 0))
snapshot.Files = make([]*Entry, 0, 256)
attributeThreshold := 1024 * 1024
if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
}
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
snapshot.Files = append(snapshot.Files, directory)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes)
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingChannel)
if err != nil {
if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
return nil, nil, nil, err
return
}
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory %s: %v", directory.Path, err)
if skippedDirectories != nil {
*skippedDirectories = append(*skippedDirectories, directory.Path)
}
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
skippedDirectories = append(skippedDirectories, directory.Path)
continue
}
directories = append(directories, subdirectories...)
skippedFiles = append(skippedFiles, skipped...)
if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
snapshot.discardAttributes = true
for _, file := range snapshot.Files {
file.Attributes = nil
}
if skippedFiles != nil {
*skippedFiles = append(*skippedFiles, skipped...)
}
}
close(listingChannel)
}
func (snapshot *Snapshot) ListRemoteFiles(config *Config, chunkOperator *ChunkOperator, entryOut func(*Entry) bool) {
var chunks []string
for _, chunkHash := range snapshot.FileSequence {
chunks = append(chunks, chunkOperator.config.GetChunkIDFromHash(chunkHash))
}
// Remove the root entry
snapshot.Files = snapshot.Files[1:]
var chunk *Chunk
reader := sequenceReader{
sequence: snapshot.FileSequence,
buffer: new(bytes.Buffer),
refillFunc: func(chunkHash string) []byte {
if chunk != nil {
config.PutChunk(chunk)
}
chunk = chunkOperator.Download(chunkHash, 0, true)
return chunk.GetBytes()
},
}
if snapshot.Version == 0 {
LOG_INFO("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in an old version format", snapshot.ID, snapshot.Revision)
files := make([]*Entry, 0)
decoder := json.NewDecoder(&reader)
// read open bracket
_, err := decoder.Token()
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to open the snapshot %s at revision %d: not a list of entries",
snapshot.ID, snapshot.Revision)
return
}
for decoder.More() {
var entry Entry
err = decoder.Decode(&entry)
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load files specified in the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
files = append(files, &entry)
}
sort.Sort(ByName(files))
for _, file := range files {
if !entryOut(file) {
return
}
}
} else if snapshot.Version == 1 {
decoder := msgpack.NewDecoder(&reader)
lastEndChunk := 0
// while the array contains values
for _, err := decoder.PeekCode(); err != io.EOF; _, err = decoder.PeekCode() {
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to parse the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
var entry Entry
err = decoder.Decode(&entry)
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
if entry.IsFile() {
entry.StartChunk += lastEndChunk
entry.EndChunk += entry.StartChunk
lastEndChunk = entry.EndChunk
}
err = entry.check(snapshot.ChunkLengths)
if err != nil {
LOG_ERROR("SNAPSHOT_ENTRY", "Failed to load the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
if !entryOut(&entry) {
return
}
}
} else {
LOG_ERROR("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in unsupported version %d format",
snapshot.ID, snapshot.Revision, snapshot.Version)
return
}
return snapshot, skippedDirectories, skippedFiles, nil
}
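// A worked example of the version-1 delta encoding handled above (illustrative
// numbers): the stored StartChunk is relative to the previous file's EndChunk,
// and the stored EndChunk is relative to the file's own StartChunk. With two
// files stored as (0, 2) and (0, 1), the first decodes to StartChunk=0,
// EndChunk=2, and the second to StartChunk=0+2=2, EndChunk=1+2=3.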
func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
@@ -215,100 +298,6 @@ func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patt
return patterns
}
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
Files []*Entry
ChunkHashes []string
ChunkLengths []int
}
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
description, err := ioutil.ReadFile(snapshotFile)
if err != nil {
LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
return nil
}
var incompleteSnapshot IncompleteSnapshot
err = json.Unmarshal(description, &incompleteSnapshot)
if err != nil {
LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
return nil
}
var chunkHashes []string
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
hash, err := hex.DecodeString(chunkHash)
if err != nil {
LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
return nil
}
chunkHashes = append(chunkHashes, string(hash))
}
snapshot = &Snapshot{
Files: incompleteSnapshot.Files,
ChunkHashes: chunkHashes,
ChunkLengths: incompleteSnapshot.ChunkLengths,
}
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
return snapshot
}
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
func SaveIncompleteSnapshot(snapshot *Snapshot) {
var files []*Entry
for _, file := range snapshot.Files {
// All unprocessed files will have a size of -1
if file.Size >= 0 {
file.Attributes = nil
files = append(files, file)
} else {
break
}
}
var chunkHashes []string
for _, chunkHash := range snapshot.ChunkHashes {
chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
}
incompleteSnapshot := IncompleteSnapshot{
Files: files,
ChunkHashes: chunkHashes,
ChunkLengths: snapshot.ChunkLengths,
}
description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
if err != nil {
LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
return
}
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
err = ioutil.WriteFile(snapshotFile, description, 0644)
if err != nil {
LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
return
}
LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}
func RemoveIncompleteSnapshot() {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
err = os.Remove(snapshotFile)
if err != nil {
LOG_INFO("INCOMPLETE_SAVE", "Failed to remove ncomplete snapshot: %v", err)
} else {
LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
}
}
}
// CreateSnapshotFromDescription creates a snapshot from its json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {
@@ -321,6 +310,14 @@ func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err
snapshot = &Snapshot{}
if value, ok := root["version"]; !ok {
snapshot.Version = 0
} else if version, ok := value.(float64); !ok {
return nil, fmt.Errorf("Invalid version is specified in the snapshot")
} else {
snapshot.Version = int(version)
}
if value, ok := root["id"]; !ok {
return nil, fmt.Errorf("No id is specified in the snapshot")
} else if snapshot.ID, ok = value.(string); !ok {
@@ -437,6 +434,7 @@ func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
object := make(map[string]interface{})
object["version"] = 1
object["id"] = snapshot.ID
object["revision"] = snapshot.Revision
object["options"] = snapshot.Options
@@ -458,9 +456,7 @@ func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
// MarshalSequence creates a json representation for the specified chunk sequence.
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([]byte, error) {
if sequenceType == "files" {
return json.Marshal(snapshot.Files)
} else if sequenceType == "chunks" {
if sequenceType == "chunks" {
return json.Marshal(encodeSequence(snapshot.ChunkHashes))
} else {
return json.Marshal(snapshot.ChunkLengths)
@@ -489,3 +485,4 @@ func encodeSequence(sequence []string) []string {
return sequenceInHex
}

View File

@@ -20,6 +20,8 @@ import (
"strings"
"text/tabwriter"
"time"
"sync"
"sync/atomic"
"github.com/aryann/difflib"
)
@@ -147,7 +149,7 @@ func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignored
// collection would have finsihed already, while a snapshot currently being created does not affect
// this fossil collection.
if lastSnapshotTime[hostID] > 0 && lastSnapshotTime[hostID] < time.Now().Unix()-maxSnapshotRunningTime*secondsInDay {
LOG_INFO("SNAPSHOT_INACTIVE", "Ignore snapshot %s whose last revision was created %d days ago",
LOG_INFO("SNAPSHOT_INACTIVE", "Ignore snapshot %s whose last revision was created more than %d days ago",
hostID, maxSnapshotRunningTime)
continue
}
@@ -189,7 +191,6 @@ type SnapshotManager struct {
fileChunk *Chunk
snapshotCache *FileStorage
chunkDownloader *ChunkDownloader
chunkOperator *ChunkOperator
}
@@ -268,72 +269,26 @@ func (reader *sequenceReader) Read(data []byte) (n int, err error) {
return reader.buffer.Read(data)
}
func (manager *SnapshotManager) CreateChunkDownloader() {
if manager.chunkDownloader == nil {
manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, 1)
func (manager *SnapshotManager) CreateChunkOperator(resurrect bool, threads int, allowFailures bool) {
if manager.chunkOperator == nil {
manager.chunkOperator = CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, resurrect, threads, allowFailures)
}
}
// DownloadSequence returns the content represented by a sequence of chunks.
func (manager *SnapshotManager) DownloadSequence(sequence []string) (content []byte) {
manager.CreateChunkDownloader()
manager.CreateChunkOperator(false, 1, false)
for _, chunkHash := range sequence {
i := manager.chunkDownloader.AddChunk(chunkHash)
chunk := manager.chunkDownloader.WaitForChunk(i)
chunk := manager.chunkOperator.Download(chunkHash, 0, true)
content = append(content, chunk.GetBytes()...)
manager.config.PutChunk(chunk)
}
return content
}
func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot, patterns []string, attributesNeeded bool) bool {
manager.CreateChunkDownloader()
reader := sequenceReader{
sequence: snapshot.FileSequence,
buffer: new(bytes.Buffer),
refillFunc: func(chunkHash string) []byte {
i := manager.chunkDownloader.AddChunk(chunkHash)
chunk := manager.chunkDownloader.WaitForChunk(i)
return chunk.GetBytes()
},
}
files := make([]*Entry, 0)
decoder := json.NewDecoder(&reader)
// read open bracket
_, err := decoder.Token()
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load files specified in the snapshot %s at revision %d: not a list of entries",
snapshot.ID, snapshot.Revision)
return false
}
// while the array contains values
for decoder.More() {
var entry Entry
err = decoder.Decode(&entry)
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load files specified in the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return false
}
// If we don't need the attributes or the file isn't included we clear the attributes to save memory
if !attributesNeeded || (len(patterns) != 0 && !MatchPath(entry.Path, patterns)) {
entry.Attributes = nil
}
files = append(files, &entry)
}
snapshot.Files = files
return true
}
// DownloadSnapshotSequence downloads the content represented by a sequence of chunks, and then unmarshal the content
// using the specified 'loadFunction'. It purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
// using the specified 'loadFunction'. Its purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
// in a snapshot.
func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, sequenceType string) bool {
@@ -362,25 +317,23 @@ func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, seq
return true
}
// DownloadSnapshotContents loads all chunk sequences in a snapshot. A snapshot, when just created, only contains
// some metadata and theree sequence representing files, chunk hashes, and chunk lengths. This function must be called
// for the actual content of the snapshot to be usable.
func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, patterns []string, attributesNeeded bool) bool {
// DownloadSnapshotSequences loads all chunk sequences in a snapshot. A snapshot, when just created, only contains
// some metadata and three sequences representing files, chunk hashes, and chunk lengths. This function must be called
// for the chunk hash sequence and chunk length sequence to be usable.
func (manager *SnapshotManager) DownloadSnapshotSequences(snapshot *Snapshot) bool {
manager.DownloadSnapshotFileSequence(snapshot, patterns, attributesNeeded)
manager.DownloadSnapshotSequence(snapshot, "chunks")
manager.DownloadSnapshotSequence(snapshot, "lengths")
err := manager.CheckSnapshot(snapshot)
if err != nil {
LOG_ERROR("SNAPSHOT_CHECK", "The snapshot %s at revision %d contains an error: %v",
snapshot.ID, snapshot.Revision, err)
return false
}
return true
}
// ClearSnapshotSequences removes sequences loaded by DownloadSnapshotSequences
func (manager *SnapshotManager) ClearSnapshotSequences(snapshot *Snapshot) {
snapshot.ChunkHashes = nil
snapshot.ChunkLengths = nil
}
// CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string][]*Snapshot) bool {
@@ -570,10 +523,6 @@ func (manager *SnapshotManager) downloadLatestSnapshot(snapshotID string) (remot
remote = manager.DownloadSnapshot(snapshotID, latest)
}
if remote != nil {
manager.DownloadSnapshotContents(remote, nil, false)
}
return remote
}
@@ -705,6 +654,12 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showFiles: %t, showChunks: %t",
snapshotID, revisionsToList, tag, showFiles, showChunks)
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
}()
var snapshotIDs []string
var err error
@@ -742,14 +697,16 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
if len(snapshot.Tag) > 0 {
tagWithSpace = snapshot.Tag + " "
}
LOG_INFO("SNAPSHOT_INFO", "Snapshot %s revision %d created at %s %s%s",
snapshotID, revision, creationTime, tagWithSpace, snapshot.Options)
if showFiles {
manager.DownloadSnapshotFileSequence(snapshot, nil, false)
options := snapshot.Options
if snapshot.Version == 0 {
options += " (0)"
}
LOG_INFO("SNAPSHOT_INFO", "Snapshot %s revision %d created at %s %s%s",
snapshotID, revision, creationTime, tagWithSpace, options)
if showFiles {
// We need to fill in ChunkHashes and ChunkLengths to verify that each entry is valid
manager.DownloadSnapshotSequences(snapshot)
if snapshot.NumberOfFiles > 0 {
LOG_INFO("SNAPSHOT_STATS", "Files: %d", snapshot.NumberOfFiles)
@@ -761,7 +718,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
totalFileSize := int64(0)
lastChunk := 0
for _, file := range snapshot.Files {
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool {
if file.IsFile() {
totalFiles++
totalFileSize += file.Size
@@ -773,17 +730,18 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
lastChunk = file.EndChunk
}
}
}
return true
})
for _, file := range snapshot.Files {
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool {
if file.IsFile() {
LOG_INFO("SNAPSHOT_FILE", "%s", file.String(maxSizeDigits))
}
}
return true
})
metaChunks := len(snapshot.FileSequence) + len(snapshot.ChunkSequence) + len(snapshot.LengthSequence)
LOG_INFO("SNAPSHOT_STATS", "Files: %d, total size: %d, file chunks: %d, metadata chunks: %d",
totalFiles, totalFileSize, lastChunk+1, metaChunks)
LOG_INFO("SNAPSHOT_STATS", "Total size: %d, file chunks: %d, metadata chunks: %d", totalFileSize, lastChunk+1, metaChunks)
}
if showChunks {
@@ -800,11 +758,15 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
}
// ListSnapshots shows the information about a snapshot.
// CheckSnapshots checks if there is any problem with a snapshot.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int) bool {
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int, allowFailures bool) bool {
manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads)
manager.CreateChunkOperator(resurrect, threads, allowFailures)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
}()
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
@@ -821,6 +783,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
// Store the index of the snapshot that references each chunk; if the chunk is shared by multiple snapshots, the index is -1
chunkSnapshotMap := make(map[string]int)
emptyChunks := 0
LOG_INFO("SNAPSHOT_CHECK", "Listing all chunks")
allChunks, allSizes := manager.ListAllFiles(manager.storage, chunkDir)
@@ -835,6 +799,11 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
chunk = strings.Replace(chunk, "/", "", -1)
chunkSizeMap[chunk] = allSizes[i]
if allSizes[i] == 0 && !strings.HasSuffix(chunk, ".tmp") {
LOG_WARN("SNAPSHOT_CHECK", "Chunk %s has a size of 0", chunk)
emptyChunks++
}
}
if snapshotID == "" || showStatistics || showTabular {
@@ -897,8 +866,9 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
for _, snapshot := range snapshotMap[snapshotID] {
if checkFiles {
manager.DownloadSnapshotContents(snapshot, nil, false)
manager.DownloadSnapshotSequences(snapshot)
manager.VerifySnapshot(snapshot)
manager.ClearSnapshotSequences(snapshot)
continue
}
@@ -991,19 +961,137 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
return false
}
if emptyChunks > 0 {
LOG_ERROR("SNAPSHOT_CHECK", "%d chunks have a size of 0", emptyChunks)
return false
}
if showTabular {
manager.ShowStatisticsTabular(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
} else if showStatistics {
manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
}
if checkChunks && !checkFiles {
manager.chunkDownloader.snapshotCache = nil
LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
for chunkHash := range *allChunkHashes {
manager.chunkDownloader.AddChunk(chunkHash)
// Don't verify chunks with -files
if !checkChunks || checkFiles {
return true
}
// This contains chunks that have been verified in previous checks and is loaded from
// .duplicacy/cache/storage/verified_chunks. Note that it contains chunk IDs, not chunk
// hashes.
verifiedChunks := make(map[string]int64)
var verifiedChunksLock sync.Mutex
verifiedChunksFile := "verified_chunks"
manager.fileChunk.Reset(false)
err = manager.snapshotCache.DownloadFile(0, verifiedChunksFile, manager.fileChunk)
if err != nil {
if !os.IsNotExist(err) {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to load the file containing verified chunks: %v", err)
}
manager.chunkDownloader.WaitForCompletion()
} else {
err = json.Unmarshal(manager.fileChunk.GetBytes(), &verifiedChunks)
if err != nil {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to parse the file containing verified chunks: %v", err)
}
}
numberOfVerifiedChunks := len(verifiedChunks)
saveVerifiedChunks := func() {
if len(verifiedChunks) > numberOfVerifiedChunks {
var description []byte
description, err = json.Marshal(verifiedChunks)
if err != nil {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to create a json file for the set of verified chunks: %v", err)
} else {
err = manager.snapshotCache.UploadFile(0, verifiedChunksFile, description)
if err != nil {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err)
} else {
LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks)
}
}
}
}
defer saveVerifiedChunks()
RunAtError = saveVerifiedChunks
LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
startTime := time.Now()
var chunkHashes []string
skippedChunks := 0
for chunkHash := range *allChunkHashes {
if len(verifiedChunks) > 0 {
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
if _, found := verifiedChunks[chunkID]; found {
skippedChunks++
continue
}
}
chunkHashes = append(chunkHashes, chunkHash)
}
if skippedChunks > 0 {
LOG_INFO("SNAPSHOT_VERIFY", "Skipped %d chunks that have already been verified before", skippedChunks)
}
var totalDownloadedChunkSize int64
var totalDownloadedChunks int64
totalChunks := int64(len(chunkHashes))
chunkChannel := make(chan int, threads)
var wg sync.WaitGroup
wg.Add(threads)
for i := 0; i < threads; i++ {
go func() {
defer CatchLogException()
for {
chunkIndex, ok := <- chunkChannel
if !ok {
wg.Done()
return
}
chunk := manager.chunkOperator.Download(chunkHashes[chunkIndex], chunkIndex, false)
if chunk == nil {
continue
}
chunkID := manager.config.GetChunkIDFromHash(chunkHashes[chunkIndex])
verifiedChunksLock.Lock()
verifiedChunks[chunkID] = startTime.Unix()
verifiedChunksLock.Unlock()
downloadedChunkSize := atomic.AddInt64(&totalDownloadedChunkSize, int64(chunk.GetLength()))
downloadedChunks := atomic.AddInt64(&totalDownloadedChunks, 1)
elapsedTime := time.Since(startTime).Seconds()
speed := int64(float64(downloadedChunkSize) / elapsedTime)
remainingTime := int64(float64(totalChunks - downloadedChunks) / float64(downloadedChunks) * elapsedTime)
percentage := float64(downloadedChunks) / float64(totalChunks) * 100.0
LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
manager.config.PutChunk(chunk)
}
} ()
}
for chunkIndex := range chunkHashes {
chunkChannel <- chunkIndex
}
close(chunkChannel)
wg.Wait()
manager.chunkOperator.WaitForCompletion()
if manager.chunkOperator.NumberOfFailedChunks > 0 {
LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkOperator.NumberOfFailedChunks, len(*allChunkHashes))
} else {
LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
}
return true
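
For reference, the verified_chunks cache used above is a flat JSON object mapping chunk IDs (not chunk hashes) to the Unix time of their last successful verification. A minimal standalone sketch of the read, extend, and write cycle follows; the file name matches the code above, but note the real code stores the file through the snapshot cache rather than the local filesystem, and the chunk ID shown is hypothetical:

package main

import (
    "encoding/json"
    "os"
    "time"
)

func main() {
    // Load the existing cache; a missing or corrupt file simply starts over.
    verified := map[string]int64{}
    if data, err := os.ReadFile("verified_chunks"); err == nil {
        json.Unmarshal(data, &verified)
    }

    // Record a newly verified chunk ("0a1b2c" is a hypothetical chunk ID).
    verified["0a1b2c"] = time.Now().Unix()

    // Persist the updated cache.
    if data, err := json.Marshal(verified); err == nil {
        os.WriteFile("verified_chunks", data, 0644)
    }
}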
@@ -1170,14 +1258,6 @@ func (manager *SnapshotManager) PrintSnapshot(snapshot *Snapshot) bool {
object["chunks"] = manager.ConvertSequence(snapshot.ChunkHashes)
object["lengths"] = snapshot.ChunkLengths
// By default the json serialization of a file entry contains the path in base64 format. This is
// to convert every file entry into an object which includes the path in a more readable format.
var files []map[string]interface{}
for _, file := range snapshot.Files {
files = append(files, file.convertToObject(false))
}
object["files"] = files
description, err := json.MarshalIndent(object, "", " ")
if err != nil {
@@ -1186,8 +1266,24 @@ func (manager *SnapshotManager) PrintSnapshot(snapshot *Snapshot) bool {
return false
}
fmt.Printf("%s\n", string(description))
// Don't print the ending bracket
fmt.Printf("%s", string(description[:len(description) - 2]))
fmt.Printf(",\n \"files\": [\n")
isFirstFile := true
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool {
fileDescription, _ := json.MarshalIndent(file.convertToObject(false), "", " ")
if isFirstFile {
fmt.Printf("%s", fileDescription)
isFirstFile = false
} else {
fmt.Printf(",\n%s", fileDescription)
}
return true
})
fmt.Printf(" ]\n}\n")
return true
}
@@ -1203,17 +1299,20 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
return false
}
files := make([]*Entry, 0, len(snapshot.Files)/2)
for _, file := range snapshot.Files {
files := make([]*Entry, 0)
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool {
if file.IsFile() && file.Size != 0 {
file.Attributes = nil
files = append(files, file)
}
}
return true
})
sort.Sort(ByChunk(files))
corruptedFiles := 0
var lastChunk *Chunk
for _, file := range files {
if !manager.RetrieveFile(snapshot, file, func([]byte) {}) {
if !manager.RetrieveFile(snapshot, file, &lastChunk, func([]byte) {}) {
corruptedFiles++
}
LOG_TRACE("SNAPSHOT_VERIFY", "%s", file.Path)
@@ -1231,21 +1330,13 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
}
// RetrieveFile retrieves the file in the specified snapshot.
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, lastChunk **Chunk, output func([]byte)) bool {
if file.Size == 0 {
return true
}
manager.CreateChunkDownloader()
// Temporarily disable the snapshot cache of the download so that downloaded file chunks won't be saved
// to the cache.
snapshotCache := manager.chunkDownloader.snapshotCache
manager.chunkDownloader.snapshotCache = nil
defer func() {
manager.chunkDownloader.snapshotCache = snapshotCache
}()
manager.CreateChunkOperator(false, 1, false)
fileHasher := manager.config.NewFileHasher()
alternateHash := false
@@ -1266,12 +1357,19 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
}
hash := snapshot.ChunkHashes[i]
lastChunk, lastChunkHash := manager.chunkDownloader.GetLastDownloadedChunk()
if lastChunkHash != hash {
i := manager.chunkDownloader.AddChunk(hash)
chunk = manager.chunkDownloader.WaitForChunk(i)
if lastChunk == nil {
chunk = manager.chunkOperator.Download(hash, 0, false)
} else if *lastChunk == nil {
chunk = manager.chunkOperator.Download(hash, 0, false)
*lastChunk = chunk
} else {
chunk = lastChunk
if (*lastChunk).GetHash() == hash {
chunk = *lastChunk
} else {
manager.config.PutChunk(*lastChunk)
chunk = manager.chunkOperator.Download(hash, 0, false)
*lastChunk = chunk
}
}
output(chunk.GetBytes()[start:end])
@@ -1295,10 +1393,18 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
// FindFile returns the file entry that has the given file name.
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry {
for _, entry := range snapshot.Files {
var found *Entry
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool {
if entry.Path == filePath {
return entry
found = entry
return false
}
return true
})
if found != nil {
return found
}
if !suppressError {
@@ -1330,13 +1436,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
return false
}
patterns := []string{}
if path != "" {
patterns = []string{path}
}
// If no path is specified, we're printing the snapshot so we need all attributes
if !manager.DownloadSnapshotContents(snapshot, patterns, path == "") {
// If no path is specified, we're printing the snapshot
if !manager.DownloadSnapshotSequences(snapshot) {
return false
}
@@ -1346,7 +1447,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
}
file := manager.FindFile(snapshot, path, false)
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
if !manager.RetrieveFile(snapshot, file, nil, func(chunk []byte) {
fmt.Printf("%s", chunk)
}) {
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
@@ -1359,27 +1460,43 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
// Diff compares two snapshots, or two revision of a file if the file argument is given.
func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
filePath string, compareByHash bool, nobackupFile string, filtersFile string) bool {
filePath string, compareByHash bool, nobackupFile string, filtersFile string, excludeByAttribute bool) bool {
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
top, snapshotID, revisions, filePath, compareByHash)
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
} ()
var leftSnapshot *Snapshot
var rightSnapshot *Snapshot
var err error
leftSnapshotFiles := make([]*Entry, 0, 1024)
rightSnapshotFiles := make([]*Entry, 0, 1024)
// If no or only one revision is specified, use the on-disk version for the right-hand side.
if len(revisions) <= 1 {
// Only scan the repository if filePath is not provided
if len(filePath) == 0 {
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
rightSnapshot = CreateEmptySnapshot(snapshotID)
localListingChannel := make(chan *Entry)
go func() {
defer CatchLogException()
rightSnapshot.ListLocalFiles(top, nobackupFile, filtersFile, excludeByAttribute, localListingChannel, nil, nil)
} ()
for entry := range localListingChannel {
entry.Attributes = nil // attributes are not compared
rightSnapshotFiles = append(rightSnapshotFiles, entry)
}
}
} else {
rightSnapshot = manager.DownloadSnapshot(snapshotID, revisions[1])
manager.DownloadSnapshotSequences(rightSnapshot)
}
// If no revision is specified, use the latest revision as the left-hand side.
@@ -1393,15 +1510,11 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
leftSnapshot = manager.DownloadSnapshot(snapshotID, revisions[0])
}
manager.DownloadSnapshotSequences(leftSnapshot)
if len(filePath) > 0 {
manager.DownloadSnapshotContents(leftSnapshot, nil, false)
if rightSnapshot != nil && rightSnapshot.Revision != 0 {
manager.DownloadSnapshotContents(rightSnapshot, nil, false)
}
var leftFile []byte
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), func(content []byte) {
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), nil, func(content []byte) {
leftFile = append(leftFile, content...)
}) {
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
@@ -1411,7 +1524,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
var rightFile []byte
if rightSnapshot != nil {
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), func(content []byte) {
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), nil, func(content []byte) {
rightFile = append(rightFile, content...)
}) {
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
@@ -1472,24 +1585,32 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
return true
}
// We only need to decode the 'files' sequence, not 'chunkhashes' or 'chunklengths'
manager.DownloadSnapshotFileSequence(leftSnapshot, nil, false)
if rightSnapshot != nil && rightSnapshot.Revision != 0 {
manager.DownloadSnapshotFileSequence(rightSnapshot, nil, false)
leftSnapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool {
entry.Attributes = nil
leftSnapshotFiles = append(leftSnapshotFiles, entry)
return true
})
if rightSnapshot.Revision != 0 {
rightSnapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool {
entry.Attributes = nil
rightSnapshotFiles = append(rightSnapshotFiles, entry)
return true
})
}
maxSize := int64(9)
maxSizeDigits := 1
// Find the max Size value in order for pretty alignment.
for _, file := range leftSnapshot.Files {
for _, file := range leftSnapshotFiles {
for !file.IsDir() && file.Size > maxSize {
maxSize = maxSize*10 + 9
maxSizeDigits += 1
}
}
for _, file := range rightSnapshot.Files {
for _, file := range rightSnapshotFiles {
for !file.IsDir() && file.Size > maxSize {
maxSize = maxSize*10 + 9
maxSizeDigits += 1
@@ -1499,22 +1620,22 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
buffer := make([]byte, 32*1024)
var i, j int
for i < len(leftSnapshot.Files) || j < len(rightSnapshot.Files) {
for i < len(leftSnapshotFiles) || j < len(rightSnapshotFiles) {
if i >= len(leftSnapshot.Files) {
if rightSnapshot.Files[j].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "+ %s", rightSnapshot.Files[j].String(maxSizeDigits))
if i >= len(leftSnapshotFiles) {
if rightSnapshotFiles[j].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "+ %s", rightSnapshotFiles[j].String(maxSizeDigits))
}
j++
} else if j >= len(rightSnapshot.Files) {
if leftSnapshot.Files[i].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "- %s", leftSnapshot.Files[i].String(maxSizeDigits))
} else if j >= len(rightSnapshotFiles) {
if leftSnapshotFiles[i].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "- %s", leftSnapshotFiles[i].String(maxSizeDigits))
}
i++
} else {
left := leftSnapshot.Files[i]
right := rightSnapshot.Files[j]
left := leftSnapshotFiles[i]
right := rightSnapshotFiles[j]
if !left.IsFile() {
i++
@@ -1569,6 +1690,12 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
LOG_DEBUG("HISTORY_PARAMETERS", "top: %s, id: %s, revisions: %v, path: %s, showLocalHash: %t",
top, snapshotID, revisions, filePath, showLocalHash)
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
} ()
var err error
if len(revisions) == 0 {
@@ -1583,7 +1710,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
sort.Ints(revisions)
for _, revision := range revisions {
snapshot := manager.DownloadSnapshot(snapshotID, revision)
manager.DownloadSnapshotFileSequence(snapshot, nil, false)
manager.DownloadSnapshotSequences(snapshot)
file := manager.FindFile(snapshot, filePath, true)
if file != nil {
@@ -1691,8 +1818,11 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
}
manager.chunkOperator = CreateChunkOperator(manager.storage, threads)
defer manager.chunkOperator.Stop()
manager.CreateChunkOperator(false, threads, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
} ()
prefPath := GetDuplicacyPreferencePath()
logDir := path.Join(prefPath, "logs")
@@ -2074,7 +2204,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
return false
}
manager.chunkOperator.Stop()
manager.chunkOperator.WaitForCompletion()
for _, fossil := range manager.chunkOperator.fossils {
collection.AddFossil(fossil)
}
@@ -2155,6 +2285,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
} else {
manager.CleanSnapshotCache(nil, allSnapshots)
}
manager.chunkOperator.WaitForCompletion()
return true
}
@@ -2367,8 +2498,6 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
// CheckSnapshot performs sanity checks on the given snapshot.
func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
lastChunk := 0
lastOffset := 0
var lastEntry *Entry
numberOfChunks := len(snapshot.ChunkHashes)
@@ -2378,57 +2507,39 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
numberOfChunks, len(snapshot.ChunkLengths))
}
entries := make([]*Entry, len(snapshot.Files))
copy(entries, snapshot.Files)
sort.Sort(ByChunk(entries))
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool {
for _, entry := range snapshot.Files {
if lastEntry != nil && lastEntry.Compare(entry) >= 0 && !strings.Contains(lastEntry.Path, "\ufffd") {
return fmt.Errorf("The entry %s appears before the entry %s", lastEntry.Path, entry.Path)
err = fmt.Errorf("The entry %s appears before the entry %s", lastEntry.Path, entry.Path)
return false
}
lastEntry = entry
}
for _, entry := range entries {
if !entry.IsFile() || entry.Size == 0 {
continue
return true
}
if entry.StartChunk < 0 {
return fmt.Errorf("The file %s starts at chunk %d", entry.Path, entry.StartChunk)
err = fmt.Errorf("The file %s starts at chunk %d", entry.Path, entry.StartChunk)
return false
}
if entry.EndChunk >= numberOfChunks {
return fmt.Errorf("The file %s ends at chunk %d while the number of chunks is %d",
err = fmt.Errorf("The file %s ends at chunk %d while the number of chunks is %d",
entry.Path, entry.EndChunk, numberOfChunks)
return false
}
if entry.EndChunk < entry.StartChunk {
return fmt.Errorf("The file %s starts at chunk %d and ends at chunk %d",
fmt.Errorf("The file %s starts at chunk %d and ends at chunk %d",
entry.Path, entry.StartChunk, entry.EndChunk)
return false
}
if entry.StartOffset > 0 {
if entry.StartChunk < lastChunk {
return fmt.Errorf("The file %s starts at chunk %d while the last chunk is %d",
entry.Path, entry.StartChunk, lastChunk)
}
if entry.StartChunk > lastChunk+1 {
return fmt.Errorf("The file %s starts at chunk %d while the last chunk is %d",
entry.Path, entry.StartChunk, lastChunk)
}
if entry.StartChunk == lastChunk && entry.StartOffset < lastOffset {
return fmt.Errorf("The file %s starts at offset %d of chunk %d while the last file ends at offset %d",
entry.Path, entry.StartOffset, entry.StartChunk, lastOffset)
}
if entry.StartChunk == entry.EndChunk && entry.StartOffset > entry.EndOffset {
return fmt.Errorf("The file %s starts at offset %d and ends at offset %d of the same chunk %d",
entry.Path, entry.StartOffset, entry.EndOffset, entry.StartChunk)
}
if entry.StartChunk == entry.EndChunk && entry.StartOffset > entry.EndOffset {
err = fmt.Errorf("The file %s starts at offset %d and ends at offset %d of the same chunk %d",
entry.Path, entry.StartOffset, entry.EndOffset, entry.StartChunk)
return false
}
fileSize := int64(0)
@@ -2448,22 +2559,13 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
}
if entry.Size != fileSize {
return fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
err = fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
entry.Path, entry.Size, fileSize)
return false
}
lastChunk = entry.EndChunk
lastOffset = entry.EndOffset
}
if len(entries) > 0 && entries[0].StartChunk != 0 {
return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk)
}
// There may be a last chunk whose size is 0 so we allow this to happen
if lastChunk < numberOfChunks-2 {
return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
}
return true
})
return nil
}
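
To make the offset checks above concrete, here is a small helper sketch of how a file's size relates to its chunk span. The semantics are an assumption inferred from the chunk.GetBytes()[start:end] slicing in RetrieveFile: StartOffset is the byte where the file begins in its first chunk, and EndOffset is the exclusive end within its last chunk.

// fileSize computes the byte count covered by a chunk span under the
// assumed semantics described above (EndOffset exclusive).
func fileSize(chunkLengths []int, startChunk, startOffset, endChunk, endOffset int) int64 {
    size := int64(0)
    for i := startChunk; i <= endChunk; i++ {
        start, end := 0, chunkLengths[i]
        if i == startChunk {
            start = startOffset
        }
        if i == endChunk {
            end = endOffset
        }
        size += int64(end - start)
    }
    return size
}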


@@ -116,19 +116,18 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
func uploadTestChunk(manager *SnapshotManager, content []byte) string {
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
chunkOperator := CreateChunkOperator(manager.config, manager.storage, nil, false, *testThreads, false)
chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize)
}
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
chunk := CreateChunk(manager.config, true)
chunk.Reset(true)
chunk.Write(content)
chunkUploader.StartChunk(chunk, 0)
chunkUploader.Stop()
chunkOperator.Upload(chunk, 0, false)
chunkOperator.WaitForCompletion()
chunkOperator.Stop()
return chunk.GetHash()
}
@@ -180,6 +179,12 @@ func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision in
func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
}()
var snapshotIDs []string
var err error
@@ -620,7 +625,7 @@ func TestPruneNewSnapshots(t *testing.T) {
// Now chunkHash1 will be resurrected
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 4, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1, false)
}
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -669,7 +674,7 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Run the prune again but the fossil collection should be ignored, since revision 1 still exists
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 2)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1, false)
// Prune snapshot 1 again
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -683,5 +688,5 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Run the prune again and this time the fossil collection will be processed and the fossils removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1, false)
}


@@ -268,7 +268,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
if matched == nil {
LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
return nil
} else if matched[1] == "sftp" {
} else if matched[1] == "sftp" || matched[1] == "sftpc" {
server := matched[3]
username := matched[2]
storageDir := matched[5]
@@ -440,7 +440,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return checkHostKey(hostname, remote, key)
}
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
sftpStorage, err := CreateSFTPStorage(matched[1] == "sftpc", server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
return nil
@@ -540,7 +540,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "dropbox" {
storageDir := matched[3] + matched[5]
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
token := GetPassword(preference, "dropbox_token", "Enter Dropbox refresh token:", true, resetPassword)
dropboxStorage, err := CreateDropboxStorage(token, storageDir, 1, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
@@ -678,6 +678,10 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "webdav" || matched[1] == "webdav-http" {
server := matched[3]
username := matched[2]
if username == "" {
LOG_ERROR("STORAGE_CREATE", "No username is provided to access the WebDAV storage")
return nil
}
username = username[:len(username)-1]
storageDir := matched[5]
port := 0
@@ -698,6 +702,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "webdav_password", password)
return webDAVStorage
} else if matched[1] == "fabric" {
endpoint := matched[3]
storageDir := matched[5]
prompt := fmt.Sprintf("Enter the token for accessing the Storage Made Easy File Fabric storage:")
token := GetPassword(preference, "fabric_token", prompt, true, resetPassword)
smeStorage, err := CreateFileFabricStorage(endpoint, token, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the File Fabric storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "fabric_token", token)
return smeStorage
} else if matched[1] == "storj" {
satellite := matched[2] + matched[3]
bucket := matched[5]
storageDir := ""
index := strings.Index(bucket, "/")
if index >= 0 {
storageDir = bucket[index + 1:]
bucket = bucket[:index]
}
apiKey := GetPassword(preference, "storj_key", "Enter the API access key:", true, resetPassword)
passphrase := GetPassword(preference, "storj_passphrase", "Enter the passphrase:", true, resetPassword)
storjStorage, err := CreateStorjStorage(satellite, apiKey, passphrase, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
return nil
}
return storjStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil


@@ -22,26 +22,17 @@ import (
"math/rand"
)
var testStorageName string
var testRateLimit int
var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec")
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.Parse()
}
var testStorageName = flag.String("storage", "", "the test storage to use")
var testRateLimit = flag.Int("limit-rate", 0, "maximum transfer speed in kbytes/sec")
var testQuickMode = flag.Bool("quick", false, "quick test")
var testThreads = flag.Int("threads", 1, "number of downloading/uploading threads")
var testFixedChunkSize = flag.Bool("fixed-chunk-size", false, "fixed chunk size")
var testRSAEncryption = flag.Bool("rsa", false, "enable RSA encryption")
var testErasureCoding = flag.Bool("erasure-coding", false, "enable Erasure Coding")
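
With the flags now declared at package scope instead of being parsed in init(), flag.Parse has to run after the testing package registers its own -test.* flags; parsing during init() breaks on newer Go releases. A minimal sketch of the usual way to do that, assuming a TestMain in the same package (hypothetical here, with flag, os, and testing imported):

func TestMain(m *testing.M) {
    flag.Parse() // safe here: the -test.* flags are already registered
    os.Exit(m.Run())
}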
func loadStorage(localStoragePath string, threads int) (Storage, error) {
if testStorageName == "" || testStorageName == "file" {
if *testStorageName == "" || *testStorageName == "file" {
storage, err := CreateFileStorage(localStoragePath, false, threads)
if storage != nil {
// Use a read level of at least 2 because this will catch more errors than a read level of 1.
@@ -62,113 +53,132 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
return nil, err
}
config, found := configs[testStorageName]
config, found := configs[*testStorageName]
if !found {
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
return nil, fmt.Errorf("No storage named '%s' found", *testStorageName)
}
if testStorageName == "flat" {
if *testStorageName == "flat" {
storage, err := CreateFileStorage(localStoragePath, false, threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "samba" {
} else if *testStorageName == "samba" {
storage, err := CreateFileStorage(localStoragePath, true, threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "sftp" {
} else if *testStorageName == "sftp" {
port, _ := strconv.Atoi(config["port"])
storage, err := CreateSFTPStorageWithPassword(config["server"], port, config["username"], config["directory"], 2, config["password"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "s3" {
} else if *testStorageName == "s3" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "wasabi" {
} else if *testStorageName == "wasabi" {
storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "s3c" {
} else if *testStorageName == "s3c" {
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "digitalocean" {
} else if *testStorageName == "digitalocean" {
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "minio" {
} else if *testStorageName == "minio" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, false, true)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "minios" {
} else if *testStorageName == "minios" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, true)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "dropbox" {
} else if *testStorageName == "dropbox" {
storage, err := CreateDropboxStorage(config["token"], config["directory"], 1, threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "b2" {
} else if *testStorageName == "b2" {
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-s3" {
} else if *testStorageName == "gcs-s3" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs" {
} else if *testStorageName == "gcs" {
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-sa" {
} else if *testStorageName == "gcs-sa" {
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "azure" {
} else if *testStorageName == "azure" {
storage, err := CreateAzureStorage(config["account"], config["key"], config["container"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "acd" {
} else if *testStorageName == "acd" {
storage, err := CreateACDStorage(config["token_file"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd" {
} else if *testStorageName == "gcd" {
storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd-shared" {
} else if *testStorageName == "gcd-shared" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
} else if *testStorageName == "gcd-impersonate" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "odb" {
} else if *testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "hubic" {
} else if *testStorageName == "hubic" {
storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "memset" {
} else if *testStorageName == "memset" {
storage, err := CreateSwiftStorage(config["storage_url"], config["key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "pcloud" || testStorageName == "box" {
} else if *testStorageName == "pcloud" || *testStorageName == "box" {
storage, err := CreateWebDAVStorage(config["host"], 0, config["username"], config["password"], config["storage_path"], false, threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "fabric" {
storage, err := CreateFileFabricStorage(config["endpoint"], config["token"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "storj" {
storage, err := CreateStorjStorage(config["satellite"], config["key"], config["passphrase"], config["bucket"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
}
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
return nil, fmt.Errorf("Invalid storage named: %s", *testStorageName)
}
func cleanStorage(storage Storage) {
@@ -308,7 +318,7 @@ func TestStorage(t *testing.T) {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
LOG_INFO("STORAGE_TEST", "storage: %s", *testStorageName)
threads := 8
storage, err := loadStorage(testDir, threads)
@@ -317,7 +327,7 @@ func TestStorage(t *testing.T) {
return
}
storage.EnableTestMode()
storage.SetRateLimits(testRateLimit, testRateLimit)
storage.SetRateLimits(*testRateLimit, *testRateLimit)
delay := 0
if _, ok := storage.(*ACDStorage); ok {
@@ -439,7 +449,7 @@ func TestStorage(t *testing.T) {
numberOfFiles := 10
maxFileSize := 64 * 1024
if testQuickMode {
if *testQuickMode {
numberOfFiles = 2
}
@@ -574,7 +584,7 @@ func TestCleanStorage(t *testing.T) {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
LOG_INFO("STORAGE_TEST", "storage: %s", *testStorageName)
storage, err := loadStorage(testDir, 1)
if err != nil {


@@ -0,0 +1,184 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"fmt"
"io"
"context"
"storj.io/uplink"
)
// StorjStorage is a storage backend for Storj.
type StorjStorage struct {
StorageBase
project *uplink.Project
bucket string
storageDir string
numberOfThreads int
}
// CreateStorjStorage creates a Storj storage.
func CreateStorjStorage(satellite string, apiKey string, passphrase string,
bucket string, storageDir string, threads int) (storage *StorjStorage, err error) {
ctx := context.Background()
access, err := uplink.RequestAccessWithPassphrase(ctx, satellite, apiKey, passphrase)
if err != nil {
return nil, fmt.Errorf("cannot request the access grant: %v", err)
}
project, err := uplink.OpenProject(ctx, access)
if err != nil {
return nil, fmt.Errorf("cannot open the project: %v", err)
}
_, err = project.StatBucket(ctx, bucket)
if err != nil {
return nil, fmt.Errorf("cannot found the bucket: %v", err)
}
if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
storageDir += "/"
}
storage = &StorjStorage {
project: project,
bucket: bucket,
storageDir: storageDir,
numberOfThreads: threads,
}
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *StorjStorage) ListFiles(threadIndex int, dir string) (
files []string, sizes []int64, err error) {
fullPath := storage.storageDir + dir
if fullPath != "" && fullPath[len(fullPath) - 1] != '/' {
fullPath += "/"
}
options := uplink.ListObjectsOptions {
Prefix: fullPath,
System: true, // request SystemMetadata which includes ContentLength
}
objects := storage.project.ListObjects(context.Background(), storage.bucket, &options)
for objects.Next() {
if objects.Err() != nil {
return nil, nil, objects.Err()
}
item := objects.Item()
name := item.Key[len(fullPath):]
size := item.System.ContentLength
if item.IsPrefix {
if name != "" && name[len(name) - 1] != '/' {
name += "/"
size = 0
}
}
files = append(files, name)
sizes = append(sizes, size)
}
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *StorjStorage) DeleteFile(threadIndex int, filePath string) (err error) {
_, err = storage.project.DeleteObject(context.Background(), storage.bucket,
storage.storageDir + filePath)
return err
}
// MoveFile renames the file.
func (storage *StorjStorage) MoveFile(threadIndex int, from string, to string) (err error) {
err = storage.project.MoveObject(context.Background(), storage.bucket,
storage.storageDir + from, storage.bucket, storage.storageDir + to, nil)
return err
}
// CreateDirectory creates a new directory.
func (storage *StorjStorage) CreateDirectory(threadIndex int, dir string) (err error) {
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *StorjStorage) GetFileInfo(threadIndex int, filePath string) (
exist bool, isDir bool, size int64, err error) {
info, err := storage.project.StatObject(context.Background(), storage.bucket,
storage.storageDir + filePath)
if err != nil {
// don't swallow real errors: only a missing object means "does not exist"
if errors.Is(err, uplink.ErrObjectNotFound) {
return false, false, 0, nil
}
return false, false, 0, err
}
return true, info.IsPrefix, info.System.ContentLength, nil
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *StorjStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, err := storage.project.DownloadObject(context.Background(), storage.bucket,
storage.storageDir + filePath, nil)
if err != nil {
return err
}
defer file.Close()
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
return err
}
return nil
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *StorjStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
file, err := storage.project.UploadObject(context.Background(), storage.bucket,
storage.storageDir + filePath, nil)
if err != nil {
return err
}
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = io.Copy(file, reader)
if err != nil {
return err
}
err = file.Commit()
if err != nil {
return err
}
return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *StorjStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *StorjStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *StorjStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of file names.
func (storage *StorjStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *StorjStorage) EnableTestMode() {}
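
A minimal usage sketch of this backend, calling only the functions defined above from inside the duplicacy package; the satellite address, API key, passphrase, bucket, and path are placeholders:

// Assumed to run inside a function in the duplicacy package.
storage, err := CreateStorjStorage("satellite.example:7777", "my-api-key",
    "my-passphrase", "backups", "repo1/", 4)
if err != nil {
    LOG_ERROR("STORJ_TEST", "Failed to create the Storj storage: %v", err)
    return
}
if err := storage.UploadFile(0, "config", []byte("hello")); err == nil {
    exist, _, size, _ := storage.GetFileInfo(0, "config")
    LOG_INFO("STORJ_TEST", "config exists: %t, size: %d", exist, size)
}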


@@ -5,16 +5,18 @@
package duplicacy
import (
"context"
"strconv"
"strings"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
)
type SwiftStorage struct {
StorageBase
ctx context.Context
connection *swift.Connection
container string
storageDir string
@@ -106,6 +108,8 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
arguments["protocol"] = "https"
}
ctx, _ := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
// Please refer to https://godoc.org/github.com/ncw/swift#Connection
connection := swift.Connection{
Domain: arguments["domain"],
@@ -129,12 +133,18 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
TrustId: arguments["trust_id"],
}
_, _, err = connection.Container(container)
err = connection.Authenticate(ctx)
if err != nil {
return nil, err
}
_, _, err = connection.Container(ctx, container)
if err != nil {
return nil, err
}
storage = &SwiftStorage{
ctx: ctx,
connection: &connection,
container: container,
storageDir: storageDir,
@@ -163,7 +173,7 @@ func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []str
options.Delimiter = '/'
}
objects, err := storage.connection.ObjectsAll(storage.container, &options)
objects, err := storage.connection.ObjectsAll(storage.ctx, storage.container, &options)
if err != nil {
return nil, nil, err
}
@@ -185,12 +195,12 @@ func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []str
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SwiftStorage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.connection.ObjectDelete(storage.container, storage.storageDir+filePath)
return storage.connection.ObjectDelete(storage.ctx, storage.container, storage.storageDir+filePath)
}
// MoveFile renames the file.
func (storage *SwiftStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.connection.ObjectMove(storage.container, storage.storageDir+from,
return storage.connection.ObjectMove(storage.ctx, storage.container, storage.storageDir+from,
storage.container, storage.storageDir+to)
}
@@ -202,7 +212,7 @@ func (storage *SwiftStorage) CreateDirectory(threadIndex int, dir string) (err e
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
object, _, err := storage.connection.Object(storage.container, storage.storageDir+filePath)
object, _, err := storage.connection.Object(storage.ctx, storage.container, storage.storageDir+filePath)
if err != nil {
if err == swift.ObjectNotFound {
@@ -218,7 +228,7 @@ func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exis
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, _, err := storage.connection.ObjectOpen(storage.container, storage.storageDir+filePath, false, nil)
file, _, err := storage.connection.ObjectOpen(storage.ctx, storage.container, storage.storageDir+filePath, false, nil)
if err != nil {
return err
}
@@ -229,7 +239,7 @@ func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chun
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads)
_, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
_, err = storage.connection.ObjectPut(storage.ctx, storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
return err
}
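
The v2 migration pattern above is uniform: every connection method now takes a context.Context as its first argument, and authentication becomes an explicit step. A condensed sketch of the pattern, assumed to run inside a function returning error; the credentials and endpoint are placeholders:

ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()

connection := swift.Connection{
    UserName: "user",                          // placeholder credentials
    ApiKey:   "key",
    AuthUrl:  "https://auth.example.com/v1.0", // placeholder endpoint
}
// v2 makes authentication an explicit, context-aware call.
if err := connection.Authenticate(ctx); err != nil {
    return err
}
// v1 equivalent: connection.Container(container)
if _, _, err := connection.Container(ctx, "container"); err != nil {
    return err
}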


@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"time"
"runtime"
"github.com/gilbertchen/gopass"
"golang.org/x/crypto/pbkdf2"
@@ -434,7 +435,7 @@ func PrettyTime(seconds int64) string {
seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
} else if seconds > day {
return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
} else if seconds > 0 {
} else if seconds >= 0 {
return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
} else {
return "n/a"
@@ -460,3 +461,16 @@ func AtoSize(sizeString string) int {
return size
}
func PrintMemoryUsage() {
for {
var m runtime.MemStats
runtime.ReadMemStats(&m)
LOG_INFO("MEMORY_STATS", "Currently allocated: %s, total allocated: %s, system memory: %s, number of GCs: %d",
PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC)
time.Sleep(time.Second)
}
}
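
PrintMemoryUsage never returns, so it is meant to run on its own goroutine for the lifetime of the process, started early in main when the -print-memory-usage option is given:

go PrintMemoryUsage() // logs memory statistics once per second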


@@ -0,0 +1,14 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"strings"
)
func excludedByAttribute(attributes map[string][]byte) bool {
value, ok := attributes["com.apple.metadata:com_apple_backup_excludeItem"]
return ok && strings.Contains(string(value), "com.apple.backupd")
}


@@ -0,0 +1,13 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
)
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["duplicacy_exclude"]
return ok
}


@@ -0,0 +1,13 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
)
func excludedByAttribute(attributes map[string][]byte) bool {
_, ok := attributes["duplicacy_exclude"]
return ok
}
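
For reference, a hedged sketch of marking a file so that an exclude-by-attribute backup skips it, using github.com/pkg/xattr (the library this changeset adopts below). Whether the platform requires a namespaced attribute name when setting it is an assumption to verify on your system:

package main

import "github.com/pkg/xattr"

func main() {
    // Only the attribute's presence matters; the value can be empty.
    // The name matches the key checked above; some platforms or tools
    // may require a namespaced form such as "user.duplicacy_exclude".
    xattr.Set("/path/to/skip", "duplicacy_exclude", []byte{})
}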


@@ -13,7 +13,7 @@ import (
"path/filepath"
"syscall"
"github.com/gilbertchen/xattr"
"github.com/pkg/xattr"
)
func Readlink(path string) (isRegular bool, s string, err error) {
@@ -50,37 +50,38 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
attributes, _ := xattr.Listxattr(fullPath)
attributes, _ := xattr.List(fullPath)
if len(attributes) > 0 {
entry.Attributes = make(map[string][]byte)
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.Getxattr(fullPath, name)
attribute, err := xattr.Get(fullPath, name)
if err == nil {
entry.Attributes[name] = attribute
(*entry.Attributes)[name] = attribute
}
}
}
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
names, _ := xattr.Listxattr(fullPath)
names, _ := xattr.List(fullPath)
for _, name := range names {
newAttribute, found := entry.Attributes[name]
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.Getxattr(fullPath, name)
oldAttribute, _ := xattr.Get(fullPath, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Setxattr(fullPath, name, newAttribute)
xattr.Set(fullPath, name, newAttribute)
}
delete(entry.Attributes, name)
delete(*entry.Attributes, name)
} else {
xattr.Removexattr(fullPath, name)
xattr.Remove(fullPath, name)
}
}
for name, attribute := range entry.Attributes {
xattr.Setxattr(fullPath, name, attribute)
for name, attribute := range *entry.Attributes {
xattr.Set(fullPath, name, attribute)
}
}
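
This hunk swaps github.com/gilbertchen/xattr for github.com/pkg/xattr, which exposes the shorter List/Get/Set/Remove names, and turns Entry.Attributes into a pointer to a map, hence the (*entry.Attributes) dereferences. A self-contained round trip with the same four calls — the user.demo name is arbitrary, and on Linux the filesystem must support extended attributes:

package main

import (
	"fmt"
	"os"

	"github.com/pkg/xattr"
)

func main() {
	f, err := os.CreateTemp("", "xattr-demo")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())

	_ = xattr.Set(f.Name(), "user.demo", []byte("value"))
	names, _ := xattr.List(f.Name()) // -> ["user.demo"]
	val, _ := xattr.Get(f.Name(), "user.demo")
	fmt.Println(names, string(val))
	_ = xattr.Remove(f.Name(), "user.demo")
}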

View File

@@ -131,3 +131,7 @@ func SplitDir(fullPath string) (dir string, file string) {
i := strings.LastIndex(fullPath, "\\")
return fullPath[:i+1], fullPath[i+1:]
}
func excludedByAttribute(attributes map[string][]byte) bool {
return false
}

View File

@@ -14,7 +14,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
//"net/http/httputil"
@@ -22,6 +21,7 @@ import (
"strings"
"sync"
"time"
"io/ioutil"
)
type WebDAVStorage struct {
@@ -128,7 +128,12 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
dataReader = bytes.NewReader(data)
} else if method == "PUT" {
headers["Content-Type"] = "application/octet-stream"
dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
headers["Content-Length"] = fmt.Sprintf("%d", len(data))
if storage.UploadRateLimit <= 0 {
dataReader = bytes.NewReader(data)
} else {
dataReader = CreateRateLimitedReader(data, storage.UploadRateLimit/storage.threads)
}
} else if method == "MOVE" {
headers["Destination"] = storage.createConnectionString(string(data))
headers["Content-Type"] = "application/octet-stream"
@@ -160,7 +165,7 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
response, err := storage.client.Do(request)
if err != nil {
LOG_TRACE("WEBDAV_RETRY", "URL request '%s %s' returned an error (%v)", method, uri, err)
LOG_TRACE("WEBDAV_ERROR", "URL request '%s %s' returned an error (%v)", method, uri, err)
backoff = storage.retry(backoff)
continue
}
@@ -169,11 +174,13 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
return response.Body, response.Header, nil
}
io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
if response.StatusCode == 301 {
return nil, nil, errWebDAVMovedPermanently
}
response.Body.Close()
if response.StatusCode == 404 {
// Retry if it is a PUT (upload); otherwise return immediately
if method != "PUT" {
@@ -214,53 +221,57 @@ type WebDAVMultiStatus struct {
func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {
propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"
maxTries := 3
for tries := 0; ; tries++ {
propfind := "<prop>"
for _, p := range properties {
propfind += fmt.Sprintf("<%s/>", p)
}
propfind += "</prop>"
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
content, err := ioutil.ReadAll(readCloser)
if err != nil {
return nil, err
}
readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
if err != nil {
return nil, err
}
defer readCloser.Close()
defer io.Copy(ioutil.Discard, readCloser)
object := WebDAVMultiStatus{}
err = xml.Unmarshal(content, &object)
if err != nil {
return nil, err
}
if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}
responses := make(map[string]WebDAVProperties)
for _, responseTag := range object.Responses {
if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
object := WebDAVMultiStatus{}
err = xml.NewDecoder(readCloser).Decode(&object)
if err != nil {
if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
LOG_WARN("WEBDAV_RETRY", "Retrying on %v", err)
continue
}
return nil, err
}
properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
if object.Responses == nil || len(object.Responses) == 0 {
return nil, errors.New("no WebDAV responses")
}
responseKey := responseTag.Href
responses[responseKey] = properties
responses := make(map[string]WebDAVProperties)
for _, responseTag := range object.Responses {
if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
return nil, errors.New("no WebDAV properties")
}
properties := make(WebDAVProperties)
for _, prop := range responseTag.PropStat.Prop.PropList {
properties[prop.XMLName.Local] = prop.Value
}
responseKey := responseTag.Href
responses[responseKey] = properties
}
return responses, nil
}
return responses, nil
}
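
The rewrite replaces ioutil.ReadAll followed by xml.Unmarshal with a streaming xml.NewDecoder(...).Decode, so the PROPFIND response is no longer buffered in full, and a truncated body surfaces as an "unexpected EOF" decode error that the new bounded loop (maxTries) retries. The shape of that pattern, condensed — fetch and multiStatus are illustrative names, not duplicacy API:

func decodeWithRetry(fetch func() (io.ReadCloser, error)) (*multiStatus, error) {
	const maxTries = 3
	for tries := 0; ; tries++ {
		body, err := fetch()
		if err != nil {
			return nil, err
		}
		var result multiStatus
		err = xml.NewDecoder(body).Decode(&result)
		body.Close()
		if err == nil {
			return &result, nil
		}
		if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
			continue // truncated response; fetch and decode again
		}
		return nil, err
	}
}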
// ListFiles returns the list of files and subdirectories under 'dir'. Subdirectories returned must have a trailing '/', with
@@ -309,6 +320,12 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st
}
files = append(files, file)
sizes = append(sizes, int64(0))
// Add the directory to the directory cache
storage.directoryCacheLock.Lock()
storage.directoryCache[dir + file] = 1
storage.directoryCacheLock.Unlock()
}
}
@@ -355,6 +372,7 @@ func (storage *WebDAVStorage) DeleteFile(threadIndex int, filePath string) (err
if err != nil {
return err
}
io.Copy(ioutil.Discard, readCloser)
readCloser.Close()
return nil
}
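
The io.Copy(ioutil.Discard, readCloser) calls added in this and the following hunks are about connection reuse: Go's HTTP transport returns a keep-alive connection to its pool only after the response body has been read to EOF and closed, so draining before Close keeps the TCP connection alive for the next request instead of tearing it down. The idiom in isolation:

resp, err := client.Do(req)
if err != nil {
	return err
}
io.Copy(ioutil.Discard, resp.Body) // read to EOF so the transport can reuse the connection
resp.Body.Close()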
@@ -365,6 +383,7 @@ func (storage *WebDAVStorage) MoveFile(threadIndex int, from string, to string)
if err != nil {
return err
}
io.Copy(ioutil.Discard, readCloser)
readCloser.Close()
return nil
}
@@ -378,21 +397,7 @@ func (storage *WebDAVStorage) createParentDirectory(threadIndex int, dir string)
}
parent := dir[:found]
storage.directoryCacheLock.Lock()
_, exist := storage.directoryCache[parent]
storage.directoryCacheLock.Unlock()
if exist {
return nil
}
err = storage.CreateDirectory(threadIndex, parent)
if err == nil {
storage.directoryCacheLock.Lock()
storage.directoryCache[parent] = 1
storage.directoryCacheLock.Unlock()
}
return err
return storage.CreateDirectory(threadIndex, parent)
}
// CreateDirectory creates a new directory.
@@ -405,18 +410,35 @@ func (storage *WebDAVStorage) CreateDirectory(threadIndex int, dir string) (err
return nil
}
storage.directoryCacheLock.Lock()
_, exist := storage.directoryCache[dir]
storage.directoryCacheLock.Unlock()
if exist {
return nil
}
// If there is an error in creating the parent directory, proceed anyway
storage.createParentDirectory(threadIndex, dir)
readCloser, _, err := storage.sendRequest("MKCOL", dir, 0, []byte(""))
if err != nil {
if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently {
if err == errWebDAVMethodNotAllowed || err == errWebDAVMovedPermanently || err == io.EOF {
// We simply ignore these errors and assume that the directory already exists
LOG_TRACE("WEBDAV_MKDIR", "Can't create directory %s: %v; error ignored", dir, err)
storage.directoryCacheLock.Lock()
storage.directoryCache[dir] = 1
storage.directoryCacheLock.Unlock()
return nil
}
return err
}
io.Copy(ioutil.Discard, readCloser)
readCloser.Close()
storage.directoryCacheLock.Lock()
storage.directoryCache[dir] = 1
storage.directoryCacheLock.Unlock()
return nil
}
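
CreateDirectory now centralizes the directory cache that createParentDirectory used to update: check under the lock, attempt MKCOL, then record the directory on success or on errors that imply it already exists. The same check-create-record pattern in generic form — the onceCache type is illustrative, not duplicacy code. Note that, like the hunk above, it deliberately releases the lock while creating, so concurrent network calls are not serialized:

type onceCache struct {
	mu   sync.Mutex
	seen map[string]struct{}
}

func (c *onceCache) ensure(key string, create func() error) error {
	c.mu.Lock()
	_, done := c.seen[key]
	c.mu.Unlock()
	if done {
		return nil
	}
	if err := create(); err != nil {
		return err
	}
	c.mu.Lock()
	c.seen[key] = struct{}{}
	c.mu.Unlock()
	return nil
}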
@@ -441,6 +463,7 @@ func (storage *WebDAVStorage) UploadFile(threadIndex int, filePath string, conte
if err != nil {
return err
}
io.Copy(ioutil.Discard, readCloser)
readCloser.Close()
return nil
}