mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits

...

69 Commits

Author SHA1 Message Date
Luca Trevisani
dd60365abc Merge 74bb98a58c into bb214b6e04 2024-11-27 04:58:35 +00:00
Gilbert Chen
bb214b6e04 Bump version to 3.2.4 2024-10-30 12:05:18 -04:00
Gilbert Chen
6bca9fccdd maxCollectionNumber must be increased even in collect-only mode
This fixes a bug that caused collect-only mode to keep overwriting
collection 1 during every prune
2024-10-29 22:46:46 -04:00
Gilbert Chen
a06d925e53 Remove 'incomplete_files' in deleteIncompleteSnapshot()
This file is used for storing the on-disk entry list.  When an error occurs,
this file is renamed to 'incomplete_snapshot' for fast resuming on the next
run.  But if there is no error, this file should be removed.
2024-10-29 21:04:51 -04:00
Gilbert Chen
69f5d2f7bf Don't save the incomplete snapshot for a dry run
Saving the incomplete snapshot would trick the next backup into thinking that
all chunks have been uploaded.
2024-07-25 22:54:19 -04:00
Gilbert Chen
d182708eb5 Fix zstd level name (fast -> fastest) 2024-07-11 14:47:40 -04:00
Gilbert Chen
f8a0964ac0 Save the list of verified chunks every 5 minutes.
This can be useful if the list isn't saved at the end of the run for some
reason, such as when the program is terminated abruptly.
2024-07-11 14:12:12 -04:00
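As an illustration of the checkpointing described above (not the actual duplicacy code), a periodic save could be driven by a ticker; saveVerifiedChunks is the helper named in PR #649 below, everything else here is hypothetical:

    import "time"

    // Stub: persists the verified-chunks map; must hold verifiedChunksLock (PR #649).
    func saveVerifiedChunks() {}

    // checkpointVerifiedChunks flushes the verified-chunks list every
    // 5 minutes so an abrupt termination loses little progress.
    func checkpointVerifiedChunks(stop <-chan struct{}) {
        ticker := time.NewTicker(5 * time.Minute)
        defer ticker.Stop()
        for {
            select {
            case <-ticker.C:
                saveVerifiedChunks()
            case <-stop:
                return
            }
        }
    }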
Gilbert Chen
b659456f12 Don't add corrupt chunks to verified_chunks
When -persist is specified, the chunk downloader may return the chunk even if
the chunk is corrupt.  We use the chunk.isBroken flag to detect this situation.
2024-07-10 10:32:40 -04:00
Gilbert Chen
0d3ee4186c Use a different api to find the id of a GCD drive by name
Previously, a listing API was used to find the ID, but that API only returns
10 drives per call.  If there are more than 10 shared drives, some may not
be returned.
2024-01-16 12:50:59 -05:00
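The commit does not say which API replaced the listing call; purely as an illustration of the truncation problem, a drive/v3 lookup would otherwise have to page through drives.list (findDriveID is a hypothetical name):

    import (
        "fmt"

        drive "google.golang.org/api/drive/v3"
    )

    // findDriveID pages through drives.list until the named shared drive
    // is found, rather than trusting a single truncated listing call.
    func findDriveID(srv *drive.Service, name string) (string, error) {
        pageToken := ""
        for {
            call := srv.Drives.List().PageSize(100)
            if pageToken != "" {
                call = call.PageToken(pageToken)
            }
            list, err := call.Do()
            if err != nil {
                return "", err
            }
            for _, d := range list.Drives {
                if d.Name == name {
                    return d.Id, nil
                }
            }
            if list.NextPageToken == "" {
                return "", fmt.Errorf("shared drive %q not found", name)
            }
            pageToken = list.NextPageToken
        }
    }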
Luca Trevisani
74bb98a58c Fix -zstd flag documentation 2023-11-09 23:20:21 +01:00
Gilbert Chen
2549532676 Bump version to 3.2.3 2023-10-06 22:13:33 -04:00
Gilbert Chen
e99dfea048 Upgrade storj.io/uplink to v1.12.1 to fix an 64 bit alignment issue 2023-10-06 22:12:12 -04:00
Gilbert Chen
50120146df Bump version to 3.2.2 2023-10-03 22:35:18 -04:00
Gilbert Chen
7bfc0e7d51 Save passwords after a storj storage has been created 2023-10-03 22:33:41 -04:00
Gilbert Chen
fd3bceae19 Bump version to 3.2.1 2023-10-02 12:30:23 -04:00
Gilbert Chen
7cc1b4222c Update go version to 1.19.13 2023-09-28 23:17:24 -04:00
Gilbert Chen
d92b1734f4 Skip identical entries when listing chunks
The prune command can remove redundant chunks (chunks with the same chunk id
but at a different subdirectory level).  However, if the same chunk appears
multiple times in the listing returned by the storage, it will be treated as
a redundant chunk and thus removed.
2023-09-27 15:31:08 -04:00
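A minimal Go sketch of that idea, assuming the storage returns a sorted listing (ChunkEntry and uniqueEntries are hypothetical names):

    // ChunkEntry is one line of a storage listing.
    type ChunkEntry struct {
        ID   string // chunk id
        Path string // full path, including the subdirectory level
    }

    // uniqueEntries drops entries identical to their predecessor, so a
    // chunk reported twice by the storage is not mistaken for a redundant
    // copy stored at a different subdirectory level.
    func uniqueEntries(listing []ChunkEntry) []ChunkEntry {
        var out []ChunkEntry
        for i, e := range listing {
            if i > 0 && e == listing[i-1] {
                continue // identical entry, skip it
            }
            out = append(out, e)
        }
        return out
    }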
Gilbert Chen
4e9d2c4cca Allow two copy-compatible storages to have different compression levels
This is useful for upgrading an existing storage to zstd compression or others.
Chunks need to be decompressed and re-compressed during copy anyway.  Only
the bit-identical option requires the same compression level.

Also fix a typo: compatiable -> compatible
2023-09-18 14:44:41 -04:00
gilbertchen
cc482beb95 Merge pull request #653 from gorbak25/fix-compression
Fix compression level check
2023-09-18 10:50:55 -04:00
Grzegorz Uriasz
bf3ea8a83c Fix compression level check 2023-09-11 23:03:34 +02:00
Gilbert Chen
981efc13e6 Bump version to 3.2.0 2023-09-08 15:25:22 -04:00
Gilbert Chen
6445ecbcde Add dependencies required by github.com/hirochachacha/go-smb2 2023-09-08 13:54:03 -04:00
Gilbert Chen
ff207ba5bf Convert the file path to the real one when downloading a chunk
This is mainly to handle the case when a fossil needs to be downloaded.
This happens when a metadata chunk has been marked as a fossil while the
corresponding snapshot must be reconstructed to determine referenced
chunks.
2023-09-08 13:30:28 -04:00
Gilbert Chen
3a81c1065a Add a new Samba backend
The storage url is smb://user@server[:port]/share/path.  The password can be
set in the environment variable DUPLICACY_SMB_PASSWORD for the default
storage, or in DUPLICACY_<STORAGE_NAME>_SMB_PASSWORD otherwise.

This backend is based on https://github.com/hirochachacha/go-smb2.  The
previous samba:// backend is just an alias for the disk-based backend with
caching enabled.
2023-07-05 22:51:24 -04:00
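For example, with hypothetical user, server, share, and storage names (the variable name is derived from the storage name as described above):

    duplicacy init -storage-name nas mybackup smb://alice@fileserver/backups/duplicacy
    export DUPLICACY_NAS_SMB_PASSWORD='...'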
Gilbert Chen
cdf8f5a857 Check the length of 'file' before checking if it ends with '/' 2023-04-09 22:11:08 -04:00
Gilbert Chen
1f9ad0e35c B2 backend should be able to download .fsl files
This is needed when a metadata chunk has been turned into a fossil.  In B2,
the fossil file is the last version of the file with an 'upload' action, so
to download the file, the 'b2_list_file_versions' api is called to find the
file id, and the file is then downloaded using 'b2_download_file_by_id'.
2023-03-30 13:27:29 -04:00
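b2_list_file_versions and b2_download_file_by_id are real B2 API calls, but the Go wrapper below is only a sketch of the described flow; B2Client, FileVersion, and the method names are hypothetical:

    import "fmt"

    type FileVersion struct {
        FileID string
        Action string // "upload", "hide", ...
    }

    type B2Client struct{ /* auth token, API URL, ... */ }

    // Stubs standing in for the b2_list_file_versions and
    // b2_download_file_by_id calls.
    func (c *B2Client) ListFileVersions(name string) ([]FileVersion, error) { return nil, nil }
    func (c *B2Client) DownloadFileByID(id string) ([]byte, error)          { return nil, nil }

    // downloadFossil fetches a fossil: the latest version of the file
    // with an "upload" action, found via the versions listing.
    func downloadFossil(c *B2Client, name string) ([]byte, error) {
        versions, err := c.ListFileVersions(name) // newest version first
        if err != nil {
            return nil, err
        }
        for _, v := range versions {
            if v.Action == "upload" {
                return c.DownloadFileByID(v.FileID)
            }
        }
        return nil, fmt.Errorf("no uploaded version of %s found", name)
    }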
Gilbert Chen
53b0f3f7b6 Implement zstd compression
Zstd compression can be enabled by providing `-zstd` or `-zstd-level <level>`
to `init`, `add`, or `backup`. With `-zstd` the compression level will be
`default`, and with `-zstd-level` the level can be any of `fastest`, `default`,
`better`, or `best`.
2023-03-26 21:31:51 -04:00
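For example, with hypothetical snapshot id and storage path (the flags themselves appear in the duplicacy_main.go diff below):

    duplicacy init -zstd mybackup /mnt/backups     # level 'default'
    duplicacy backup -zstd-level best              # explicit level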
Gilbert Chen
9f276047db Fixed a typo in log id
SFT_RECONNECT -> SFTP_RECONNECT
2023-03-26 21:26:51 -04:00
gilbertchen
c237269589 Merge pull request #649 from northnose/saveVerifiedChunksLock
Acquire verifiedChunksLock in saveVerifiedChunks
2023-03-23 22:11:53 -04:00
gilbertchen
493ef603e3 Merge pull request #648 from northnose/dropbox-pointer
Upgrade go-dropbox to the latest
2023-03-23 22:10:24 -04:00
David Zhang
889191a814 Upgrade go-dropbox to the latest 2023-03-20 20:19:43 -07:00
David Zhang
df80096cdf Acquire verifiedChunksLock in saveVerifiedChunks 2023-03-12 21:00:51 -07:00
gilbertchen
24c2ea76b9 Merge pull request #633 from sevimo123/sharepoint_support
Sharepoint support
2023-01-19 14:06:42 -05:00
gilbertchen
15b6ef9d76 Merge pull request #632 from sevimo123/custom_odb_creds
CLI support for custom credentials for OneDrive (client_id/client_secret)
2023-01-19 13:01:01 -05:00
gilbertchen
75b310b98e Merge pull request #641 from A-wels/patch-1
Fixed typo incomlete -> incomplete
2023-01-17 11:51:20 -05:00
Alexander Welsing
039b749a3e Fixed typo incomlete -> incomplete
"Previous incomlete backup contains %d files and %d chunks -> "Previous incomplete backup contains %d files and %d chunks
2023-01-12 12:41:45 +01:00
Gilbert Chen
9be475f876 Fix another chunk leak in listing files in a revision.
This bug leaks a chunk every time files in a revision are listed.  Not a big
deal for backup and restore, but it becomes problematic when listing files in
many revisions for commands such as check and history.
2023-01-06 23:02:48 -05:00
Gilbert Chen
27ff3e216b Bump version to 3.1.0 2022-12-06 23:28:44 -05:00
Gilbert Chen
1ba204a21b Upgrade go-dropbox to the latest
This is to incorporate the fix:
https://github.com/gilbertchen/go-dropbox/commit/60ebcd

Otherwise the access token won't get updated after token refresh
2022-12-06 23:24:11 -05:00
Gilbert Chen
b8c7594dbf Release the chunk used to download files when finished
Without this fix, a chunk is leaked for each snapshot checked
with `-files`.
2022-12-06 22:46:25 -05:00
Gilbert Chen
58f0d2be5a Fixed a bug that didn't preserve the version bit when copying old snapshots
The version bit should not be set to 1 when encoding a snapshot.  Instead,
it must be set to 1 on snapshot creation.

To correctly process old snapshots encoded incorrectly with version bit set
to 1, the first byte of the encoded file list is also checked.  If the first
byte is `[`, then it must be an old snapshot, since the file list in the new
snapshot format always starts with a string encoded in msgpack, the first
byte of which can't be `[`.
2022-11-22 21:31:24 -05:00
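A Go sketch of the first-byte check described above (the function name is hypothetical; the old format's file list is presumably a JSON array, hence the leading '['):

    // isOldSnapshotFileList reports whether an encoded file list uses the
    // old snapshot format.  New-format lists start with a msgpack-encoded
    // string, whose first byte can never be '['.
    func isOldSnapshotFileList(encoded []byte) bool {
        return len(encoded) > 0 && encoded[0] == '['
    }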
Gilbert Chen
0a794e6fea Fixed test errors and remove obsolete tests 2022-11-15 11:53:46 -05:00
Gilbert Chen
bc2d762e41 Add -rewrite to the check command to fix corrupted chunks
This option is useful only when erasure coding is enabled.  It will
download and re-upload chunks that contain corruption but are
generally recoverable.  It can also be used to fix chunks that
were created by 3.0.1 on arm64 machines with wrong hashes.
2022-11-15 11:47:02 -05:00
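A plausible invocation, assuming -rewrite is combined with -chunks so that chunks are actually downloaded and verified (see the flag definitions in the diff below):

    duplicacy check -chunks -rewrite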
Gilbert Chen
6a7a2c8048 Upgrade github.com/minio/highwayhash to 1.0.2
highwayhash 1.0.1 contains a bug leading to incorrect hashes on arm64 machines.
The 1.0.1 version is retained in github.com/gilbertchen/highwayhash so the hash
can be checked again if a mismatch is detected by 1.0.2.
2022-11-09 14:44:24 -05:00
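An illustrative Go sketch of that fallback; verifyChunkHash and the two hash stubs are hypothetical names:

    import "bytes"

    func newHash(data []byte) []byte    { return nil } // minio/highwayhash v1.0.2 (correct)
    func legacyHash(data []byte) []byte { return nil } // gilbertchen/highwayhash (buggy 1.0.1)

    // verifyChunkHash accepts a chunk if it matches the correct hash, or
    // the buggy 1.0.1 hash that 3.0.1 produced on arm64 machines.
    func verifyChunkHash(data, stored []byte) bool {
        if bytes.Equal(newHash(data), stored) {
            return true
        }
        return bytes.Equal(legacyHash(data), stored)
    }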
Gilbert Chen
3472206bcf Handle zero-byte files correctly
This commit fixes two bugs.  The first bug occurs when an incomplete backup
contains a zero-byte file and no chunks.  The second bug occurs when the
repository contains only zero-byte files.
2022-11-08 22:54:35 -05:00
Gilbert Chen
72eb339837 Bump version to 3.0.1 2022-10-06 20:30:05 -04:00
Gilbert Chen
901044b348 Update dependencies 2022-10-06 20:29:51 -04:00
Gilbert Chen
d6f5336784 Bump version to 3.0.0 2022-10-05 22:21:17 -04:00
Gilbert Chen
4b47ea55e4 Bump version to 2.8.0 2022-10-04 12:49:50 -04:00
Gilbert Chen
5c35ef799a Switch to go modules 2022-10-04 12:48:58 -04:00
Gilbert Chen
2c63d32142 Use swift V2 2022-10-04 12:47:38 -04:00
Gilbert Chen
6009f64b66 Add a storage backend for Storj
The url format is storj://satellite/bucket/path.  You can get the
satellite along with the API access key when requesting an Access
Grant of type API Access.
2022-09-30 10:30:06 -04:00
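For example, with a hypothetical satellite address and bucket:

    storj://us1.storj.io:7777/my-bucket/duplicacy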
Gilbert Chen
cde660ee9f Use long-lived refresh token for the Dropbox backend
The refresh token can be downloaded from https://duplicacy.com/dropbox_start
2022-08-12 22:17:06 -04:00
Victor Mozgin
d7593a828c Added support for SharePoint document libraries via odb://DRIVEID@path/to/storage. 2022-07-22 23:31:27 -04:00
Victor Mozgin
238ef63e16 CLI support for custom credentials for OneDrive (client_id/client_secret) 2022-07-22 21:18:52 -04:00
Gilbert Chen
54952cef26 Fixed a bug that referenced uninitialized operator.snapshotCache 2022-07-10 12:23:11 -04:00
Gilbert Chen
fc2386f9cc Initialize startTime correctly in CreateChunkOperator 2022-06-09 23:28:29 -04:00
Gilbert Chen
0d8a37f9f3 Dependency change: update github.com/ncw/swift to v2.0.1 2022-04-08 23:16:51 -04:00
gilbertchen
345fc5ed87 Merge pull request #626 from markfeit/swift-v2
Swift v2
2022-04-08 22:29:30 -04:00
Gilbert Chen
8df529dffe Fixed the type of a test parameter 2022-04-07 23:34:48 -04:00
gilbertchen
f2d6de3fff Merge pull request #625 from gilbertchen/memory_optimization
Rewrite the backup procedure to reduce memory usage
2022-04-07 23:26:46 -04:00
Mark Feit
4743c7ba0d Don't cancel the context and use a sane deadline. 2021-12-17 10:09:36 -05:00
Mark Feit
590d3b1b5b More development 2021-12-04 10:56:47 -05:00
Mark Feit
0590daff85 More dev 2021-12-04 10:53:34 -05:00
Mark Feit
95b1227d93 Use empty context 2021-12-04 10:48:01 -05:00
Mark Feit
1661caeb92 More fixups 2021-12-04 10:37:11 -05:00
Mark Feit
041ba944c4 Typo 2021-12-04 10:24:31 -05:00
Mark Feit
934c2515cc Import context 2021-12-04 10:23:42 -05:00
Mark Feit
c363d21954 First cut of Swift v2 2021-12-04 10:11:19 -05:00
31 changed files with 1490 additions and 1191 deletions

Gopkg.lock generated

@@ -1,276 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
version = "v10.15.5"
[[projects]]
branch = "master"
name = "github.com/aryann/difflib"
packages = ["."]
revision = "e206f873d14a916d3d26c40ab667bca123f365a3"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
version = "v1.30.7"
[[projects]]
name = "github.com/bkaradzic/go-lz4"
packages = ["."]
revision = "74ddf82598bc4745b965729e9c6a463bedd33049"
version = "v1.0.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage","version"]
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/cli"
packages = ["."]
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
[[projects]]
name = "github.com/gilbertchen/go-dropbox"
packages = ["."]
revision = "2233fa1dd846b3a3e8060b6c1ea12883deb9d288"
[[projects]]
name = "github.com/gilbertchen/go-ole"
packages = ["."]
revision = "0e87ea779d9deb219633b828a023b32e1244dd57"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/go.dbus"
packages = ["."]
revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/goamz"
packages = ["aws","s3"]
revision = "eada9f4e8cc2a45db775dee08a2c37597ce4760a"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/gopass"
packages = ["."]
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/keyring"
packages = ["."]
revision = "8855f5632086e51468cd7ce91056f8da69687ef6"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/xattr"
packages = ["."]
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
[[projects]]
branch = "master"
name = "github.com/golang/groupcache"
packages = ["lru"]
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "84668698ea25b64748563aa20726db66a6b8d299"
version = "v1.3.5"
[[projects]]
name = "github.com/googleapis/gax-go"
packages = [".","v2"]
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
version = "v2.0.3"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "c2b33e84"
[[projects]]
name = "github.com/klauspost/cpuid"
packages = ["."]
revision = "750c0591dbbd50ef88371c665ad49e426a4b830b"
version = "v1.3.1"
[[projects]]
name = "github.com/klauspost/reedsolomon"
packages = ["."]
revision = "7daa20bf74337a939c54f892a2eca9d9b578eb7f"
version = "v1.9.9"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/minio/blake2b-simd"
packages = ["."]
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
[[projects]]
name = "github.com/minio/highwayhash"
packages = ["."]
revision = "86a2a969d04373bf05ca722517d30fb1c9a3e4f9"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/mmcloughlin/avo"
packages = ["attr","build","buildtags","gotypes","internal/prnt","internal/stack","ir","operand","pass","printer","reg","src","x86"]
revision = "443f81d771042b019379ae4bfcd0a591cb47c88a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
version = "v1.0.50"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "614d223910a179a466c1767a985424175c39b465"
version = "v0.9.1"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
version = "v1.11.0"
[[projects]]
name = "github.com/pkg/xattr"
packages = ["."]
revision = "dd870b5cfebab49617ea0c1da6176474e8a52bf4"
version = "v0.4.1"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/vaughan0/go-ini"
packages = ["."]
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
[[projects]]
name = "go.opencensus.io"
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
revision = "d835ff86be02193d324330acdb7d65546b05f814"
version = "v0.22.3"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
[[projects]]
name = "golang.org/x/mod"
packages = ["semver"]
revision = "859b3ef565e237f9f1a0fb6b55385c497545680d"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
[[projects]]
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["cpu","unix","windows"]
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
[[projects]]
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = ["go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/event","internal/event/core","internal/event/keys","internal/event/label","internal/gocommand","internal/packagesinternal","internal/typesinternal"]
revision = "5d1fdd8fa3469142b9369713b23d8413d6d83189"
[[projects]]
branch = "master"
name = "golang.org/x/xerrors"
packages = [".","internal"]
revision = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca"
[[projects]]
name = "google.golang.org/api"
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
version = "v0.21.0"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
version = "v1.6.5"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
version = "v1.28.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "e46f7c2dac527af6d5a0d47f1444421a6738d28252eb5d6084fc1c65f2b41bd8"
solver-name = "gps-cdcl"
solver-version = 1

Gopkg.toml

@@ -1,98 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "cloud.google.com/go"
version = "0.16.0"
[[constraint]]
branch = "master"
name = "github.com/aryann/difflib"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.30.7"
[[constraint]]
name = "github.com/bkaradzic/go-lz4"
version = "1.0.0"
[[constraint]]
name = "github.com/gilbertchen/azure-sdk-for-go"
branch = "master"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/cli"
[[constraint]]
name = "github.com/gilbertchen/go-dropbox"
revision = "2233fa1dd846b3a3e8060b6c1ea12883deb9d288"
[[constraint]]
name = "github.com/gilbertchen/go-ole"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/goamz"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/gopass"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/keyring"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/xattr"
[[constraint]]
branch = "master"
name = "github.com/minio/blake2b-simd"
[[constraint]]
name = "github.com/pkg/sftp"
version = "1.10.1"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "golang.org/x/oauth2"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[constraint]]
name = "google.golang.org/api"
version = "0.21.0"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.28.0"

duplicacy/duplicacy_main.go

@@ -368,9 +368,8 @@ func configRepository(context *cli.Context, init bool) {
"The storage '%s' has already been initialized", preference.StorageURL)
if existingConfig.CompressionLevel >= -1 && existingConfig.CompressionLevel <= 9 {
duplicacy.LOG_INFO("STORAGE_FORMAT", "This storage is configured to use the pre-1.2.0 format")
} else if existingConfig.CompressionLevel != 100 {
duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "This storage is configured with an invalid compression level %d", existingConfig.CompressionLevel)
return
} else if existingConfig.CompressionLevel != duplicacy.DEFAULT_COMPRESSION_LEVEL {
duplicacy.LOG_INFO("STORAGE_COMPRESSION", "Compression level: %d", existingConfig.CompressionLevel)
}
// Don't print config in the background mode
@@ -378,8 +377,6 @@ func configRepository(context *cli.Context, init bool) {
existingConfig.Print()
}
} else {
compressionLevel := 100
averageChunkSize := duplicacy.AtoSize(context.String("chunk-size"))
if averageChunkSize == 0 {
fmt.Fprintf(context.App.Writer, "Invalid average chunk size: %s.\n\n", context.String("chunk-size"))
@@ -487,6 +484,18 @@ func configRepository(context *cli.Context, init bool) {
}
}
compressionLevel := 100
zstdLevel := context.String("zstd-level")
if zstdLevel != "" {
if level, found := duplicacy.ZSTD_COMPRESSION_LEVELS[zstdLevel]; found {
compressionLevel = level
} else {
duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "Invalid zstd compression level: %s", zstdLevel)
}
} else if context.Bool("zstd") {
compressionLevel = duplicacy.ZSTD_COMPRESSION_LEVEL_DEFAULT
}
duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
}
@@ -786,6 +795,17 @@ func backupRepository(context *cli.Context) {
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SetDryRun(dryRun)
zstdLevel := context.String("zstd-level")
if zstdLevel != "" {
if level, found := duplicacy.ZSTD_COMPRESSION_LEVELS[zstdLevel]; found {
backupManager.SetCompressionLevel(level)
} else {
duplicacy.LOG_ERROR("STORAGE_COMPRESSION", "Invalid zstd compression level: %s", zstdLevel)
}
} else if context.Bool("zstd") {
backupManager.SetCompressionLevel(duplicacy.ZSTD_COMPRESSION_LEVEL_DEFAULT)
}
metadataChunkSize := context.Int("metadata-chunk-size")
maximumInMemoryEntries := context.Int("max-in-memory-entries")
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly, metadataChunkSize, maximumInMemoryEntries)
@@ -930,6 +950,7 @@ func listSnapshots(context *cli.Context) {
}
func checkSnapshots(context *cli.Context) {
setGlobalOptions(context)
defer duplicacy.CatchLogException()
@@ -981,10 +1002,11 @@ func checkSnapshots(context *cli.Context) {
checkChunks := context.Bool("chunks")
searchFossils := context.Bool("fossils")
resurrect := context.Bool("resurrect")
rewrite := context.Bool("rewrite")
persist := context.Bool("persist")
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads, persist)
backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, rewrite, threads, persist)
runScript(context, preference.Name, "post")
}
@@ -1427,6 +1449,15 @@ func main() {
Usage: "the minimum size of chunks (defaults to chunk-size/4)",
Argument: "<size>",
},
cli.StringFlag{
Name: "zstd-level",
Usage: "set zstd compression level (fastest, default, better, or best)",
Argument: "<level>",
},
cli.BoolFlag{
Name: "zstd",
Usage: "short for -zstd-level default",
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
@@ -1494,6 +1525,15 @@ func main() {
Name: "dry-run",
Usage: "dry run for testing, don't backup anything. Use with -stats and -d",
},
cli.StringFlag{
Name: "zstd-level",
Usage: "set zstd compression level (fastest, default, better, or best)",
Argument: "<level>",
},
cli.BoolFlag{
Name: "zstd",
Usage: "short for -zstd-level default",
},
cli.BoolFlag{
Name: "vss",
Usage: "enable the Volume Shadow Copy service (Windows and macOS using APFS only)",
@@ -1676,6 +1716,10 @@ func main() {
Name: "resurrect",
Usage: "turn referenced fossils back into chunks",
},
cli.BoolFlag{
Name: "rewrite",
Usage: "rewrite chunks with recoverable corruption",
},
cli.BoolFlag{
Name: "files",
Usage: "verify the integrity of every file",
@@ -1933,6 +1977,15 @@ func main() {
Usage: "the minimum size of chunks (default is chunk-size/4)",
Argument: "<size>",
},
cli.StringFlag{
Name: "zstd-level",
Usage: "set zstd compression level (fastest, default, better, or best)",
Argument: "<level>",
},
cli.BoolFlag{
Name: "zstd",
Usage: "short for -zstd-level default",
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
@@ -2210,7 +2263,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.7.2" + " (" + GitCommit + ")"
app.Version = "3.2.4" + " (" + GitCommit + ")"
// Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) {

go.mod Normal file

@@ -0,0 +1,78 @@
module github.com/gilbertchen/duplicacy
go 1.19
require (
cloud.google.com/go v0.38.0
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a
github.com/aws/aws-sdk-go v1.30.7
github.com/bkaradzic/go-lz4 v1.0.0
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9
github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916
github.com/gilbertchen/go-ole v1.2.0
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c
github.com/gilbertchen/highwayhash v0.0.0-20221109044721-eeab1f4799d8
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01
github.com/hirochachacha/go-smb2 v1.1.0
github.com/klauspost/compress v1.16.3
github.com/klauspost/reedsolomon v1.9.9
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/minio/highwayhash v1.0.2
github.com/ncw/swift/v2 v2.0.1
github.com/pkg/sftp v1.11.0
github.com/pkg/xattr v0.4.1
github.com/vmihailenco/msgpack v4.0.4+incompatible
golang.org/x/crypto v0.12.0
golang.org/x/net v0.10.0
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
google.golang.org/api v0.21.0
storj.io/uplink v1.12.1
)
require (
github.com/Azure/go-autorest v10.15.5+incompatible // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/dnaeon/go-vcr v1.2.0 // indirect
github.com/flynn/noise v1.0.0 // indirect
github.com/geoffgarside/ber v1.1.0 // indirect
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 // indirect
github.com/godbus/dbus v4.1.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.5.0 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/jtolio/eventkit v0.0.0-20221004135224-074cf276595b // indirect
github.com/jtolio/noiseconn v0.0.0-20230111204749-d7ec1a08b0b8 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/klauspost/cpuid/v2 v2.0.12 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/marstr/guid v1.1.0 // indirect
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/segmentio/go-env v1.1.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.22 // indirect
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec // indirect
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
github.com/zeebo/errs v1.3.0 // indirect
go.opencensus.io v0.22.3 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/term v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/tools v0.9.1 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d // indirect
google.golang.org/grpc v1.28.1 // indirect
google.golang.org/protobuf v1.28.1 // indirect
storj.io/common v0.0.0-20230920095429-0ce0a575e6f8 // indirect
storj.io/drpc v0.0.33 // indirect
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c // indirect
)

go.sum Normal file

@@ -0,0 +1,302 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-autorest v10.15.5+incompatible h1:vdxx6wM1rVkKt/3niByPVjguoLWkWImOcJNvEykgBzY=
github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a h1:pv34s756C4pEXnjgPfGYgdhg/ZdajGhyOvzx8k+23nw=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-sdk-go v1.30.7 h1:IaXfqtioP6p9SFAnNfsqdNczbR5UNbYqvcZUSsCAdTY=
github.com/aws/aws-sdk-go v1.30.7/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo=
github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/dsnet/try v0.0.3 h1:ptR59SsrcFUYbT/FhAbKTV6iLkeD6O18qfIWRml2fqI=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/geoffgarside/ber v1.1.0 h1:qTmFG4jJbwiSzSXoNJeHcOprVzZ8Ulde2Rrrifu5U9w=
github.com/geoffgarside/ber v1.1.0/go.mod h1:jVPKeCbj6MvQZhwLYsGwaGI52oUorHoHKNecGT85ZCc=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible h1:2fZxTUw5D9uGWnYTsU/obVavn+1qTF+TsVok3U8uN2Q=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible/go.mod h1:qsVRCpBUm2l0eMUeI9wZ47yzra2+lv2YkGhMZpzBVUc=
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9 h1:uMgtTp4sRJ7kMQMF3xEKeFntf3XatwkLNL/byj8v97g=
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9/go.mod h1:WOnN3JdZiZwUaYtLH2DRxe5PpD43wuOIvc/Wem/39M0=
github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916 h1:7VpJiGwW51MB7yJ5e27Ar/ej8Yu7WuU2SEo409qPoNs=
github.com/gilbertchen/go-dropbox v0.0.0-20230321030224-087ef8db1916/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
github.com/gilbertchen/go-ole v1.2.0 h1:ay65uwxo6w8UVOxN0+fuCqUXGaXxbmkGs5m4uY6e1Zw=
github.com/gilbertchen/go-ole v1.2.0/go.mod h1:NNiozp7QxhyGmHxxNdFKIcVaINvJFTAjBJ2gYzh8fsg=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2 h1:VDPwi3huqeJBtymgLOvPAP4S2gbSSK/UrWVwRbRAmnw=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2/go.mod h1:AoxJeh8meXUrSWBLiq9BJvYMd9RAAGgEUU0gSkNedRY=
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:0SR0aXvil/eQReU0olxp/j04B+Y/47fjDMotIxaAgKo=
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:HDsXH7AAfDsfYYX0te4zsNbnwVvZ2RtLEOCjN4y84jw=
github.com/gilbertchen/highwayhash v0.0.0-20221109044721-eeab1f4799d8 h1:ijgl4Y+OKCIFiCPk/Rf9tb6PrarVqitu5TynpyCmRK0=
github.com/gilbertchen/highwayhash v0.0.0-20221109044721-eeab1f4799d8/go.mod h1:0lQcVva56+L1PuUFXLOsJ6arJQaU0baIH8q+IegeBhg=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508 h1:SqTyk5KkNXp7zTdTttIZSDcTrL5uau4K/2OpKvgBZVI=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508/go.mod h1:w/pisxUZezf2XzU9Ewjphcf6q1mZtOzKPHhJiuc8cag=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01 h1:LqwS9qL6SrDkp0g0iwUkETrDdtB9gTKaIbSn9imUq5o=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01/go.mod h1:TMlibuxKfkdtHyltooAw7+DHqRpaXs9nxaffk00Sh1Q=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 h1:G1U0vew/vA/1/hBmf1XNeyIzJJbPFVv+kb+HPl6rj6c=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8/go.mod h1:/Ya1YZsqLQp17bDgHdyE9/XBR1uIH1HKasTvLxcoM/A=
github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.5.0 h1:LUVKkCeviFUMKqHa4tXIIij/lbhnMbP7Fn5wKdKkRh4=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20211108044417-e9b028704de0 h1:rsq1yB2xiFLDYYaYdlGBsSkwVzsCo500wMhxvW5A/bk=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hirochachacha/go-smb2 v1.1.0 h1:b6hs9qKIql9eVXAiN0M2wSFY5xnhbHAQoCwRKbaRTZI=
github.com/hirochachacha/go-smb2 v1.1.0/go.mod h1:8F1A4d5EZzrGu5R7PU163UcMRDJQl4FtcxjBfsY8TZE=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolio/eventkit v0.0.0-20221004135224-074cf276595b h1:tO4MX3k5bvV0Sjv5jYrxStMTJxf1m/TW24XRyHji4aU=
github.com/jtolio/eventkit v0.0.0-20221004135224-074cf276595b/go.mod h1:q7yMR8BavTz/gBNtIT/uF487LMgcuEpNGKISLAjNQes=
github.com/jtolio/noiseconn v0.0.0-20230111204749-d7ec1a08b0b8 h1:+A1uT26XjTsxiUUZjAAuveILWWy+Sy2TPX8OIgGvPQE=
github.com/jtolio/noiseconn v0.0.0-20230111204749-d7ec1a08b0b8/go.mod h1:f0ijQHcvHYAuxX6JA/JUr/Z0FVn12D9REaT/HAWVgP4=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.3 h1:XuJt9zzcnaz6a16/OU53ZjWp/v7/42WcR5t2a0PcNQY=
github.com/klauspost/compress v1.16.3/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/klauspost/cpuid/v2 v2.0.12 h1:p9dKCg8i4gmOxtv35DvrYoWqYzQrvEVdjQ762Y0OqZE=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/reedsolomon v1.9.9 h1:qCL7LZlv17xMixl55nq2/Oa1Y86nfO8EqDfv2GHND54=
github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 h1:ULR/QWMgcgRiZLUjSSJMU+fW+RDMstRdmnDWj9Q+AsA=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI=
github.com/quic-go/quic-go v0.38.0 h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc=
github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/go-env v1.1.0 h1:AGJ7OnCx9M5NWpkYPGYELS6III/pFSnAs1GvKWStiEo=
github.com/segmentio/go-env v1.1.0/go.mod h1:pEKO2ieHe8zF098OMaAHw21SajMuONlnI/vJNB3pB7I=
github.com/spacemonkeygo/monkit/v3 v3.0.22 h1:4/g8IVItBDKLdVnqrdHZrCVPpIrwDBzl1jrV0IHQHDU=
github.com/spacemonkeygo/monkit/v3 v3.0.22/go.mod h1:XkZYGzknZwkD0AKUnZaSXhRiVTLCkq7CWVa3IsE72gA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A=
github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.9.1 h1:8WMNJAz3zrtPmnYC7ISf5dEn3MT0gY7jBJfw27yrrLo=
golang.org/x/tools v0.9.1/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.21.0 h1:zS+Q/CJJnVlXpXQVIz+lH0ZT2lBuT2ac7XD8Y/3w6hY=
google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d h1:I7Vuu5Ejagca+VcgfBINHke3xwjCTYnIG4Q57fv0wYY=
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k=
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
storj.io/common v0.0.0-20230920095429-0ce0a575e6f8 h1:i+bWPhVnNL6z/TLW3vDZytB6/0bsvJM0a1GhLCxrlxQ=
storj.io/common v0.0.0-20230920095429-0ce0a575e6f8/go.mod h1:ZmeGPzRb2sm705Nwt/WwuH3e6mliShfvvoUNy1bb9v4=
storj.io/drpc v0.0.33 h1:yCGZ26r66ZdMP0IcTYsj7WDAUIIjzXk6DJhbhvt9FHI=
storj.io/drpc v0.0.33/go.mod h1:vR804UNzhBa49NOJ6HeLjd2H3MakC1j5Gv8bsOQT6N4=
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c h1:or/DtG5uaZpzimL61ahlgAA+MTYn/U3txz4fe+XBFUg=
storj.io/picobuf v0.0.2-0.20230906122608-c4ba17033c6c/go.mod h1:JCuc3C0gzCJHQ4J6SOx/Yjg+QTpX0D+Fvs5H46FETCk=
storj.io/uplink v1.12.1 h1:bDc2dI6Q7EXcvPJLZuH9jIOTIf2oKxvW3xKEA+Y5EI0=
storj.io/uplink v1.12.1/go.mod h1:1+czctHG25pMzcUp4Mds6QnoJ7LvbgYA5d1qlpFFexg=

View File

@@ -1,153 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"testing"
crypto_rand "crypto/rand"
"math/rand"
)
func TestACDClient(t *testing.T) {
acdClient, err := NewACDClient("acd-token.json")
if err != nil {
t.Errorf("Failed to create the ACD client: %v", err)
return
}
acdClient.TestMode = true
rootID, _, _, err := acdClient.ListByName("", "")
if err != nil {
t.Errorf("Failed to get the root node: %v", err)
return
}
if rootID == "" {
t.Errorf("No root node")
return
}
testID, _, _, err := acdClient.ListByName(rootID, "test")
if err != nil {
t.Errorf("Failed to list the test directory: %v", err)
return
}
if testID == "" {
testID, err = acdClient.CreateDirectory(rootID, "test")
if err != nil {
t.Errorf("Failed to create the test directory: %v", err)
return
}
}
test1ID, _, _, err := acdClient.ListByName(testID, "test1")
if err != nil {
t.Errorf("Failed to list the test1 directory: %v", err)
return
}
if test1ID == "" {
test1ID, err = acdClient.CreateDirectory(testID, "test1")
if err != nil {
t.Errorf("Failed to create the test1 directory: %v", err)
return
}
}
test2ID, _, _, err := acdClient.ListByName(testID, "test2")
if err != nil {
t.Errorf("Failed to list the test2 directory: %v", err)
return
}
if test2ID == "" {
test2ID, err = acdClient.CreateDirectory(testID, "test2")
if err != nil {
t.Errorf("Failed to create the test2 directory: %v", err)
return
}
}
fmt.Printf("test1: %s, test2: %s\n", test1ID, test2ID)
numberOfFiles := 20
maxFileSize := 64 * 1024
for i := 0; i < numberOfFiles; i++ {
content := make([]byte, rand.Int()%maxFileSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hasher := sha256.New()
hasher.Write(content)
filename := hex.EncodeToString(hasher.Sum(nil))
fmt.Printf("file: %s\n", filename)
_, err = acdClient.UploadFile(test1ID, filename, content, 100)
if err != nil {
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
t.Errorf("Failed to upload the file %s: %v", filename, err)
return
}
}
}
entries, err := acdClient.ListEntries(test1ID, true, false)
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
err = acdClient.MoveFile(entry.ID, test1ID, test2ID)
if err != nil {
t.Errorf("Failed to move %s: %v", entry.Name, err)
return
}
}
entries, err = acdClient.ListEntries(test2ID, true, false)
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
readCloser, _, err := acdClient.DownloadFile(entry.ID)
if err != nil {
t.Errorf("Error downloading file %s: %v", entry.Name, err)
return
}
hasher := sha256.New()
io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Name {
t.Errorf("File %s, hash %s", entry.Name, hash)
}
readCloser.Close()
}
for _, entry := range entries {
err = acdClient.DeleteFile(entry.ID)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
return
}
}
}

View File

@@ -329,7 +329,7 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
if client.DownloadURL == "" {
client.DownloadURL = output.DownloadURL
}
LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
LOG_INFO("BACKBLAZE_URL", "Download URL is: %s", client.DownloadURL)
client.IsAuthorized = true
client.LastAuthorizationTime = time.Now().Unix()
@@ -584,8 +584,26 @@ func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID strin
func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
if !strings.HasSuffix(filePath, ".fsl") {
url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + filePath)
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
return readCloser, len, err
}
// We're trying to download a fossil file. We need to find the file ID of the last 'upload' of the file.
filePath = strings.TrimSuffix(filePath, ".fsl")
entries, err := client.ListFileNames(threadIndex, filePath, true, true)
fileId := ""
for _, entry := range entries {
if entry.FileName == filePath && entry.Action == "upload" && entry.Size > 0 {
fileId = entry.FileID
break
}
}
// Proceed with the b2_download_file_by_id call
url := client.getAPIURL() + "/b2api/v1/b2_download_file_by_id?fileId=" + fileId
readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
return readCloser, len, err
}
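
To make the fossil path above easier to follow, here is a minimal sketch reusing the B2Client helpers that appear in this diff (ListFileNames, getAPIURL, call); the downloadFossil name itself is illustrative and not part of the codebase.

func downloadFossil(client *B2Client, threadIndex int, fossilPath string) (io.ReadCloser, int64, error) {
	// A fossil is a hidden chunk; strip the .fsl suffix to recover the original path.
	filePath := strings.TrimSuffix(fossilPath, ".fsl")
	entries, err := client.ListFileNames(threadIndex, filePath, true, true)
	if err != nil {
		return nil, 0, err
	}
	for _, entry := range entries {
		// Only a non-empty 'upload' version still carries the chunk data.
		if entry.FileName == filePath && entry.Action == "upload" && entry.Size > 0 {
			url := client.getAPIURL() + "/b2api/v1/b2_download_file_by_id?fileId=" + entry.FileID
			readCloser, _, length, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
			return readCloser, length, err
		}
	}
	return nil, 0, fmt.Errorf("no uploaded version found for %s", filePath)
}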

View File

@@ -1,133 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"testing"
crypto_rand "crypto/rand"
"io"
"io/ioutil"
"math/rand"
)
func createB2ClientForTest(t *testing.T) (*B2Client, string) {
config, err := ioutil.ReadFile("test_storage.conf")
if err != nil {
t.Errorf("Failed to read config file: %v", err)
return nil, ""
}
storages := make(map[string]map[string]string)
err = json.Unmarshal(config, &storages)
if err != nil {
t.Errorf("Failed to parse config file: %v", err)
return nil, ""
}
b2, found := storages["b2"]
if !found {
t.Errorf("Failed to find b2 config")
return nil, ""
}
return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
}
func TestB2Client(t *testing.T) {
b2Client, bucket := createB2ClientForTest(t)
if b2Client == nil {
return
}
b2Client.TestMode = true
err, _ := b2Client.AuthorizeAccount(0)
if err != nil {
t.Errorf("Failed to authorize the b2 account: %v", err)
return
}
err = b2Client.FindBucket(bucket)
if err != nil {
t.Errorf("Failed to find bucket '%s': %v", bucket, err)
return
}
testDirectory := "b2client_test/"
files, err := b2Client.ListFileNames(0, testDirectory, false, false)
if err != nil {
t.Errorf("Failed to list files: %v", err)
return
}
for _, file := range files {
err = b2Client.DeleteFile(0, file.FileName, file.FileID)
if err != nil {
t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
}
}
maxSize := 10000
for i := 0; i < 20; i++ {
size := rand.Int()%maxSize + 1
content := make([]byte, size)
_, err := crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hash := sha256.Sum256(content)
name := hex.EncodeToString(hash[:])
err = b2Client.UploadFile(0, testDirectory+name, content, 100)
if err != nil {
t.Errorf("Error uploading file '%s': %v", name, err)
return
}
}
files, err = b2Client.ListFileNames(0, testDirectory, false, false)
if err != nil {
t.Errorf("Failed to list files: %v", err)
return
}
for _, file := range files {
readCloser, _, err := b2Client.DownloadFile(0, file.FileName)
if err != nil {
t.Errorf("Error downloading file '%s': %v", file.FileName, err)
return
}
defer readCloser.Close()
hasher := sha256.New()
_, err = io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if testDirectory+hash != file.FileName {
t.Errorf("File %s has hash %s", file.FileName, hash)
}
}
for _, file := range files {
err = b2Client.DeleteFile(0, file.FileName, file.FileID)
if err != nil {
t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
}
}
}

View File

@@ -47,6 +47,10 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
manager.config.dryRun = dryRun
}
func (manager *BackupManager) SetCompressionLevel(level int) {
manager.config.CompressionLevel = level
}
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
// master key which can be nil if encryption is not enabled.
@@ -138,6 +142,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)
manager.config.PrintCompressionLevel()
if manager.config.DataShards != 0 && manager.config.ParityShards != 0 {
LOG_INFO("BACKUP_ERASURECODING", "Erasure coding is enabled with %d data shards and %d parity shards",
manager.config.DataShards, manager.config.ParityShards)
@@ -223,7 +229,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
localListingChannel := make(chan *Entry)
remoteListingChannel := make(chan *Entry)
chunkOperator := CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, showStatistics, threads, false)
chunkOperator := CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, showStatistics, false, threads, false)
var skippedDirectories []string
var skippedFiles []string
@@ -301,26 +307,27 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
if compareResult == 0 {
// No need to check if it is in hash mode -- in that case remote listing is nil
if localEntry.IsSameAs(remoteEntry) && localEntry.IsFile() {
if localEntry.Size > 0 {
localEntry.Hash = remoteEntry.Hash
localEntry.StartOffset = remoteEntry.StartOffset
localEntry.EndOffset = remoteEntry.EndOffset
delta := remoteEntry.StartChunk - len(localEntryList.PreservedChunkHashes)
if lastPreservedChunk != remoteEntry.StartChunk {
lastPreservedChunk = remoteEntry.StartChunk
localEntryList.AddPreservedChunk(remoteSnapshot.ChunkHashes[lastPreservedChunk], remoteSnapshot.ChunkLengths[lastPreservedChunk])
} else {
delta++
}
localEntry.Hash = remoteEntry.Hash
localEntry.StartOffset = remoteEntry.StartOffset
localEntry.EndOffset = remoteEntry.EndOffset
delta := remoteEntry.StartChunk - len(localEntryList.PreservedChunkHashes)
if lastPreservedChunk != remoteEntry.StartChunk {
lastPreservedChunk = remoteEntry.StartChunk
localEntryList.AddPreservedChunk(remoteSnapshot.ChunkHashes[lastPreservedChunk], remoteSnapshot.ChunkLengths[lastPreservedChunk])
} else {
delta++
for i := remoteEntry.StartChunk + 1; i <= remoteEntry.EndChunk; i++ {
localEntryList.AddPreservedChunk(remoteSnapshot.ChunkHashes[i], remoteSnapshot.ChunkLengths[i])
lastPreservedChunk = i
}
localEntry.StartChunk = remoteEntry.StartChunk - delta
localEntry.EndChunk = remoteEntry.EndChunk - delta
preservedFileSize += localEntry.Size
}
for i := remoteEntry.StartChunk + 1; i <= remoteEntry.EndChunk; i++ {
localEntryList.AddPreservedChunk(remoteSnapshot.ChunkHashes[i], remoteSnapshot.ChunkLengths[i])
lastPreservedChunk = i
}
localEntry.StartChunk = remoteEntry.StartChunk - delta
localEntry.EndChunk = remoteEntry.EndChunk - delta
preservedFileSize += localEntry.Size
} else {
totalModifiedFileSize += localEntry.Size
if localEntry.Size > 0 {
@@ -374,7 +381,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
}
var once sync.Once
if hashMode {
if hashMode && !manager.config.dryRun {
// In case an error occurs during a hash mode backup, save the incomplete snapshot
RunAtError = func() {
once.Do(func() {
@@ -672,7 +679,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
localListingChannel := make(chan *Entry)
remoteListingChannel := make(chan *Entry)
chunkOperator := CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, showStatistics, threads, false)
chunkOperator := CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, showStatistics, false, threads, allowFailures)
LOG_INFO("RESTORE_INDEXING", "Indexing %s", top)
go func() {
@@ -1047,24 +1054,24 @@ func (manager *BackupManager) UploadSnapshot(chunkOperator *ChunkOperator, top s
uploadEntryInfoFunc := func(entry *Entry) error {
delta := entry.StartChunk - len(chunkHashes) + 1
if entry.StartChunk != lastChunk {
chunkHashes = append(chunkHashes, snapshot.ChunkHashes[entry.StartChunk])
chunkLengths = append(chunkLengths, snapshot.ChunkLengths[entry.StartChunk])
delta--
}
if entry.IsFile() && entry.Size > 0 {
delta := entry.StartChunk - len(chunkHashes) + 1
if entry.StartChunk != lastChunk {
chunkHashes = append(chunkHashes, snapshot.ChunkHashes[entry.StartChunk])
chunkLengths = append(chunkLengths, snapshot.ChunkLengths[entry.StartChunk])
delta--
}
for i := entry.StartChunk + 1; i <= entry.EndChunk; i++ {
chunkHashes = append(chunkHashes, snapshot.ChunkHashes[i])
chunkLengths = append(chunkLengths, snapshot.ChunkLengths[i])
}
for i := entry.StartChunk + 1; i <= entry.EndChunk; i++ {
chunkHashes = append(chunkHashes, snapshot.ChunkHashes[i])
chunkLengths = append(chunkLengths, snapshot.ChunkLengths[i])
}
lastChunk = entry.EndChunk
entry.StartChunk -= delta
entry.EndChunk -= delta
lastChunk = entry.EndChunk
entry.StartChunk -= delta
entry.EndChunk -= delta
if entry.IsFile() {
delta := entry.EndChunk - entry.StartChunk
delta = entry.EndChunk - entry.StartChunk
entry.StartChunk -= lastEndChunk
lastEndChunk = entry.EndChunk
entry.EndChunk = delta
@@ -1551,7 +1558,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapshotID string,
revisionsToBeCopied []int, uploadingThreads int, downloadingThreads int) bool {
if !manager.config.IsCompatiableWith(otherManager.config) {
if !manager.config.IsCompatibleWith(otherManager.config) {
LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
return false
}
@@ -1714,13 +1721,13 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
LOG_INFO("SNAPSHOT_COPY", "Chunks to copy: %d, to skip: %d, total: %d", len(chunksToCopy), len(chunks) - len(chunksToCopy), len(chunks))
chunkDownloader := CreateChunkOperator(manager.config, manager.storage, nil, false, downloadingThreads, false)
chunkDownloader := CreateChunkOperator(manager.config, manager.storage, nil, false, false, downloadingThreads, false)
var uploadedBytes int64
startTime := time.Now()
copiedChunks := 0
chunkUploader := CreateChunkOperator(otherManager.config, otherManager.storage, nil, false, uploadingThreads, false)
chunkUploader := CreateChunkOperator(otherManager.config, otherManager.storage, nil, false, false, uploadingThreads, false)
chunkUploader.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
action := "Skipped"
if !skipped {

View File

@@ -358,16 +358,17 @@ func TestBackupManager(t *testing.T) {
if numberOfSnapshots != 3 {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1, 2, 3}, /*tag*/ "", /*showStatistics*/ false,
/*showTabular*/ false, /*checkFiles*/ false, /*checkChunks*/ false, /*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ false)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
if numberOfSnapshots != 2 {
t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{2, 3}, /*tag*/ "", /*showStatistics*/ false,
/*showTabular*/ false, /*checkFiles*/ false, /*checkChunks*/ false, /*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false, 1024, 1024)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
@@ -375,8 +376,8 @@ func TestBackupManager(t *testing.T) {
if numberOfSnapshots != 3 {
t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{2, 3, 4}, /*tag*/ "", /*showStatistics*/ false,
/*showTabular*/ false, /*checkFiles*/ false, /*checkChunks*/ false, /*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ false)
/*buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
@@ -548,13 +549,13 @@ func TestPersistRestore(t *testing.T) {
// check snapshots
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "",
/*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false,
/*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ false)
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, false)
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "",
/*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false,
/*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ false)
// check functions
checkAllUncorrupted := func(cmpRepository string) {
@@ -640,18 +641,28 @@ func TestPersistRestore(t *testing.T) {
os.Remove(testDir+"/unenc_storage"+"/chunks"+chunkToCorrupt1)
os.Remove(testDir+"/enc_storage"+"/chunks"+chunkToCorrupt2)
}
// This is to make sure that allowFailures is set to true. Note that this is not needed
// in production code because the chunkOperator is only recreated multiple times in tests.
if unencBackupManager.SnapshotManager.chunkOperator != nil {
unencBackupManager.SnapshotManager.chunkOperator.allowFailures = true
}
if encBackupManager.SnapshotManager.chunkOperator != nil {
encBackupManager.SnapshotManager.chunkOperator.allowFailures = true
}
// check snapshots with --persist (allowFailures == true)
// this would cause a panic and os.Exit from duplicacy_log if allowFailures == false
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
unencBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "",
/*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false,
/*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ true)
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1} /*tag*/, "",
/*showStatistics*/ true /*showTabular*/, false /*checkFiles*/, true /*checkChunks*/, false,
/*searchFossils*/ false /*resurrect*/, false, 1 /*allowFailures*/, true)
encBackupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1", /*revisions*/ []int{1}, /*tag*/ "",
/*showStatistics*/ true, /*showTabular*/ false, /*checkFiles*/ true, /*checkChunks*/ false,
/*searchFossils*/ false, /*resurrect*/ false, /*rewrite*/ false, 1, /*allowFailures*/ true)
// test restore corrupted, inPlace = true, corrupted files will have hash failures
os.RemoveAll(testDir+"/repository2")
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")

View File

@@ -24,6 +24,13 @@ import (
"github.com/bkaradzic/go-lz4"
"github.com/minio/highwayhash"
"github.com/klauspost/reedsolomon"
"github.com/klauspost/compress/zstd"
// This is a fork of github.com/minio/highwayhash at 1.0.1 that computes an incorrect hash on
// arm64 machines. We need this fork to be able to read the chunks created by Duplicacy
// CLI 3.0.1, which unfortunately relies on the incorrect hashes to determine whether each shard is valid.
wronghighwayhash "github.com/gilbertchen/highwayhash"
)
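
A minimal sketch of the dual-hash verification this import enables, assuming 32-byte hash keys as used below; verifyShard is an illustrative helper, not a function in this file. The correct hasher is tried first, and only on arm64 does the check fall back to the forked implementation so that shards written by CLI 3.0.1 still validate (and get flagged for rewrite).

func verifyShard(hashKey, shard, expected []byte) (matched bool, wrongHash bool, err error) {
	hasher, err := highwayhash.New(hashKey)
	if err != nil {
		return false, false, err
	}
	hasher.Write(shard)
	if bytes.Equal(hasher.Sum(nil), expected) {
		return true, false, nil
	}
	// Only arm64 builds ever produced the incorrect hashes.
	if runtime.GOARCH != "arm64" {
		return false, false, nil
	}
	oldHasher, err := wronghighwayhash.New(hashKey)
	if err != nil {
		return false, false, err
	}
	oldHasher.Write(shard)
	if bytes.Equal(oldHasher.Sum(nil), expected) {
		// The shard is intact but was hashed with the buggy fork; the caller
		// should schedule the chunk for a rewrite with correct hashes.
		return true, true, nil
	}
	return false, false, nil
}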
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
@@ -261,6 +268,38 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetada
deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
deflater.Write(chunk.buffer.Bytes())
deflater.Close()
} else if chunk.config.CompressionLevel >= ZSTD_COMPRESSION_LEVEL_FASTEST && chunk.config.CompressionLevel <= ZSTD_COMPRESSION_LEVEL_BEST {
encryptedBuffer.Write([]byte("ZSTD"))
compressionLevel := zstd.SpeedDefault
if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_FASTEST {
compressionLevel = zstd.SpeedFastest
} else if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_BETTER {
compressionLevel = zstd.SpeedBetterCompression
} else if chunk.config.CompressionLevel == ZSTD_COMPRESSION_LEVEL_BEST {
compressionLevel = zstd.SpeedBestCompression
}
deflater, err := zstd.NewWriter(encryptedBuffer, zstd.WithEncoderLevel(compressionLevel))
if err != nil {
return err
}
// Make sure we have enough space in encryptedBuffer
availableLength := encryptedBuffer.Cap() - len(encryptedBuffer.Bytes())
maximumLength := deflater.MaxEncodedSize(chunk.buffer.Len())
if availableLength < maximumLength {
encryptedBuffer.Grow(maximumLength - availableLength)
}
_, err = deflater.Write(chunk.buffer.Bytes())
if err != nil {
return fmt.Errorf("ZSTD compression error: %v", err)
}
err = deflater.Close()
if err != nil {
return fmt.Errorf("ZSTD compression error: %v", err)
}
} else if chunk.config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
encryptedBuffer.Write([]byte("LZ4 "))
// Make sure we have enough space in encryptedBuffer
@@ -355,7 +394,6 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetada
chunk.buffer.Write(header)
return nil
}
// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
@@ -371,8 +409,9 @@ func init() {
// Decrypt decrypts the encrypted data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err error) {
func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err error, rewriteNeeded bool) {
rewriteNeeded = false
var offset int
encryptedBuffer := AllocateChunkBuffer()
@@ -388,13 +427,13 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
// The chunk was encoded with erasure coding
if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes())), false
}
// Check the header checksum
header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
return fmt.Errorf("Erasure coding header corrupted (%x)", header)
return fmt.Errorf("Erasure coding header corrupted (%x)", header), false
}
// Read the parameters
@@ -414,7 +453,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
} else if len(encryptedBuffer.Bytes()) > minimumLength {
LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
} else {
return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards), false
}
// Where the hashes start
@@ -426,6 +465,8 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
recoveryNeeded := false
hashKey := make([]byte, 32)
availableShards := 0
wrongHashDetected := false
for i := 0; i < dataShards + parityShards; i++ {
start := dataOffset + i * shardSize
if start + shardSize > len(encryptedBuffer.Bytes()) {
@@ -435,15 +476,34 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
// Now verify the hash
hasher, err := highwayhash.New(hashKey)
if err != nil {
return err
return err, false
}
_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
if err != nil {
return err
return err, false
}
if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
matched := bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) == 0
if !matched && runtime.GOARCH == "arm64" {
hasher, err := wronghighwayhash.New(hashKey)
if err == nil {
_, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
if err == nil {
matched = bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) == 0
if matched && !wrongHashDetected {
LOG_WARN("CHUNK_ERASURECODE", "Hash for shard %d was calculated with a wrong version of highwayhash", i)
wrongHashDetected = true
rewriteNeeded = true
}
}
}
}
if !matched {
if i < dataShards {
recoveryNeeded = true
rewriteNeeded = true
}
} else {
// The shard is good
@@ -463,7 +523,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
} else {
if availableShards < dataShards {
return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards), false
}
// Show the validity of shards using a string of * and -
@@ -479,11 +539,11 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
encoder, err := reedsolomon.New(dataShards, parityShards)
if err != nil {
return err
return err, false
}
err = encoder.Reconstruct(data)
if err != nil {
return err
return err, false
}
LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
buffer := AllocateChunkBuffer()
@@ -516,28 +576,28 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
}
if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes())), false
}
if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
return fmt.Errorf("The storage doesn't seem to be encrypted")
return fmt.Errorf("The storage doesn't seem to be encrypted"), false
}
encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
return fmt.Errorf("Unsupported encryption version %d", encryptionVersion), false
}
if encryptionVersion == ENCRYPTION_VERSION_RSA {
if chunk.config.rsaPrivateKey == nil {
LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
return fmt.Errorf("An RSA private key is required to decrypt the chunk")
return fmt.Errorf("An RSA private key is required to decrypt the chunk"), false
}
encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes())), false
}
encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
@@ -545,19 +605,19 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
if err != nil {
return err
return err, false
}
key = decryptedKey
}
aesBlock, err := aes.NewCipher(key)
if err != nil {
return err
return err, false
}
gcm, err := cipher.NewGCM(aesBlock)
if err != nil {
return err
return err, false
}
offset = bannerLength + gcm.NonceSize()
@@ -567,7 +627,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
encryptedBuffer.Bytes()[offset:], nil)
if err != nil {
return err
return err, false
}
paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
@@ -575,14 +635,14 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
paddingLength = 256
}
if len(decryptedBytes) <= paddingLength {
return fmt.Errorf("Incorrect padding length %d out of %d bytes", paddingLength, len(decryptedBytes))
return fmt.Errorf("Incorrect padding length %d out of %d bytes", paddingLength, len(decryptedBytes)), false
}
for i := 0; i < paddingLength; i++ {
padding := decryptedBytes[len(decryptedBytes)-1-i]
if padding != byte(paddingLength) {
return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
decryptedBytes[len(decryptedBytes)-paddingLength:])
decryptedBytes[len(decryptedBytes)-paddingLength:]), false
}
}
@@ -596,18 +656,36 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
chunk.buffer.Reset()
decompressed, err := lz4.Decode(chunk.buffer.Bytes(), encryptedBuffer.Bytes()[4:])
if err != nil {
return err
return err, false
}
chunk.buffer.Write(decompressed)
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hasher.Write(decompressed)
chunk.hash = nil
return nil
return nil, rewriteNeeded
}
if len(compressed) > 4 && string(compressed[:4]) == "ZSTD" {
chunk.buffer.Reset()
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hash = nil
encryptedBuffer.Read(encryptedBuffer.Bytes()[:4])
inflater, err := zstd.NewReader(encryptedBuffer)
if err != nil {
return err, false
}
defer inflater.Close()
if _, err = io.Copy(chunk, inflater); err != nil {
return err, false
}
return nil, rewriteNeeded
}
inflater, err := zlib.NewReader(encryptedBuffer)
if err != nil {
return err
return err, false
}
defer inflater.Close()
@@ -617,9 +695,9 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
chunk.hash = nil
if _, err = io.Copy(chunk, inflater); err != nil {
return err
return err, false
}
return nil
return nil, rewriteNeeded
}

View File

@@ -43,7 +43,7 @@ func TestErasureCoding(t *testing.T) {
chunk.Reset(false)
chunk.Write(encryptedData)
err = chunk.Decrypt([]byte(""), "")
err, _ = chunk.Decrypt([]byte(""), "")
if err != nil {
t.Errorf("Failed to decrypt the data: %v", err)
return
@@ -110,7 +110,7 @@ func TestChunkBasic(t *testing.T) {
chunk.Reset(false)
chunk.Write(encryptedData)
err = chunk.Decrypt(key, "")
err, _ = chunk.Decrypt(key, "")
if err != nil {
t.Errorf("Failed to decrypt the data: %v", err)
continue

View File

@@ -57,11 +57,14 @@ type ChunkOperator struct {
allowFailures bool // Whether to fail on download error, or continue
NumberOfFailedChunks int64 // The number of chunks that can't be downloaded
rewriteChunks bool // Whether to rewrite corrupted chunks when erasure coding is enabled
UploadCompletionFunc func(chunk *Chunk, chunkIndex int, inCache bool, chunkSize int, uploadSize int)
}
// CreateChunkOperator creates a new ChunkOperator.
func CreateChunkOperator(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkOperator {
func CreateChunkOperator(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, rewriteChunks bool, threads int,
allowFailures bool) *ChunkOperator {
operator := &ChunkOperator{
config: config,
@@ -74,8 +77,9 @@ func CreateChunkOperator(config *Config, storage Storage, snapshotCache *FileSto
stopChannel: make(chan bool),
collectionLock: &sync.Mutex{},
startTime: time.Now().Unix(),
allowFailures: allowFailures,
rewriteChunks: rewriteChunks,
}
// Start the operator goroutines
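
For reference, a hedged usage sketch of the widened constructor; the new rewriteChunks flag sits between showStatistics and threads, which is why every call site in this change gains an extra false argument.

// Sketch only: enable rewriting of corrupted erasure-coded chunks during a check.
operator := CreateChunkOperator(config, storage, snapshotCache,
	/*showStatistics*/ false, /*rewriteChunks*/ true, /*threads*/ 4, /*allowFailures*/ false)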
@@ -331,24 +335,34 @@ func (operator *ChunkOperator) DownloadChunk(threadIndex int, task ChunkTask) {
atomic.AddInt64(&operator.NumberOfFailedChunks, 1)
if operator.allowFailures {
chunk.isBroken = true
task.completionFunc(chunk, task.chunkIndex)
}
}
chunkPath := ""
fossilPath := ""
filePath := ""
const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
exist := false
var err error
// Find the chunk by ID first.
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, false)
chunkPath, exist, _, err = operator.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return
}
if !exist {
if exist {
filePath = chunkPath
} else {
// No chunk is found. Have to find it in the fossil pool again.
fossilPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, true)
fossilPath, exist, _, err = operator.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
@@ -383,20 +397,11 @@ func (operator *ChunkOperator) DownloadChunk(threadIndex int, task ChunkTask) {
return
}
// We can't download the fossil directly. We have to turn it back into a regular chunk and try
// downloading again.
err = operator.storage.MoveFile(threadIndex, fossilPath, chunkPath)
if err != nil {
completeFailedChunk()
LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
return
}
LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
continue
filePath = fossilPath
LOG_WARN("DOWNLOAD_FOSSIL", "Chunk %s is a fossil", chunkID)
}
err = operator.storage.DownloadFile(threadIndex, chunkPath, chunk)
err = operator.storage.DownloadFile(threadIndex, filePath, chunk)
if err != nil {
_, isHubic := operator.storage.(*HubicStorage)
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
@@ -412,7 +417,8 @@ func (operator *ChunkOperator) DownloadChunk(threadIndex int, task ChunkTask) {
}
}
err = chunk.Decrypt(operator.config.ChunkKey, task.chunkHash)
rewriteNeeded := false
err, rewriteNeeded = chunk.Decrypt(operator.config.ChunkKey, task.chunkHash)
if err != nil {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
@@ -440,6 +446,38 @@ func (operator *ChunkOperator) DownloadChunk(threadIndex int, task ChunkTask) {
}
}
if rewriteNeeded && operator.rewriteChunks {
if filePath != fossilPath {
fossilPath = filePath + ".fsl"
err := operator.storage.MoveFile(threadIndex, chunkPath, fossilPath)
if err != nil {
LOG_WARN("CHUNK_REWRITE", "Failed to fossilize the chunk %s: %v", task.chunkID, err)
} else {
LOG_TRACE("CHUNK_REWRITE", "The existing chunk %s has been marked as a fossil for rewrite", task.chunkID)
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.collectionLock.Unlock()
}
}
newChunk := operator.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(chunk.GetBytes())
// Encrypt the chunk only after we know that it must be uploaded.
err = newChunk.Encrypt(operator.config.ChunkKey, chunk.GetHash(), task.isMetadata)
if err == nil {
// Re-upload the chunk
err = operator.storage.UploadFile(threadIndex, chunkPath, newChunk.GetBytes())
if err != nil {
LOG_WARN("CHUNK_REWRITE", "Failed to re-upload the chunk %s: %v", chunkID, err)
} else {
LOG_INFO("CHUNK_REWRITE", "The chunk %s has been re-uploaded", chunkID)
}
}
operator.config.PutChunk(newChunk)
}
break
}
@@ -489,7 +527,7 @@ func (operator *ChunkOperator) UploadChunk(threadIndex int, task ChunkTask) bool
chunk.VerifyID()
}
if task.isMetadata && operator.storage.IsCacheNeeded() {
if task.isMetadata && operator.snapshotCache != nil && operator.storage.IsCacheNeeded() {
// Save a copy to the local snapshot.
chunkPath, exist, _, err := operator.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {

View File

@@ -87,7 +87,7 @@ func TestChunkOperator(t *testing.T) {
totalFileSize += chunk.GetLength()
}
chunkOperator := CreateChunkOperator(config, storage, nil, false, *testThreads, false)
chunkOperator := CreateChunkOperator(config, storage, nil, false, false, *testThreads, false)
chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
}

View File

@@ -35,6 +35,19 @@ var DEFAULT_KEY = []byte("duplicacy")
// standard zlib levels of -1 to 9.
var DEFAULT_COMPRESSION_LEVEL = 100
// zstd compression levels starting from 200
var ZSTD_COMPRESSION_LEVEL_FASTEST = 200
var ZSTD_COMPRESSION_LEVEL_DEFAULT = 201
var ZSTD_COMPRESSION_LEVEL_BETTER = 202
var ZSTD_COMPRESSION_LEVEL_BEST = 203
var ZSTD_COMPRESSION_LEVELS = map[string]int {
"fastest": ZSTD_COMPRESSION_LEVEL_FASTEST,
"default": ZSTD_COMPRESSION_LEVEL_DEFAULT,
"better": ZSTD_COMPRESSION_LEVEL_BETTER,
"best": ZSTD_COMPRESSION_LEVEL_BEST,
}
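
A minimal sketch of how these named levels map onto klauspost/compress encoder speeds, mirroring the switch added to Chunk.Encrypt; zstdEncoderLevel is an illustrative helper, not part of the config API.

func zstdEncoderLevel(compressionLevel int) zstd.EncoderLevel {
	switch compressionLevel {
	case ZSTD_COMPRESSION_LEVEL_FASTEST:
		return zstd.SpeedFastest
	case ZSTD_COMPRESSION_LEVEL_BETTER:
		return zstd.SpeedBetterCompression
	case ZSTD_COMPRESSION_LEVEL_BEST:
		return zstd.SpeedBestCompression
	default:
		// ZSTD_COMPRESSION_LEVEL_DEFAULT and anything unrecognized.
		return zstd.SpeedDefault
	}
}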
// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_BANNER = "duplicacy\001"
@@ -156,10 +169,9 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
return nil
}
func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
func (config *Config) IsCompatibleWith(otherConfig *Config) bool {
return config.CompressionLevel == otherConfig.CompressionLevel &&
config.AverageChunkSize == otherConfig.AverageChunkSize &&
return config.AverageChunkSize == otherConfig.AverageChunkSize &&
config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
@@ -202,6 +214,14 @@ func (config *Config) Print() {
}
func (config *Config) PrintCompressionLevel() {
for name, level := range ZSTD_COMPRESSION_LEVELS {
if level == config.CompressionLevel {
LOG_INFO("COMPRESSION_LEVEL", "Zstd compression is enabled (level: %s)", name)
}
}
}
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
isEncrypted bool, copyFrom *Config, bitCopy bool) (config *Config) {
@@ -234,7 +254,6 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
}
if copyFrom != nil {
config.CompressionLevel = copyFrom.CompressionLevel
config.AverageChunkSize = copyFrom.AverageChunkSize
config.MaximumChunkSize = copyFrom.MaximumChunkSize
@@ -244,6 +263,8 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
config.HashKey = copyFrom.HashKey
if bitCopy {
config.CompressionLevel = copyFrom.CompressionLevel
config.IDKey = copyFrom.IDKey
config.ChunkKey = copyFrom.ChunkKey
config.FileKey = copyFrom.FileKey
@@ -294,7 +315,10 @@ func (config *Config) PutChunk(chunk *Chunk) {
}
func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
// Early versions of Duplicacy used SHA256 as the hash function for chunk IDs at the time when
// only zlib compression was supported. Later SHA256 was replaced by Blake2b and LZ4 was used
// for compression (with compression level set to 100).
if config.CompressionLevel >= DEFAULT_COMPRESSION_LEVEL {
hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
if err != nil {
LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
@@ -339,7 +363,7 @@ func (hasher *DummyHasher) BlockSize() int {
func (config *Config) NewFileHasher() hash.Hash {
if SkipFileHash {
return &DummyHasher{}
} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
} else if config.CompressionLevel >= DEFAULT_COMPRESSION_LEVEL {
hasher, _ := blake2.New(&blake2.Config{Size: 32})
return hasher
} else {
@@ -436,7 +460,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
}
// Decrypt the config file. masterKey == nil means no encryption.
err = configFile.Decrypt(masterKey, "")
err, _ = configFile.Decrypt(masterKey, "")
if err != nil {
return nil, false, fmt.Errorf("Failed to retrieve the config file: %v", err)
}

View File

@@ -21,11 +21,11 @@ type DropboxStorage struct {
}
// CreateDropboxStorage creates a dropbox storage object.
func CreateDropboxStorage(accessToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
func CreateDropboxStorage(refreshToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
var clients []*dropbox.Files
for i := 0; i < threads; i++ {
client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
client := dropbox.NewFiles(dropbox.NewConfig("", refreshToken, "https://duplicacy.com/dropbox_refresh"))
clients = append(clients, client)
}

View File

@@ -550,7 +550,7 @@ func loadIncompleteSnapshot(snapshotID string, cachePath string) *EntryList {
}
}
LOG_INFO("INCOMPLETE_LOAD", "Previous incomlete backup contains %d files and %d chunks",
LOG_INFO("INCOMPLETE_LOAD", "Previous incomplete backup contains %d files and %d chunks",
entryList.NumberOfEntries, len(entryList.PreservedChunkLengths) + len(entryList.UploadedChunkHashes))
return entryList
@@ -559,7 +559,7 @@ func loadIncompleteSnapshot(snapshotID string, cachePath string) *EntryList {
// Delete the incomplete snapshot files.
func deleteIncompleteSnapshot(cachePath string) {
for _, file := range []string{"incomplete_snapshot", "incomplete_chunks"} {
for _, file := range []string{"incomplete_snapshot", "incomplete_chunks", "incomplete_files"} {
filePath := path.Join(cachePath, file)
if _, err := os.Stat(filePath); err == nil {
err = os.Remove(filePath)
@@ -571,4 +571,4 @@ func deleteIncompleteSnapshot(cachePath string) {
}
}
}

View File

@@ -408,22 +408,21 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
if len(driveID) == 0 {
driveID = GCDUserDrive
} else {
driveList, err := drive.NewTeamdrivesService(service).List().Do()
if err != nil {
return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
}
found := false
for _, teamDrive := range driveList.TeamDrives {
if teamDrive.Id == driveID || teamDrive.Name == driveID {
driveID = teamDrive.Id
found = true
break
// In case the driveID is a name, convert it to an id
query := fmt.Sprintf("name='%s'", driveID)
driveList, err := drive.NewDrivesService(service).List().Q(query).Do()
if err == nil {
found := false
for _, drive := range driveList.Drives {
if drive.Name == driveID {
found = true
driveID = drive.Id
break
}
}
if !found {
return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
}
}
if !found {
return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
}
}
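
A simplified, hedged sketch of the lookup above in isolation: drives.list accepts a name query, so a shared drive name can be resolved without paging through the listing that returns at most 10 drives per call (the real code also tolerates the value already being an ID); resolveDriveID is an illustrative name.

func resolveDriveID(service *drive.Service, driveID string) (string, error) {
	query := fmt.Sprintf("name='%s'", driveID)
	driveList, err := drive.NewDrivesService(service).List().Q(query).Do()
	if err != nil {
		return "", err
	}
	for _, d := range driveList.Drives {
		if d.Name == driveID {
			// The name matched; return the real drive ID.
			return d.Id, nil
		}
	}
	return "", fmt.Errorf("%s is not the id or name of a shared drive", driveID)
}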
@@ -775,7 +774,7 @@ func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
// We never download the fossil so there is no need to convert the path
fileID, err := storage.getIDFromPath(threadIndex, filePath, false)
fileID, err := storage.getIDFromPath(threadIndex, storage.convertFilePath(filePath), false)
if err != nil {
return err
}

View File

@@ -1,149 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"testing"
crypto_rand "crypto/rand"
"math/rand"
)
func TestHubicClient(t *testing.T) {
hubicClient, err := NewHubicClient("hubic-token.json")
if err != nil {
t.Errorf("Failed to create the Hubic client: %v", err)
return
}
hubicClient.TestMode = true
existingFiles, err := hubicClient.ListEntries("")
for _, file := range existingFiles {
fmt.Printf("name: %s, isDir: %t\n", file.Name, file.Type == "application/directory")
}
testExists, _, _, err := hubicClient.GetFileInfo("test")
if err != nil {
t.Errorf("Failed to list the test directory: %v", err)
return
}
if !testExists {
err = hubicClient.CreateDirectory("test")
if err != nil {
t.Errorf("Failed to create the test directory: %v", err)
return
}
}
test1Exists, _, _, err := hubicClient.GetFileInfo("test/test1")
if err != nil {
t.Errorf("Failed to list the test1 directory: %v", err)
return
}
if !test1Exists {
err = hubicClient.CreateDirectory("test/test1")
if err != nil {
t.Errorf("Failed to create the test1 directory: %v", err)
return
}
}
test2Exists, _, _, err := hubicClient.GetFileInfo("test/test2")
if err != nil {
t.Errorf("Failed to list the test2 directory: %v", err)
return
}
if !test2Exists {
err = hubicClient.CreateDirectory("test/test2")
if err != nil {
t.Errorf("Failed to create the test2 directory: %v", err)
return
}
}
numberOfFiles := 20
maxFileSize := 64 * 1024
for i := 0; i < numberOfFiles; i++ {
content := make([]byte, rand.Int()%maxFileSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hasher := sha256.New()
hasher.Write(content)
filename := hex.EncodeToString(hasher.Sum(nil))
fmt.Printf("file: %s\n", filename)
err = hubicClient.UploadFile("test/test1/"+filename, content, 100)
if err != nil {
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
t.Errorf("Failed to upload the file %s: %v", filename, err)
return
}
}
}
entries, err := hubicClient.ListEntries("test/test1")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
exists, isDir, size, err := hubicClient.GetFileInfo("test/test1/" + entry.Name)
fmt.Printf("%s exists: %t, isDir: %t, size: %d, err: %v\n", "test/test1/"+entry.Name, exists, isDir, size, err)
err = hubicClient.MoveFile("test/test1/"+entry.Name, "test/test2/"+entry.Name)
if err != nil {
t.Errorf("Failed to move %s: %v", entry.Name, err)
return
}
}
entries, err = hubicClient.ListEntries("test/test2")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
readCloser, _, err := hubicClient.DownloadFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Error downloading file %s: %v", entry.Name, err)
return
}
hasher := sha256.New()
io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Name {
t.Errorf("File %s, hash %s", entry.Name, hash)
}
readCloser.Close()
}
for _, entry := range entries {
err = hubicClient.DeleteFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
return
}
}
}

View File

@@ -5,6 +5,7 @@
package duplicacy
import (
"context"
"bytes"
"encoding/json"
"fmt"
@@ -39,6 +40,7 @@ type OneDriveClient struct {
TokenFile string
Token *oauth2.Token
OAConfig *oauth2.Config
TokenLock *sync.Mutex
IsConnected bool
@@ -49,7 +51,7 @@ type OneDriveClient struct {
APIURL string
}
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {
func NewOneDriveClient(tokenFile string, isBusiness bool, client_id string, client_secret string, drive_id string) (*OneDriveClient, error) {
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
@@ -65,16 +67,34 @@ func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, erro
HTTPClient: http.DefaultClient,
TokenFile: tokenFile,
Token: token,
OAConfig: nil,
TokenLock: &sync.Mutex{},
IsBusiness: isBusiness,
}
if (client_id != "") {
oneOauthConfig := oauth2.Config{
ClientID: client_id,
ClientSecret: client_secret,
Scopes: []string{"Files.ReadWrite", "offline_access"},
Endpoint: oauth2.Endpoint{
AuthURL: "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
},
}
client.OAConfig = &oneOauthConfig
}
if isBusiness {
client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
client.APIURL = "https://graph.microsoft.com/v1.0/me"
client.APIURL = "https://graph.microsoft.com/v1.0/me/drive"
if drive_id != "" {
client.APIURL = "https://graph.microsoft.com/v1.0/drives/"+drive_id
}
} else {
client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
client.APIURL = "https://api.onedrive.com/v1.0"
client.APIURL = "https://api.onedrive.com/v1.0/drive"
}
client.RefreshToken(false)
@@ -218,15 +238,25 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) {
return nil
}
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
if (client.OAConfig == nil) {
readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
defer readCloser.Close()
defer readCloser.Close()
if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
return err
if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
return err
}
} else {
ctx := context.Background()
tokenSource := client.OAConfig.TokenSource(ctx, client.Token)
token, err := tokenSource.Token()
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
client.Token = token
}
description, err := json.Marshal(client.Token)
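
A minimal sketch of the custom-app branch above, assuming golang.org/x/oauth2: with a populated oauth2.Config, a TokenSource refreshes the saved token directly against Microsoft's endpoint instead of the duplicacy.com relay. refreshDirectly is an illustrative helper, not a method of OneDriveClient.

func refreshDirectly(config *oauth2.Config, token *oauth2.Token) (*oauth2.Token, error) {
	// TokenSource transparently refreshes the token once it has expired.
	source := config.TokenSource(context.Background(), token)
	return source.Token()
}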
@@ -258,9 +288,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
entries := []OneDriveEntry{}
url := client.APIURL + "/drive/root:/" + path + ":/children"
url := client.APIURL + "/root:/" + path + ":/children"
if path == "" {
url = client.APIURL + "/drive/root/children"
url = client.APIURL + "/root/children"
}
if client.TestMode {
url += "?top=8"
@@ -296,7 +326,8 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
url := client.APIURL + "/drive/root:/" + path
url := client.APIURL + "/root:/" + path
if path == "" { url = client.APIURL + "/root" }
url += "?select=id,name,size,folder"
readCloser, _, err := client.call(url, "GET", 0, "")
@@ -321,7 +352,7 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
url := client.APIURL + "/drive/items/root:/" + path + ":/content"
url := client.APIURL + "/items/root:/" + path + ":/content"
return client.call(url, "GET", 0, "")
}
@@ -331,7 +362,7 @@ func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit
// Upload file using the simple method; this is only possible for OneDrive Personal or if the file
// is smaller than 4MB for OneDrive Business
if !client.IsBusiness || (client.TestMode && rand.Int() % 2 == 0) {
url := client.APIURL + "/drive/root:/" + path + ":/content"
url := client.APIURL + "/root:/" + path + ":/content"
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
if err != nil {
@@ -365,7 +396,7 @@ func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string
},
}
readCloser, _, err := client.call(client.APIURL + "/drive/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
readCloser, _, err := client.call(client.APIURL + "/root:/" + path + ":/createUploadSession", "POST", input, "application/json")
if err != nil {
return "", err
}
@@ -409,7 +440,7 @@ func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte
func (client *OneDriveClient) DeleteFile(path string) error {
url := client.APIURL + "/drive/root:/" + path
url := client.APIURL + "/root:/" + path
readCloser, _, err := client.call(url, "DELETE", 0, "")
if err != nil {
@@ -422,10 +453,10 @@ func (client *OneDriveClient) DeleteFile(path string) error {
func (client *OneDriveClient) MoveFile(path string, parent string) error {
url := client.APIURL + "/drive/root:/" + path
url := client.APIURL + "/root:/" + path
parentReference := make(map[string]string)
parentReference["path"] = "/drive/root:/" + parent
parentReference["path"] = "/root:/" + parent
parameters := make(map[string]interface{})
parameters["parentReference"] = parentReference
@@ -477,7 +508,7 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {
return fmt.Errorf("The path '%s' is not a directory", path)
}
url = client.APIURL + "/drive/root:/" + path + ":/children"
url = client.APIURL + "/root:/" + path + ":/children"
}
parameters := make(map[string]interface{})

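When a custom client_id is configured, the change above routes token refreshes through golang.org/x/oauth2 instead of the duplicacy.com relay: OAConfig.TokenSource exchanges the stored refresh token for a new access token. A minimal standalone sketch of that flow, assuming the endpoint URLs from the diff and a hypothetical token file name:

package main

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	"golang.org/x/oauth2"
)

func main() {
	// Load a previously saved token; "one-token.json" is a hypothetical name.
	var token oauth2.Token
	data, err := os.ReadFile("one-token.json")
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(data, &token); err != nil {
		panic(err)
	}

	config := oauth2.Config{
		ClientID:     "your-client-id",     // assumption: a custom Azure app registration
		ClientSecret: "your-client-secret", // assumption
		Scopes:       []string{"Files.ReadWrite", "offline_access"},
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://login.microsoftonline.com/common/oauth2/v2.0/authorize",
			TokenURL: "https://login.microsoftonline.com/common/oauth2/v2.0/token",
		},
	}

	// TokenSource refreshes the access token automatically once it expires,
	// using the refresh token carried inside 'token'.
	fresh, err := config.TokenSource(context.Background(), &token).Token()
	if err != nil {
		panic(err)
	}
	fmt.Println("access token expires at:", fresh.Expiry)
}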
View File

@@ -1,145 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"testing"
crypto_rand "crypto/rand"
"math/rand"
)
func TestOneDriveClient(t *testing.T) {
oneDriveClient, err := NewOneDriveClient("one-token.json", false)
if err != nil {
t.Errorf("Failed to create the OneDrive client: %v", err)
return
}
oneDriveClient.TestMode = true
existingFiles, err := oneDriveClient.ListEntries("")
for _, file := range existingFiles {
fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
}
testID, _, _, err := oneDriveClient.GetFileInfo("test")
if err != nil {
t.Errorf("Failed to list the test directory: %v", err)
return
}
if testID == "" {
err = oneDriveClient.CreateDirectory("", "test")
if err != nil {
t.Errorf("Failed to create the test directory: %v", err)
return
}
}
test1ID, _, _, err := oneDriveClient.GetFileInfo("test/test1")
if err != nil {
t.Errorf("Failed to list the test1 directory: %v", err)
return
}
if test1ID == "" {
err = oneDriveClient.CreateDirectory("test", "test1")
if err != nil {
t.Errorf("Failed to create the test1 directory: %v", err)
return
}
}
test2ID, _, _, err := oneDriveClient.GetFileInfo("test/test2")
if err != nil {
t.Errorf("Failed to list the test2 directory: %v", err)
return
}
if test2ID == "" {
err = oneDriveClient.CreateDirectory("test", "test2")
if err != nil {
t.Errorf("Failed to create the test2 directory: %v", err)
return
}
}
numberOfFiles := 20
maxFileSize := 64 * 1024
for i := 0; i < numberOfFiles; i++ {
content := make([]byte, rand.Int()%maxFileSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hasher := sha256.New()
hasher.Write(content)
filename := hex.EncodeToString(hasher.Sum(nil))
fmt.Printf("file: %s\n", filename)
err = oneDriveClient.UploadFile("test/test1/"+filename, content, 100)
if err != nil {
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
t.Errorf("Failed to upload the file %s: %v", filename, err)
return
}
}
}
entries, err := oneDriveClient.ListEntries("test/test1")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
err = oneDriveClient.MoveFile("test/test1/"+entry.Name, "test/test2")
if err != nil {
t.Errorf("Failed to move %s: %v", entry.Name, err)
return
}
}
entries, err = oneDriveClient.ListEntries("test/test2")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
readCloser, _, err := oneDriveClient.DownloadFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Error downloading file %s: %v", entry.Name, err)
return
}
hasher := sha256.New()
io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Name {
t.Errorf("File %s, hash %s", entry.Name, hash)
}
readCloser.Close()
}
for _, entry := range entries {
err = oneDriveClient.DeleteFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
return
}
}
}

View File

@@ -19,13 +19,13 @@ type OneDriveStorage struct {
}
// CreateOneDriveStorage creates a OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int, client_id string, client_secret string, drive_id string) (storage *OneDriveStorage, err error) {
for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
storagePath = storagePath[:len(storagePath)-1]
}
client, err := NewOneDriveClient(tokenFile, isBusiness)
client, err := NewOneDriveClient(tokenFile, isBusiness, client_id, client_secret, drive_id)
if err != nil {
return nil, err
}
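Callers of CreateOneDriveStorage now supply three extra strings; empty values keep the old behavior (duplicacy.com OAuth relay, default drive). A hypothetical call targeting a OneDrive Business drive by id:

storage, err := CreateOneDriveStorage(
	"odb-token.json",   // token file
	true,               // isBusiness
	"backups/pc1",      // storagePath
	4,                  // threads
	"",                 // client_id: empty selects the duplicacy.com refresh endpoint
	"",                 // client_secret
	"b!exampleDriveId", // drive_id: hypothetical; routes API calls to /drives/<id>
)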

View File

@@ -0,0 +1,250 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"io"
"os"
"fmt"
"net"
"path"
"time"
"strings"
"syscall"
"math/rand"
"github.com/hirochachacha/go-smb2"
)
// SambaStorage is a network file storage backed by an SMB (Samba) share, implementing the Storage interface.
type SambaStorage struct {
StorageBase
share *smb2.Share
storageDir string
numberOfThreads int
}
// CreateSambaStorage creates a Samba (SMB) storage.
func CreateSambaStorage(server string, port int, username string, password string, shareName string, storageDir string, threads int) (storage *SambaStorage, err error) {
connection, err := net.Dial("tcp", fmt.Sprintf("%s:%d", server, port))
if err != nil {
return nil, err
}
dialer := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: username,
Password: password,
},
}
client, err := dialer.Dial(connection)
if err != nil {
return nil, err
}
share, err := client.Mount(shareName)
if err != nil {
return nil, err
}
// Seed the random number generator used for the temporary chunk file suffix.
rand.Seed(time.Now().UnixNano())
storage = &SambaStorage{
share: share,
numberOfThreads: threads,
}
exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
if err != nil {
return nil, fmt.Errorf("Failed to check the storage path %s: %v", storageDir, err)
}
if !exist {
return nil, fmt.Errorf("The storage path %s does not exist", storageDir)
}
if !isDir {
return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
}
storage.storageDir = storageDir
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *SambaStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
fullPath := path.Join(storage.storageDir, dir)
list, err := storage.share.ReadDir(fullPath)
if err != nil {
if os.IsNotExist(err) {
return nil, nil, nil
}
return nil, nil, err
}
for _, f := range list {
name := f.Name()
if (f.IsDir() || f.Mode() & os.ModeSymlink != 0) && name[len(name)-1] != '/' {
name += "/"
}
files = append(files, name)
sizes = append(sizes, f.Size())
}
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SambaStorage) DeleteFile(threadIndex int, filePath string) (err error) {
err = storage.share.Remove(path.Join(storage.storageDir, filePath))
if err == nil || os.IsNotExist(err) {
return nil
} else {
return err
}
}
// MoveFile renames the file.
func (storage *SambaStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.share.Rename(path.Join(storage.storageDir, from), path.Join(storage.storageDir, to))
}
// CreateDirectory creates a new directory.
func (storage *SambaStorage) CreateDirectory(threadIndex int, dir string) (err error) {
fmt.Printf("Creating directory %s\n", dir)
err = storage.share.Mkdir(path.Join(storage.storageDir, dir), 0744)
if err != nil && os.IsExist(err) {
return nil
} else {
return err
}
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SambaStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
stat, err := storage.share.Stat(path.Join(storage.storageDir, filePath))
if err != nil {
if os.IsNotExist(err) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
return true, stat.IsDir(), stat.Size(), nil
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SambaStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, err := storage.share.Open(path.Join(storage.storageDir, filePath))
if err != nil {
return err
}
defer file.Close()
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
return err
}
return nil
}
// UploadFile writes 'content' to the file at 'filePath'
func (storage *SambaStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
fullPath := path.Join(storage.storageDir, filePath)
if len(strings.Split(filePath, "/")) > 2 {
dir := path.Dir(fullPath)
stat, err := storage.share.Stat(dir)
if err != nil {
if !os.IsNotExist(err) {
return err
}
err = storage.share.MkdirAll(dir, 0744)
if err != nil {
return err
}
} else {
if !stat.IsDir() && stat.Mode() & os.ModeSymlink == 0 {
return fmt.Errorf("The path %s is not a directory or symlink", dir)
}
}
}
letters := "abcdefghijklmnopqrstuvwxyz"
suffix := make([]byte, 8)
for i := range suffix {
suffix[i] = letters[rand.Intn(len(letters))]
}
temporaryFile := fullPath + "." + string(suffix) + ".tmp"
file, err := storage.share.Create(temporaryFile)
if err != nil {
return err
}
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = io.Copy(file, reader)
if err != nil {
file.Close()
return err
}
if err = file.Sync(); err != nil {
pathErr, ok := err.(*os.PathError)
isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
if !isNotSupported {
_ = file.Close()
return err
}
}
err = file.Close()
if err != nil {
return err
}
err = storage.share.Rename(temporaryFile, fullPath)
if err != nil {
if _, e := storage.share.Stat(fullPath); e == nil {
storage.share.Remove(temporaryFile)
return nil
} else {
return err
}
}
return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SambaStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *SambaStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *SambaStorage) IsStrongConsistent() bool { return true }
// If the storage supports fast listing of file names.
func (storage *SambaStorage) IsFastListing() bool { return false }
// Enable the test mode.
func (storage *SambaStorage) EnableTestMode() {}
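The connection sequence used by CreateSambaStorage above (TCP dial, NTLM authentication, share mount) is the standard go-smb2 handshake. A self-contained sketch with placeholder host and credentials:

package main

import (
	"fmt"
	"net"

	"github.com/hirochachacha/go-smb2"
)

func main() {
	conn, err := net.Dial("tcp", "fileserver:445") // placeholder host
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	dialer := &smb2.Dialer{
		Initiator: &smb2.NTLMInitiator{User: "backup", Password: "secret"}, // placeholders
	}
	session, err := dialer.Dial(conn)
	if err != nil {
		panic(err)
	}
	defer session.Logoff()

	share, err := session.Mount("backups") // share name, no leading slashes
	if err != nil {
		panic(err)
	}
	defer share.Umount()

	// List the share root, analogous to SambaStorage.ListFiles.
	entries, err := share.ReadDir(".")
	if err != nil {
		panic(err)
	}
	for _, entry := range entries {
		fmt.Println(entry.Name(), entry.IsDir())
	}
}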

View File

@@ -144,14 +144,14 @@ func (storage *SFTPStorage) retry(f func () error) error {
storage.clientLock.Lock()
connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
LOG_WARN("SFTP_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
storage.clientLock.Unlock()
continue
}
client, err := sftp.NewClient(connection)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
LOG_WARN("SFTP_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
connection.Close()
storage.clientLock.Unlock()
continue

View File

@@ -15,7 +15,6 @@ import (
"strings"
"time"
"sort"
"bytes"
"github.com/vmihailenco/msgpack"
@@ -52,6 +51,7 @@ type Snapshot struct {
// CreateEmptySnapshot creates an empty snapshot.
func CreateEmptySnapshot(id string) (snapshot *Snapshot) {
return &Snapshot{
Version: 1,
ID: id,
Revision: 0,
StartTime: time.Now().Unix(),
@@ -112,22 +112,27 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
}
var chunk *Chunk
reader := sequenceReader{
sequence: snapshot.FileSequence,
buffer: new(bytes.Buffer),
refillFunc: func(chunkHash string) []byte {
if chunk != nil {
config.PutChunk(chunk)
}
chunk = chunkOperator.Download(chunkHash, 0, true)
return chunk.GetBytes()
},
}
reader := NewSequenceReader(snapshot.FileSequence, func(chunkHash string) []byte {
if chunk != nil {
config.PutChunk(chunk)
}
chunk = chunkOperator.Download(chunkHash, 0, true)
return chunk.GetBytes()
})
if snapshot.Version == 0 {
defer func() {
if chunk != nil {
config.PutChunk(chunk)
}
} ()
// Normally if Version is 0 then the snapshot is created by CLI v2 but unfortunately CLI 3.0.1 does not set the
// version bit correctly when copying old backups. So we need to check the first byte -- if it is '[' then it is
// the old format. The new format starts with a string encoded in msgpack and the first byte can't be '['.
if snapshot.Version == 0 || reader.GetFirstByte() == '[' {
LOG_INFO("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in an old version format", snapshot.ID, snapshot.Revision)
files := make([]*Entry, 0)
decoder := json.NewDecoder(&reader)
decoder := json.NewDecoder(reader)
// read open bracket
_, err := decoder.Token()
@@ -156,7 +161,7 @@ func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOpe
}
}
} else if snapshot.Version == 1 {
decoder := msgpack.NewDecoder(&reader)
decoder := msgpack.NewDecoder(reader)
lastEndChunk := 0
@@ -434,7 +439,7 @@ func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
object := make(map[string]interface{})
object["version"] = 1
object["version"] = snapshot.Version
object["id"] = snapshot.ID
object["revision"] = snapshot.Revision
object["options"] = snapshot.Options

View File

@@ -249,17 +249,27 @@ func (manager *SnapshotManager) DownloadSnapshot(snapshotID string, revision int
// the memory before passing them to the json unmarshaller.
type sequenceReader struct {
sequence []string
buffer *bytes.Buffer
buffer *bytes.Reader
index int
refillFunc func(hash string) []byte
}
func NewSequenceReader(sequence []string, refillFunc func(hash string) []byte) *sequenceReader {
newData := refillFunc(sequence[0])
return &sequenceReader{
sequence: sequence,
buffer: bytes.NewReader(newData),
index: 1,
refillFunc: refillFunc,
}
}
// Read reads a new chunk using the refill function when there is no more data in the buffer
func (reader *sequenceReader) Read(data []byte) (n int, err error) {
if len(reader.buffer.Bytes()) == 0 {
if reader.buffer.Len() == 0 {
if reader.index < len(reader.sequence) {
newData := reader.refillFunc(reader.sequence[reader.index])
reader.buffer.Write(newData)
reader.buffer = bytes.NewReader(newData)
reader.index++
} else {
return 0, io.EOF
@@ -269,15 +279,25 @@ func (reader *sequenceReader) Read(data []byte) (n int, err error) {
return reader.buffer.Read(data)
}
func (manager *SnapshotManager) CreateChunkOperator(resurrect bool, threads int, allowFailures bool) {
func (reader *sequenceReader) GetFirstByte() byte {
b, err := reader.buffer.ReadByte()
reader.buffer.UnreadByte()
if err != nil {
return 0
} else {
return b
}
}
func (manager *SnapshotManager) CreateChunkOperator(resurrect bool, rewriteChunks bool, threads int, allowFailures bool) {
if manager.chunkOperator == nil {
manager.chunkOperator = CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, resurrect, threads, allowFailures)
manager.chunkOperator = CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, resurrect, rewriteChunks, threads, allowFailures)
}
}
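The reworked sequenceReader is a plain io.Reader that pulls chunks lazily through a refill callback and swaps in a fresh bytes.Reader whenever the current one drains; priming the buffer in the constructor is what makes the GetFirstByte peek possible before a decoder attaches. A standalone reduction with an in-memory chunk store (the map is hypothetical):

package main

import (
	"bytes"
	"fmt"
	"io"
)

type sequenceReader struct {
	sequence   []string
	buffer     *bytes.Reader
	index      int
	refillFunc func(hash string) []byte
}

func NewSequenceReader(sequence []string, refill func(string) []byte) *sequenceReader {
	// Prime the buffer with the first chunk so the first byte can be peeked.
	return &sequenceReader{sequence: sequence, buffer: bytes.NewReader(refill(sequence[0])), index: 1, refillFunc: refill}
}

func (reader *sequenceReader) Read(data []byte) (int, error) {
	if reader.buffer.Len() == 0 {
		if reader.index >= len(reader.sequence) {
			return 0, io.EOF
		}
		reader.buffer = bytes.NewReader(reader.refillFunc(reader.sequence[reader.index]))
		reader.index++
	}
	return reader.buffer.Read(data)
}

func main() {
	chunks := map[string][]byte{"h1": []byte("hello "), "h2": []byte("world")} // hypothetical chunk store
	reader := NewSequenceReader([]string{"h1", "h2"}, func(hash string) []byte { return chunks[hash] })
	data, _ := io.ReadAll(reader)
	fmt.Println(string(data)) // hello world
}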
// DownloadSequence returns the content represented by a sequence of chunks.
func (manager *SnapshotManager) DownloadSequence(sequence []string) (content []byte) {
manager.CreateChunkOperator(false, 1, false)
manager.CreateChunkOperator(false, false, 1, false)
for _, chunkHash := range sequence {
chunk := manager.chunkOperator.Download(chunkHash, 0, true)
content = append(content, chunk.GetBytes()...)
@@ -428,7 +448,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all
allFiles, _ := manager.ListAllFiles(manager.snapshotCache, chunkDir)
for _, file := range allFiles {
if file[len(file)-1] != '/' {
if len(file) > 0 && file[len(file)-1] != '/' {
chunkID := strings.Replace(file, "/", "", -1)
if _, found := chunks[chunkID]; !found {
LOG_DEBUG("SNAPSHOT_CLEAN", "Delete chunk %s from the snapshot cache", chunkID)
@@ -654,7 +674,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showFiles: %t, showChunks: %t",
snapshotID, revisionsToList, tag, showFiles, showChunks)
manager.CreateChunkOperator(false, 1, false)
manager.CreateChunkOperator(false, false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
@@ -760,9 +780,9 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
// CheckSnapshots checks if there is any problem with a snapshot.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int, allowFailures bool) bool {
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, rewriteChunks bool, threads int, allowFailures bool) bool {
manager.CreateChunkOperator(resurrect, threads, allowFailures)
manager.CreateChunkOperator(resurrect, rewriteChunks, threads, allowFailures)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
@@ -999,6 +1019,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
numberOfVerifiedChunks := len(verifiedChunks)
saveVerifiedChunks := func() {
verifiedChunksLock.Lock()
defer verifiedChunksLock.Unlock()
if len(verifiedChunks) > numberOfVerifiedChunks {
var description []byte
description, err = json.Marshal(verifiedChunks)
@@ -1010,6 +1032,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err)
} else {
LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks)
numberOfVerifiedChunks = len(verifiedChunks)
}
}
}
@@ -1041,6 +1064,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
var totalDownloadedChunkSize int64
var totalDownloadedChunks int64
totalChunks := int64(len(chunkHashes))
lastSaveTime := time.Now().Unix()
chunkChannel := make(chan int, threads)
var wg sync.WaitGroup
@@ -1061,20 +1085,30 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
if chunk == nil {
continue
}
chunkID := manager.config.GetChunkIDFromHash(chunkHashes[chunkIndex])
verifiedChunksLock.Lock()
verifiedChunks[chunkID] = startTime.Unix()
verifiedChunksLock.Unlock()
downloadedChunkSize := atomic.AddInt64(&totalDownloadedChunkSize, int64(chunk.GetLength()))
downloadedChunks := atomic.AddInt64(&totalDownloadedChunks, 1)
if !chunk.isBroken {
chunkID := manager.config.GetChunkIDFromHash(chunkHashes[chunkIndex])
verifiedChunksLock.Lock()
now := time.Now().Unix()
verifiedChunks[chunkID] = now
if now > lastSaveTime + 5 * 60 {
lastSaveTime = now
verifiedChunksLock.Unlock()
saveVerifiedChunks()
} else {
verifiedChunksLock.Unlock()
}
elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(downloadedChunkSize) / elapsedTime)
remainingTime := int64(float64(totalChunks - downloadedChunks) / float64(downloadedChunks) * elapsedTime)
percentage := float64(downloadedChunks) / float64(totalChunks) * 100.0
LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
downloadedChunkSize := atomic.AddInt64(&totalDownloadedChunkSize, int64(chunk.GetLength()))
downloadedChunks := atomic.AddInt64(&totalDownloadedChunks, 1)
elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(downloadedChunkSize) / elapsedTime)
remainingTime := int64(float64(totalChunks - downloadedChunks) / float64(downloadedChunks) * elapsedTime)
percentage := float64(downloadedChunks) / float64(totalChunks) * 100.0
LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
}
manager.config.PutChunk(chunk)
}
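The periodic save added above is a time-based checkpoint: workers record verified chunks under a mutex and, at most once every five minutes, one of them releases the lock and flushes the list, so an abrupt termination loses at most a few minutes of verification work. The pattern in isolation, with generic names rather than duplicacy's API:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var lock sync.Mutex
	verified := map[string]int64{}
	lastSaveTime := time.Now().Unix()

	save := func() {
		lock.Lock()
		defer lock.Unlock()
		fmt.Printf("checkpoint: %d verified entries\n", len(verified)) // stand-in for writing the JSON file
	}

	record := func(id string) {
		lock.Lock()
		now := time.Now().Unix()
		verified[id] = now
		if now > lastSaveTime+5*60 {
			lastSaveTime = now
			lock.Unlock() // release before save(), which re-acquires the lock
			save()
		} else {
			lock.Unlock()
		}
	}

	record("chunk-1")
}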
@@ -1318,6 +1352,10 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
LOG_TRACE("SNAPSHOT_VERIFY", "%s", file.Path)
}
if lastChunk != nil {
manager.config.PutChunk(lastChunk)
}
if corruptedFiles > 0 {
LOG_WARN("SNAPSHOT_VERIFY", "Snapshot %s at revision %d contains %d corrupted files",
snapshot.ID, snapshot.Revision, corruptedFiles)
@@ -1336,7 +1374,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, la
return true
}
manager.CreateChunkOperator(false, 1, false)
manager.CreateChunkOperator(false, false, 1, false)
fileHasher := manager.config.NewFileHasher()
alternateHash := false
@@ -1372,6 +1410,11 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, la
}
}
if chunk.isBroken {
*lastChunk = nil
return false
}
output(chunk.GetBytes()[start:end])
if alternateHash {
fileHasher.Write([]byte(hex.EncodeToString([]byte(hash))))
@@ -1465,7 +1508,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
top, snapshotID, revisions, filePath, compareByHash)
manager.CreateChunkOperator(false, 1, false)
manager.CreateChunkOperator(false, false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
@@ -1690,7 +1733,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
LOG_DEBUG("HISTORY_PARAMETERS", "top: %s, id: %s, revisions: %v, path: %s, showLocalHash: %t",
top, snapshotID, revisions, filePath, showLocalHash)
manager.CreateChunkOperator(false, 1, false)
manager.CreateChunkOperator(false, false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
@@ -1818,7 +1861,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
}
manager.CreateChunkOperator(false, threads, false)
manager.CreateChunkOperator(false, false, threads, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
@@ -1963,10 +2006,6 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
// deletable.
for _, collectionName := range collections {
if collectOnly {
continue
}
matched := collectionRegex.FindStringSubmatch(collectionName)
if matched == nil {
continue
@@ -1977,6 +2016,10 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
maxCollectionNumber = collectionNumber
}
if collectOnly {
continue
}
collectionFile := path.Join(collectionDir, collectionName)
manager.fileChunk.Reset(false)
@@ -2396,8 +2439,14 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
}
allFiles, _ := manager.ListAllFiles(manager.storage, chunkDir)
uniqueFiles := make(map[string]bool)
for _, file := range allFiles {
if file[len(file)-1] == '/' {
if _, found := uniqueFiles[file]; found {
continue
}
uniqueFiles[file] = true
if len(file) == 0 || file[len(file)-1] == '/' {
continue
}
@@ -2594,12 +2643,29 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string)
derivationKey = derivationKey[len(derivationKey)-64:]
}
err = manager.fileChunk.Decrypt(manager.config.FileKey, derivationKey)
err, rewriteNeeded := manager.fileChunk.Decrypt(manager.config.FileKey, derivationKey)
if err != nil {
LOG_ERROR("DOWNLOAD_DECRYPT", "Failed to decrypt the file %s: %v", path, err)
return nil
}
if rewriteNeeded && manager.chunkOperator.rewriteChunks {
newChunk := manager.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(manager.fileChunk.GetBytes())
err = newChunk.Encrypt(manager.config.FileKey, derivationKey, true)
if err == nil {
err = manager.storage.UploadFile(0, path, newChunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_REWRITE", "Failed to re-uploaded the file %s: %v", path, err)
} else{
LOG_INFO("DOWNLOAD_REWRITE", "The file %s has been re-uploaded", path)
}
}
manager.config.PutChunk(newChunk)
}
err = manager.snapshotCache.UploadFile(0, path, manager.fileChunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_FILE_CACHE", "Failed to add the file %s to the snapshot cache: %v", path, err)

View File

@@ -116,7 +116,7 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
func uploadTestChunk(manager *SnapshotManager, content []byte) string {
chunkOperator := CreateChunkOperator(manager.config, manager.storage, nil, false, testThreads, false)
chunkOperator := CreateChunkOperator(manager.config, manager.storage, nil, false, false, *testThreads, false)
chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize)
}
@@ -179,7 +179,7 @@ func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision in
func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {
manager.CreateChunkOperator(false, 1, false)
manager.CreateChunkOperator(false, false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
@@ -625,7 +625,7 @@ func TestPruneNewSnapshots(t *testing.T) {
// Now chunkHash1 will be resurrected
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 4, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1, false)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, false, 1, false)
}
// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -674,7 +674,7 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Run the prune again but the fossil collection should be ignored, since revision 1 still exists
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 2)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1, false)
snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, false, 1, false)
// Prune snapshot 1 again
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -688,5 +688,5 @@ func TestPruneGhostSnapshots(t *testing.T) {
// Run the prune again and this time the fossil collection will be processed and the fossils removed
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
checkTestSnapshots(snapshotManager, 3, 0)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1, false)
snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, false, 1, false)
}
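With the new rewriteChunks flag, CheckSnapshots now takes twelve positional parameters, and the boolean runs in these tests are easy to misread. The last call above, annotated against the signature shown earlier in this diff:

snapshotManager.CheckSnapshots(
	"vm1@host1",    // snapshotID
	[]int{2, 3, 4}, // revisionsToCheck
	"",             // tag
	false,          // showStatistics
	false,          // showTabular
	false,          // checkFiles
	false,          // checkChunks
	false,          // searchFossils
	false,          // resurrect
	false,          // rewriteChunks (new in this change)
	1,              // threads
	false)          // allowFailures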

View File

@@ -261,7 +261,8 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
return fileStorage
}
urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.]+@)?([^/]+)(/(.+))?`)
// Added \! to matched[2] because OneDrive drive ids contain ! (e.g. "b!xxx")
urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.\!]+@)?([^/]+)(/(.+))?`)
matched := urlRegex.FindStringSubmatch(storageURL)
@@ -540,7 +541,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "dropbox" {
storageDir := matched[3] + matched[5]
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
token := GetPassword(preference, "dropbox_token", "Enter Dropbox refresh token:", true, resetPassword)
dropboxStorage, err := CreateDropboxStorage(token, storageDir, 1, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
@@ -644,15 +645,40 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
SavePassword(preference, "gcd_token", tokenFile)
return gcdStorage
} else if matched[1] == "one" || matched[1] == "odb" {
// Handle writing directly to the root of the drive
// For odb://drive_id@/, drive_id@ is match[3] not match[2]
if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
matched[2], matched[3] = matched[3], matched[2]
}
drive_id := matched[2]
if len(drive_id) > 0 {
drive_id = drive_id[:len(drive_id)-1]
}
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
tokenFile := GetPassword(preference, matched[1] + "_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
// client_id, just like tokenFile, can be stored in preferences
//prompt = fmt.Sprintf("Enter client_id for custom Azure app (if empty will use duplicacy.com one):")
client_id := GetPasswordFromPreference(preference, matched[1] + "_client_id")
client_secret := ""
if client_id != "" {
// client_secret should go into keyring
prompt = fmt.Sprintf("Enter client_secret for custom Azure app (if empty will use duplicacy.com one):")
client_secret = GetPassword(preference, matched[1] + "_client_secret", prompt, true, resetPassword)
}
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads, client_id, client_secret, drive_id)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, matched[1] + "_token", tokenFile)
if client_id != "" {
SavePassword(preference, matched[1] + "_client_secret", client_secret)
}
return oneDriveStorage
} else if matched[1] == "hubic" {
storagePath := matched[3] + matched[4]
@@ -714,6 +740,62 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "fabric_token", token)
return smeStorage
} else if matched[1] == "storj" {
satellite := matched[2] + matched[3]
bucket := matched[5]
storageDir := ""
index := strings.Index(bucket, "/")
if index >= 0 {
storageDir = bucket[index + 1:]
bucket = bucket[:index]
}
apiKey := GetPassword(preference, "storj_key", "Enter the API access key:", true, resetPassword)
passphrase := GetPassword(preference, "storj_passphrase", "Enter the passphrase:", true, resetPassword)
storjStorage, err := CreateStorjStorage(satellite, apiKey, passphrase, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "storj_key", apiKey)
SavePassword(preference, "storj_passphrase", passphrase)
return storjStorage
} else if matched[1] == "smb" {
server := matched[3]
username := matched[2]
if username == "" {
LOG_ERROR("STORAGE_CREATE", "No username is provided to access the SAMBA storage")
return nil
}
username = username[:len(username)-1]
storageDir := matched[5]
port := 445
if strings.Contains(server, ":") {
index := strings.Index(server, ":")
port, _ = strconv.Atoi(server[index+1:])
server = server[:index]
}
if !strings.Contains(storageDir, "/") {
LOG_ERROR("STORAGE_CREATE", "No share name specified for the SAMBA storage")
return nil
}
index := strings.Index(storageDir, "/")
shareName := storageDir[:index]
storageDir = storageDir[index+1:]
prompt := fmt.Sprintf("Enter the SAMBA password:")
password := GetPassword(preference, "smb_password", prompt, true, resetPassword)
sambaStorage, err := CreateSambaStorage(server, port, username, password, shareName, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the SAMBA storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "smb_password", password)
return sambaStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil
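The \! added to the regex and the @-swap above decide where a OneDrive drive id lands. When the id is followed by a path it parses into matched[2]; at the drive root, [^/]+ swallows it into matched[3], which is the case the swap handles. A quick check with a hypothetical drive id:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	urlRegex := regexp.MustCompile(`^([\w-]+)://([\w\-@\.\!]+@)?([^/]+)(/(.+))?`)

	// Drive id followed by a path: the id lands in matched[2].
	m := urlRegex.FindStringSubmatch("odb://b!abc123@backups/pc1")
	fmt.Println(m[1], m[2], m[3], m[5]) // odb b!abc123@ backups pc1

	// Drive id at the drive root: [^/]+ swallows the id, so it lands in
	// matched[3] and the code above swaps matched[2] and matched[3].
	m = urlRegex.FindStringSubmatch("odb://b!abc123@/")
	fmt.Println(m[2] == "", m[3]) // true b!abc123@
}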

View File

@@ -136,15 +136,15 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads, "", "", "")
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads, "", "", "")
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads, "", "", "")
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "hubic" {
@@ -169,6 +169,28 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "storj" {
storage, err := CreateStorjStorage(config["satellite"], config["key"], config["passphrase"], config["bucket"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "storj" {
storage, err := CreateStorjStorage(config["satellite"], config["key"], config["passphrase"], config["bucket"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "smb" {
port, _ := strconv.Atoi(config["port"])
storage, err := CreateSambaStorage(config["server"], port, config["username"], config["password"], config["share"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
}
return nil, fmt.Errorf("Invalid storage named: %s", *testStorageName)

View File

@@ -0,0 +1,184 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"fmt"
"io"
"context"
"storj.io/uplink"
)
// StorjStorage is a storage backend for Storj.
type StorjStorage struct {
StorageBase
project *uplink.Project
bucket string
storageDir string
numberOfThreads int
}
// CreateStorjStorage creates a Storj storage.
func CreateStorjStorage(satellite string, apiKey string, passphrase string,
bucket string, storageDir string, threads int) (storage *StorjStorage, err error) {
ctx := context.Background()
access, err := uplink.RequestAccessWithPassphrase(ctx, satellite, apiKey, passphrase)
if err != nil {
return nil, fmt.Errorf("cannot request the access grant: %v", err)
}
project, err := uplink.OpenProject(ctx, access)
if err != nil {
return nil, fmt.Errorf("cannot open the project: %v", err)
}
_, err = project.StatBucket(ctx, bucket)
if err != nil {
return nil, fmt.Errorf("cannot found the bucket: %v", err)
}
if storageDir != "" && storageDir[len(storageDir) - 1] != '/' {
storageDir += "/"
}
storage = &StorjStorage {
project: project,
bucket: bucket,
storageDir: storageDir,
numberOfThreads: threads,
}
storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *StorjStorage) ListFiles(threadIndex int, dir string) (
files []string, sizes []int64, err error) {
fullPath := storage.storageDir + dir
if fullPath != "" && fullPath[len(fullPath) - 1] != '/' {
fullPath += "/"
}
options := uplink.ListObjectsOptions {
Prefix: fullPath,
System: true, // request SystemMetadata which includes ContentLength
}
objects := storage.project.ListObjects(context.Background(), storage.bucket, &options)
for objects.Next() {
if objects.Err() != nil {
return nil, nil, objects.Err()
}
item := objects.Item()
name := item.Key[len(fullPath):]
size := item.System.ContentLength
if item.IsPrefix {
if name != "" && name[len(name) - 1] != '/' {
name += "/"
size = 0
}
}
files = append(files, name)
sizes = append(sizes, size)
}
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *StorjStorage) DeleteFile(threadIndex int, filePath string) (err error) {
_, err = storage.project.DeleteObject(context.Background(), storage.bucket,
storage.storageDir + filePath)
return err
}
// MoveFile renames the file.
func (storage *StorjStorage) MoveFile(threadIndex int, from string, to string) (err error) {
err = storage.project.MoveObject(context.Background(), storage.bucket,
storage.storageDir + from, storage.bucket, storage.storageDir + to, nil)
return err
}
// CreateDirectory creates a new directory.
func (storage *StorjStorage) CreateDirectory(threadIndex int, dir string) (err error) {
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *StorjStorage) GetFileInfo(threadIndex int, filePath string) (
exist bool, isDir bool, size int64, err error) {
info, err := storage.project.StatObject(context.Background(), storage.bucket,
storage.storageDir + filePath)
if info == nil {
return false, false, 0, nil
} else if err != nil {
return false, false, 0, err
} else {
return true, info.IsPrefix, info.System.ContentLength, nil
}
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *StorjStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, err := storage.project.DownloadObject(context.Background(), storage.bucket,
storage.storageDir + filePath, nil)
if err != nil {
return err
}
defer file.Close()
if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
return err
}
return nil
}
// UploadFile writes 'content' to the file at 'filePath'
func (storage *StorjStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
file, err := storage.project.UploadObject(context.Background(), storage.bucket,
storage.storageDir + filePath, nil)
if err != nil {
return err
}
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
_, err = io.Copy(file, reader)
if err != nil {
return err
}
err = file.Commit()
if err != nil {
return err
}
return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *StorjStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *StorjStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *StorjStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of file names.
func (storage *StorjStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *StorjStorage) EnableTestMode() {}
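The Storj backend wraps storj.io/uplink's object API almost one-to-one. A condensed upload/download round trip under placeholder credentials:

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"

	"storj.io/uplink"
)

func main() {
	ctx := context.Background()

	// All three values are placeholders; the satellite address is the
	// host:port form accepted by RequestAccessWithPassphrase.
	access, err := uplink.RequestAccessWithPassphrase(ctx,
		"us1.storj.io:7777", "my-api-key", "my-passphrase")
	if err != nil {
		panic(err)
	}

	project, err := uplink.OpenProject(ctx, access)
	if err != nil {
		panic(err)
	}
	defer project.Close()

	// Upload: write then Commit, mirroring UploadFile above.
	up, err := project.UploadObject(ctx, "backups", "config", nil)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(up, bytes.NewReader([]byte("hello"))); err != nil {
		panic(err)
	}
	if err := up.Commit(); err != nil {
		panic(err)
	}

	// Download, mirroring DownloadFile above.
	down, err := project.DownloadObject(ctx, "backups", "config", nil)
	if err != nil {
		panic(err)
	}
	defer down.Close()
	data, _ := io.ReadAll(down)
	fmt.Println(string(data))
}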

View File

@@ -5,16 +5,18 @@
package duplicacy
import (
"context"
"strconv"
"strings"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
)
type SwiftStorage struct {
StorageBase
ctx context.Context
connection *swift.Connection
container string
storageDir string
@@ -106,6 +108,8 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
arguments["protocol"] = "https"
}
ctx, _ := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
// Please refer to https://godoc.org/github.com/ncw/swift#Connection
connection := swift.Connection{
Domain: arguments["domain"],
@@ -129,17 +133,18 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
TrustId: arguments["trust_id"],
}
err = connection.Authenticate()
err = connection.Authenticate(ctx)
if err != nil {
return nil, err
}
_, _, err = connection.Container(container)
_, _, err = connection.Container(ctx, container)
if err != nil {
return nil, err
}
storage = &SwiftStorage{
ctx: ctx,
connection: &connection,
container: container,
storageDir: storageDir,
@@ -168,7 +173,7 @@ func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []str
options.Delimiter = '/'
}
objects, err := storage.connection.ObjectsAll(storage.container, &options)
objects, err := storage.connection.ObjectsAll(storage.ctx, storage.container, &options)
if err != nil {
return nil, nil, err
}
@@ -190,12 +195,12 @@ func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []str
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SwiftStorage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.connection.ObjectDelete(storage.container, storage.storageDir+filePath)
return storage.connection.ObjectDelete(storage.ctx, storage.container, storage.storageDir+filePath)
}
// MoveFile renames the file.
func (storage *SwiftStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.connection.ObjectMove(storage.container, storage.storageDir+from,
return storage.connection.ObjectMove(storage.ctx, storage.container, storage.storageDir+from,
storage.container, storage.storageDir+to)
}
@@ -207,7 +212,7 @@ func (storage *SwiftStorage) CreateDirectory(threadIndex int, dir string) (err e
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
object, _, err := storage.connection.Object(storage.container, storage.storageDir+filePath)
object, _, err := storage.connection.Object(storage.ctx, storage.container, storage.storageDir+filePath)
if err != nil {
if err == swift.ObjectNotFound {
@@ -223,7 +228,7 @@ func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exis
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, _, err := storage.connection.ObjectOpen(storage.container, storage.storageDir+filePath, false, nil)
file, _, err := storage.connection.ObjectOpen(storage.ctx, storage.container, storage.storageDir+filePath, false, nil)
if err != nil {
return err
}
@@ -234,7 +239,7 @@ func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chun
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads)
_, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
_, err = storage.connection.ObjectPut(storage.ctx, storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
return err
}
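The v2 swift client threads a context.Context through every call, which is why the storage now builds one with the configured timeout at creation time and stores it. A minimal sketch of the authenticate-and-stat flow with placeholder credentials (note that the diff above discards the cancel function, which go vet reports as lostcancel):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/ncw/swift/v2"
)

func main() {
	// Keep and defer the cancel function to release the timer.
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	c := swift.Connection{
		UserName: "demo",                        // placeholder
		ApiKey:   "secret",                      // placeholder
		AuthUrl:  "https://auth.example.com/v3", // placeholder
	}
	if err := c.Authenticate(ctx); err != nil {
		panic(err)
	}

	container, _, err := c.Container(ctx, "backups")
	if err != nil {
		panic(err)
	}
	fmt.Println(container.Name, container.Count, container.Bytes)
}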