1
0
mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits

...

45 Commits

Author SHA1 Message Date
Gilbert Chen
d6f5336784 Bump version to 3.0.0 2022-10-05 22:21:17 -04:00
Gilbert Chen
4b47ea55e4 Bump version to 2.8.0 2022-10-04 12:49:50 -04:00
Gilbert Chen
5c35ef799a Switch to go modules 2022-10-04 12:48:58 -04:00
Gilbert Chen
2c63d32142 Use swift V2 2022-10-04 12:47:38 -04:00
Gilbert Chen
6009f64b66 Add a storage backend for Storj
The url format is storj://satellite/bucket/path.  You can get the
satellite along with the api access key when requesting an Access
Grant of type API Access.
2022-09-30 10:30:06 -04:00
Gilbert Chen
cde660ee9f Use long-lived refresh token for the Dropbox backend
The refresh token can be downloaded from https://duplicacy.com/dropbox_start
2022-08-12 22:17:06 -04:00
Gilbert Chen
54952cef26 Fixed a bug that referenced uninitialized operator.snapshotCache 2022-07-10 12:23:11 -04:00
Gilbert Chen
fc2386f9cc Initialize startTime correctly in CreateChunkOperator 2022-06-09 23:28:29 -04:00
Gilbert Chen
0d8a37f9f3 Dependency change: update github.com/ncw/swift to v2.0.1 2022-04-08 23:16:51 -04:00
gilbertchen
345fc5ed87 Merge pull request #626 from markfeit/swift-v2
Swift v2
2022-04-08 22:29:30 -04:00
Gilbert Chen
8df529dffe Fixed the type of a test parameter 2022-04-07 23:34:48 -04:00
gilbertchen
f2d6de3fff Merge pull request #625 from gilbertchen/memory_optimization
Rewrite the backup procedure to reduce memory usage
2022-04-07 23:26:46 -04:00
Gilbert Chen
fede9c74b5 Merge branch 'master' into memory_optimization 2022-04-07 23:26:14 -04:00
Gilbert Chen
a953c4ec28 Don't parse test parameters in init()
This is to make test parameter parsing work with newer versions
of Go
2022-04-07 22:31:18 -04:00
gilbertchen
f52dcf761b Merge branch 'master' into memory_optimization 2022-03-31 15:05:50 -04:00
Gilbert Chen
ade669d14e Update totalChunkSize in the chunk operator to show stats during restore 2022-03-04 16:53:40 -05:00
Mark Feit
4743c7ba0d Don't cancel the context and use a sane deadline. 2021-12-17 10:09:36 -05:00
Mark Feit
590d3b1b5b More development 2021-12-04 10:56:47 -05:00
Mark Feit
0590daff85 More dev 2021-12-04 10:53:34 -05:00
Mark Feit
95b1227d93 Use empty context 2021-12-04 10:48:01 -05:00
Mark Feit
1661caeb92 More fixups 2021-12-04 10:37:11 -05:00
Mark Feit
041ba944c4 Typo 2021-12-04 10:24:31 -05:00
Mark Feit
934c2515cc Import context 2021-12-04 10:23:42 -05:00
Mark Feit
c363d21954 First cut of Swift v2 2021-12-04 10:11:19 -05:00
Gilbert Chen
d9f6545d63 Rewrite the backup procedure to reduce memory usage
Main changes:

* Change the listing order of files/directories so that the local and remote
  snapshots can be compared on-the-fly.

* Introduce a new struct called EntryList that maintains a list of
  files/directories, which are kept in memory when the number is low, and
  serialized into a file when there are too many.

* EntryList can also be turned into an on-disk incomplete snapshot quickly,
  to support fast-resume on next run.

* ChunkOperator can now download and upload chunks, thus replacing original
  ChunkDownloader and ChunkUploader.  The new ChunkDownloader is only used
  to prefetch chunks during the restore operation.
2021-10-24 23:34:49 -04:00
Gilbert Chen
68b60499d7 Add a global option to print memory usage
This option, -print-memory-usage, will print memory usage every second while
the program is running.
2021-10-15 20:45:53 -04:00
Gilbert Chen
cacf6618d2 Download a fossil directly instead of turning it back to a chunk first
This is to avoid the read-after-rename consistency issue where the effect
of renaming may not be observed by the subsequent attempt to download the
just renamed chunk.
2021-10-08 14:04:56 -04:00
Gilbert Chen
e43e848d47 Find the storage path in shared folders first when connecting to Google Drive
When connecting to Google Drive with a service account key, only files in the
service account's own hidden drive space are listable.  This change finds
the given storage path among shared folders first so that folders from the user
space can be made accessible via service account.
2021-03-09 22:46:23 -05:00
gilbertchen
fd1b7e1d20 Merge pull request #612 from gilbertchen/gcd_impersonate
Support GCD impersonation via modified service account file
2021-03-09 10:24:08 -05:00
Gilbert Chen
f83e4f3c44 Fix SNAPSHOT_INACTIVE log message 2021-01-28 00:06:49 -05:00
gilbertchen
ecf5191400 Update README.md 2021-01-04 14:19:51 -05:00
gilbertchen
ba091fbe42 Add a link to the paper 2021-01-04 14:17:48 -05:00
Gilbert Chen
ee9355b974 Check in the paper accepted to IEEE Transactions on Cloud Computing 2021-01-04 10:18:04 -05:00
Gilbert Chen
4cfecf12f8 Show the path in the error when a subdirectory can't be listed 2021-01-04 10:17:11 -05:00
Gilbert Chen
4104c2f934 Exit with code 2 when an invalid command is provided 2021-01-04 10:16:10 -05:00
Gilbert Chen
41a8f657c4 Add test storage for StorageMadeEasy's File Fabric 2020-11-23 14:38:15 -05:00
Gilbert Chen
474f07e5cc Support GCD impersonation via modified service account file 2020-11-23 09:44:10 -05:00
Gilbert Chen
175adb14cb Bump version to 2.7.2 2020-11-15 23:20:11 -05:00
Gilbert Chen
ae706e3dcf Update dependency (for gilbertchen/go-dropbox and github.com/pkg/xattr) 2020-11-15 23:19:20 -05:00
Gilbert Chen
5eed6c65f6 Validate the repository id for the init and add command
Only letters, numbers, dashes, and underscores are allowed.
2020-11-04 21:32:07 -05:00
Gilbert Chen
bec3a0edcd Fixed a bug that caused a fresh restore to fail without the -overwrite option
When restoring a file that doesn't exist locally, if the file is large (>100M)
Duplicacy will create an empty sparse file.  But this newly created file will
be mistaken for a local copy and hence the restore will fail with a message
suggesting the -overwrite option.
2020-11-03 10:57:47 -05:00
Gilbert Chen
b392302c06 Use github.com/pkg/xattr for reading/writing extended attributes.
The one previously used, github.com/redsift/xattr, is old and can only process
user-defined extended attributes, not system ones.
2020-10-16 21:16:05 -04:00
Gilbert Chen
7c36311aa9 Change snapshot source path from / to /System/Volumes/Data
Also use a regex to extract the snapshot date from tmutil output.
2020-10-11 15:23:09 -04:00
Gilbert Chen
7f834e84f6 Don't attempt to load verified_chunks when it doesn't exist. 2020-10-09 14:22:45 -04:00
Gilbert Chen
d7c1903d5a Skip chunks already verified in previous runs for check -chunks.
This is done by storing the list of verified chunks in a file
`.duplicacy/cache/<storage>/verified_chunks`.
2020-10-08 19:59:39 -04:00
34 changed files with 3541 additions and 2226 deletions

270
Gopkg.lock generated
View File

@@ -1,270 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "cloud.google.com/go"
packages = ["compute/metadata","iam","internal","internal/optional","internal/version","storage"]
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"
[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
version = "v10.15.5"
[[projects]]
branch = "master"
name = "github.com/aryann/difflib"
packages = ["."]
revision = "e206f873d14a916d3d26c40ab667bca123f365a3"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
version = "v1.30.7"
[[projects]]
name = "github.com/bkaradzic/go-lz4"
packages = ["."]
revision = "74ddf82598bc4745b965729e9c6a463bedd33049"
version = "v1.0.0"
[[projects]]
name = "github.com/dgrijalva/jwt-go"
packages = ["."]
revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
version = "v3.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage","version"]
revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/cli"
packages = ["."]
revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
[[projects]]
name = "github.com/gilbertchen/go-dropbox"
packages = ["."]
revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
[[projects]]
name = "github.com/gilbertchen/go-ole"
packages = ["."]
revision = "0e87ea779d9deb219633b828a023b32e1244dd57"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/go.dbus"
packages = ["."]
revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/goamz"
packages = ["aws","s3"]
revision = "eada9f4e8cc2a45db775dee08a2c37597ce4760a"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/gopass"
packages = ["."]
revision = "bf9dde6d0d2c004a008c27aaee91170c786f6db8"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/keyring"
packages = ["."]
revision = "8855f5632086e51468cd7ce91056f8da69687ef6"
[[projects]]
branch = "master"
name = "github.com/gilbertchen/xattr"
packages = ["."]
revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
[[projects]]
branch = "master"
name = "github.com/golang/groupcache"
packages = ["lru"]
revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
[[projects]]
name = "github.com/golang/protobuf"
packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "84668698ea25b64748563aa20726db66a6b8d299"
version = "v1.3.5"
[[projects]]
name = "github.com/googleapis/gax-go"
packages = [".","v2"]
revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
version = "v2.0.3"
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "c2b33e84"
[[projects]]
name = "github.com/klauspost/cpuid"
packages = ["."]
revision = "750c0591dbbd50ef88371c665ad49e426a4b830b"
version = "v1.3.1"
[[projects]]
name = "github.com/klauspost/reedsolomon"
packages = ["."]
revision = "7daa20bf74337a939c54f892a2eca9d9b578eb7f"
version = "v1.9.9"
[[projects]]
name = "github.com/kr/fs"
packages = ["."]
revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
version = "v0.1.0"
[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/minio/blake2b-simd"
packages = ["."]
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
[[projects]]
name = "github.com/minio/highwayhash"
packages = ["."]
revision = "86a2a969d04373bf05ca722517d30fb1c9a3e4f9"
version = "v1.0.1"
[[projects]]
branch = "master"
name = "github.com/mmcloughlin/avo"
packages = ["attr","build","buildtags","gotypes","internal/prnt","internal/stack","ir","operand","pass","printer","reg","src","x86"]
revision = "443f81d771042b019379ae4bfcd0a591cb47c88a"
[[projects]]
name = "github.com/ncw/swift"
packages = ["."]
revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
version = "v1.0.50"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "614d223910a179a466c1767a985424175c39b465"
version = "v0.9.1"
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "5616182052227b951e76d9c9b79a616c608bd91b"
version = "v1.11.0"
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/vaughan0/go-ini"
packages = ["."]
revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
[[projects]]
name = "go.opencensus.io"
packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
revision = "d835ff86be02193d324330acdb7d65546b05f814"
version = "v0.22.3"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
[[projects]]
name = "golang.org/x/mod"
packages = ["semver"]
revision = "859b3ef565e237f9f1a0fb6b55385c497545680d"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
[[projects]]
name = "golang.org/x/oauth2"
packages = [".","google","internal","jws","jwt"]
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["cpu","unix","windows"]
revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
[[projects]]
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
version = "v0.3.2"
[[projects]]
branch = "master"
name = "golang.org/x/tools"
packages = ["go/ast/astutil","go/gcexportdata","go/internal/gcimporter","go/internal/packagesdriver","go/packages","go/types/typeutil","internal/event","internal/event/core","internal/event/keys","internal/event/label","internal/gocommand","internal/packagesinternal","internal/typesinternal"]
revision = "5d1fdd8fa3469142b9369713b23d8413d6d83189"
[[projects]]
branch = "master"
name = "golang.org/x/xerrors"
packages = [".","internal"]
revision = "5ec99f83aff198f5fbd629d6c8d8eb38a04218ca"
[[projects]]
name = "google.golang.org/api"
packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
version = "v0.21.0"
[[projects]]
name = "google.golang.org/appengine"
packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
revision = "971852bfffca25b069c31162ae8f247a3dba083b"
version = "v1.6.5"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
revision = "baae70f3302d3efdff74db41e48a5d476d036906"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
version = "v1.28.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "0e6ea2be64dedc36cb9192f1d410917ea72896302011e55b6df5e4c00c1c2f1c"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -1,98 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "cloud.google.com/go"
version = "0.16.0"
[[constraint]]
branch = "master"
name = "github.com/aryann/difflib"
[[constraint]]
name = "github.com/aws/aws-sdk-go"
version = "1.30.7"
[[constraint]]
name = "github.com/bkaradzic/go-lz4"
version = "1.0.0"
[[constraint]]
name = "github.com/gilbertchen/azure-sdk-for-go"
branch = "master"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/cli"
[[constraint]]
name = "github.com/gilbertchen/go-dropbox"
revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
[[constraint]]
name = "github.com/gilbertchen/go-ole"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/goamz"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/gopass"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/keyring"
[[constraint]]
branch = "master"
name = "github.com/gilbertchen/xattr"
[[constraint]]
branch = "master"
name = "github.com/minio/blake2b-simd"
[[constraint]]
name = "github.com/pkg/sftp"
version = "1.10.1"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "golang.org/x/oauth2"
revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
[[constraint]]
name = "google.golang.org/api"
version = "0.21.0"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.28.0"

View File

@@ -1,6 +1,8 @@
# Duplicacy: A lock-free deduplication cloud backup tool
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).
Duplicacy is a new generation cross-platform cloud backup tool based on the idea of [Lock-Free Deduplication](https://github.com/gilbertchen/duplicacy/wiki/Lock-Free-Deduplication).
Our paper explaining the inner workings of Duplicacy has been accepted by [IEEE Transactions on Cloud Computing](https://ieeexplore.ieee.org/document/9310668) and will appear in a future issue this year. The final draft version is available [here](https://github.com/gilbertchen/duplicacy/blob/master/duplicacy_paper.pdf) for those who don't have IEEE subscriptions.
This repository hosts source code, design documents, and binary releases of the command line version of Duplicacy. There is also a Web GUI frontend built for Windows, macOS, and Linux, available from https://duplicacy.com.

View File

@@ -147,6 +147,10 @@ func setGlobalOptions(context *cli.Context) {
duplicacy.SetLoggingLevel(duplicacy.DEBUG)
}
if context.GlobalBool("print-memory-usage") {
go duplicacy.PrintMemoryUsage()
}
ScriptEnabled = true
if context.GlobalBool("no-script") {
ScriptEnabled = false
@@ -274,6 +278,13 @@ func configRepository(context *cli.Context, init bool) {
}
}
snapshotIDRegex := regexp.MustCompile(`^[A-Za-z0-9_\-]+$`)
matched := snapshotIDRegex.FindStringSubmatch(snapshotID)
if matched == nil {
duplicacy.LOG_ERROR("PREFERENCE_INVALID", "'%s' is an invalid snapshot id", snapshotID)
return
}
var repository string
var err error
@@ -774,7 +785,10 @@ func backupRepository(context *cli.Context) {
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SetDryRun(dryRun)
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly)
metadataChunkSize := context.Int("metadata-chunk-size")
maximumInMemoryEntries := context.Int("max-in-memory-entries")
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS, vssTimeout, enumOnly, metadataChunkSize, maximumInMemoryEntries)
runScript(context, preference.Name, "post")
}
@@ -1499,6 +1513,19 @@ func main() {
Name: "enum-only",
Usage: "enumerate the repository recursively and then exit",
},
cli.IntFlag{
Name: "metadata-chunk-size",
Value: 1024 * 1024,
Usage: "the average size of metadata chunks (defaults to 1M)",
Argument: "<size>",
},
cli.IntFlag{
Name: "max-in-memory-entries",
Value: 1024 * 1024,
Usage: "the maximum number of entries kept in memory (defaults to 1M)",
Argument: "<number>",
},
},
Usage: "Save a snapshot of the repository to the storage",
ArgsUsage: " ",
@@ -2173,13 +2200,23 @@ func main() {
Usage: "suppress logs with the specified id",
Argument: "<id>",
},
cli.BoolFlag{
Name: "print-memory-usage",
Usage: "print memory usage every second",
},
}
app.HideVersion = true
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.7.1" + " (" + GitCommit + ")"
app.Version = "3.0.0" + " (" + GitCommit + ")"
// Exit with code 2 if an invalid command is provided
app.CommandNotFound = func(context *cli.Context, command string) {
fmt.Fprintf(context.App.Writer, "Invalid command: %s\n", command)
os.Exit(2)
}
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

BIN
duplicacy_paper.pdf Normal file

Binary file not shown.

69
go.mod Normal file
View File

@@ -0,0 +1,69 @@
module github.com/gilbertchen/duplicacy
go 1.19
require (
cloud.google.com/go v0.38.0
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a
github.com/aws/aws-sdk-go v1.30.7
github.com/bkaradzic/go-lz4 v1.0.0
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9
github.com/gilbertchen/go-dropbox v0.0.0-20221004154447-61204091e804
github.com/gilbertchen/go-ole v1.2.0
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01
github.com/klauspost/reedsolomon v1.9.9
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1
github.com/minio/highwayhash v1.0.1
github.com/ncw/swift/v2 v2.0.1
github.com/pkg/sftp v1.11.0
github.com/pkg/xattr v0.4.1
github.com/vmihailenco/msgpack v4.0.4+incompatible
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
google.golang.org/api v0.21.0
storj.io/uplink v1.9.0
)
require (
github.com/Azure/go-autorest v10.15.5+incompatible // indirect
github.com/calebcase/tmpfile v1.0.3 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
github.com/dnaeon/go-vcr v1.2.0 // indirect
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 // indirect
github.com/godbus/dbus v4.1.0+incompatible // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/golang/protobuf v1.3.5 // indirect
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
github.com/jmespath/go-jmespath v0.3.0 // indirect
github.com/klauspost/cpuid v1.3.1 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/marstr/guid v1.1.0 // indirect
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/segmentio/go-env v1.1.0 // indirect
github.com/spacemonkeygo/monkit/v3 v3.0.17 // indirect
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec // indirect
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 // indirect
github.com/zeebo/errs v1.3.0 // indirect
go.opencensus.io v0.22.3 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/tools v0.1.1 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/appengine v1.6.5 // indirect
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d // indirect
google.golang.org/grpc v1.28.1 // indirect
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf // indirect
storj.io/drpc v0.0.30 // indirect
)

260
go.sum Normal file
View File

@@ -0,0 +1,260 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-autorest v10.15.5+incompatible h1:vdxx6wM1rVkKt/3niByPVjguoLWkWImOcJNvEykgBzY=
github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a h1:pv34s756C4pEXnjgPfGYgdhg/ZdajGhyOvzx8k+23nw=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-sdk-go v1.30.7 h1:IaXfqtioP6p9SFAnNfsqdNczbR5UNbYqvcZUSsCAdTY=
github.com/aws/aws-sdk-go v1.30.7/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/calebcase/tmpfile v1.0.3 h1:BZrOWZ79gJqQ3XbAQlihYZf/YCV0H4KPIdM5K5oMpJo=
github.com/calebcase/tmpfile v1.0.3/go.mod h1:UAUc01aHeC+pudPagY/lWvt2qS9ZO5Zzof6/tIUzqeI=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI=
github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible h1:2fZxTUw5D9uGWnYTsU/obVavn+1qTF+TsVok3U8uN2Q=
github.com/gilbertchen/azure-sdk-for-go v14.1.2-0.20180323033227-8fd4663cab7c+incompatible/go.mod h1:qsVRCpBUm2l0eMUeI9wZ47yzra2+lv2YkGhMZpzBVUc=
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9 h1:uMgtTp4sRJ7kMQMF3xEKeFntf3XatwkLNL/byj8v97g=
github.com/gilbertchen/cli v1.2.1-0.20160223210219-1de0a1836ce9/go.mod h1:WOnN3JdZiZwUaYtLH2DRxe5PpD43wuOIvc/Wem/39M0=
github.com/gilbertchen/go-dropbox v0.0.0-20221004154447-61204091e804 h1:JZ0P02xoeaITbKLFAdBfiH8SNNvKGE2Y/RLdYtWoEVE=
github.com/gilbertchen/go-dropbox v0.0.0-20221004154447-61204091e804/go.mod h1:85+2CRHC/klHy4vEM+TYtbhDo2wMjPa4JNdVzUHsDIk=
github.com/gilbertchen/go-ole v1.2.0 h1:ay65uwxo6w8UVOxN0+fuCqUXGaXxbmkGs5m4uY6e1Zw=
github.com/gilbertchen/go-ole v1.2.0/go.mod h1:NNiozp7QxhyGmHxxNdFKIcVaINvJFTAjBJ2gYzh8fsg=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2 h1:VDPwi3huqeJBtymgLOvPAP4S2gbSSK/UrWVwRbRAmnw=
github.com/gilbertchen/goamz v0.0.0-20170712012135-eada9f4e8cc2/go.mod h1:AoxJeh8meXUrSWBLiq9BJvYMd9RAAGgEUU0gSkNedRY=
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:0SR0aXvil/eQReU0olxp/j04B+Y/47fjDMotIxaAgKo=
github.com/gilbertchen/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:HDsXH7AAfDsfYYX0te4zsNbnwVvZ2RtLEOCjN4y84jw=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508 h1:SqTyk5KkNXp7zTdTttIZSDcTrL5uau4K/2OpKvgBZVI=
github.com/gilbertchen/keyring v0.0.0-20221004152639-1661cbebc508/go.mod h1:w/pisxUZezf2XzU9Ewjphcf6q1mZtOzKPHhJiuc8cag=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01 h1:LqwS9qL6SrDkp0g0iwUkETrDdtB9gTKaIbSn9imUq5o=
github.com/gilbertchen/xattr v0.0.0-20160926155429-68e7a6806b01/go.mod h1:TMlibuxKfkdtHyltooAw7+DHqRpaXs9nxaffk00Sh1Q=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8 h1:G1U0vew/vA/1/hBmf1XNeyIzJJbPFVv+kb+HPl6rj6c=
github.com/goamz/goamz v0.0.0-20180131231218-8b901b531db8/go.mod h1:/Ya1YZsqLQp17bDgHdyE9/XBR1uIH1HKasTvLxcoM/A=
github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
github.com/godbus/dbus v4.1.0+incompatible/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20211108044417-e9b028704de0 h1:rsq1yB2xiFLDYYaYdlGBsSkwVzsCo500wMhxvW5A/bk=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s=
github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4=
github.com/klauspost/reedsolomon v1.9.9 h1:qCL7LZlv17xMixl55nq2/Oa1Y86nfO8EqDfv2GHND54=
github.com/klauspost/reedsolomon v1.9.9/go.mod h1:O7yFFHiQwDR6b2t63KPUpccPtNdp5ADgh1gg4fd12wo=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1 h1:lYpkrQH5ajf0OXOcUbGjvZxxijuBwbbmlSxLiuofa+g=
github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ=
github.com/minio/highwayhash v1.0.1 h1:dZ6IIu8Z14VlC0VpfKofAhCy74wu/Qb5gcn52yWoz/0=
github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 h1:ULR/QWMgcgRiZLUjSSJMU+fW+RDMstRdmnDWj9Q+AsA=
github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls=
github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8=
github.com/ncw/swift/v2 v2.0.1 h1:q1IN8hNViXEv8Zvg3Xdis4a3c4IlIGezkYz09zQL5J0=
github.com/ncw/swift/v2 v2.0.1/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.11.0 h1:4Zv0OGbpkg4yNuUtH0s8rvoYxRCNyT29NVUo6pgPmxI=
github.com/pkg/sftp v1.11.0/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw=
github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/segmentio/go-env v1.1.0 h1:AGJ7OnCx9M5NWpkYPGYELS6III/pFSnAs1GvKWStiEo=
github.com/segmentio/go-env v1.1.0/go.mod h1:pEKO2ieHe8zF098OMaAHw21SajMuONlnI/vJNB3pB7I=
github.com/spacemonkeygo/monkit/v3 v3.0.17 h1:rqIuLhRUr2UtS3WNVbPY/BwvjlwKVvSOVY5p0QVocxE=
github.com/spacemonkeygo/monkit/v3 v3.0.17/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3 h1:zMsHhfK9+Wdl1F7sIKLyx3wrOFofpb3rWFbA4HgcK5k=
github.com/vivint/infectious v0.0.0-20200605153912-25a574ae18a3/go.mod h1:R0Gbuw7ElaGSLOZUSwBm/GgVwMd30jWxBDdAyMOeTuc=
github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI=
github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs=
github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838 h1:71vQrMauZZhcTVK6KdYM+rklehEEwb3E+ZhaE5jrPrE=
golang.org/x/crypto v0.0.0-20220131195533-30dcbda58838/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181021155630-eda9bb28ed51/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t3cHp9ZpwcdjqyEWLlo=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200425043458-8463f397d07c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.21.0 h1:zS+Q/CJJnVlXpXQVIz+lH0ZT2lBuT2ac7XD8Y/3w6hY=
google.golang.org/api v0.21.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d h1:I7Vuu5Ejagca+VcgfBINHke3xwjCTYnIG4Q57fv0wYY=
google.golang.org/genproto v0.0.0-20200409111301-baae70f3302d/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k=
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf h1:D5xZTDOlTTQWdAWeKKm2pFLcz1sceH+f/pVAcYB9jL8=
storj.io/common v0.0.0-20220414110316-a5cb7172d6bf/go.mod h1:LBJrpAqL4MNSrhGEwc8SJ+tIVtgfCtFEZqDy6/0j67A=
storj.io/drpc v0.0.30 h1:jqPe4T9KEu3CDBI05A2hCMgMSHLtd/E0N0yTF9QreIE=
storj.io/drpc v0.0.30/go.mod h1:6rcOyR/QQkSTX/9L5ZGtlZaE2PtXTTZl8d+ulSeeYEg=
storj.io/uplink v1.9.0 h1:Zg1kX1VqOQIKm0yAukteKpLuT68Be3euyNRML612ERM=
storj.io/uplink v1.9.0/go.mod h1:f6D8306j5mnRHnPDKWCiwtPM6ukyGg77to9LaAY9l6k=

File diff suppressed because it is too large Load Diff

View File

@@ -235,12 +235,12 @@ func TestBackupManager(t *testing.T) {
dataShards := 0
parityShards := 0
if testErasureCoding {
if *testErasureCoding {
dataShards = 5
parityShards = 2
}
if testFixedChunkSize {
if *testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
t.Errorf("Failed to initialize the storage")
}
@@ -257,7 +257,7 @@ func TestBackupManager(t *testing.T) {
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
@@ -282,7 +282,7 @@ func TestBackupManager(t *testing.T) {
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
@@ -303,7 +303,7 @@ func TestBackupManager(t *testing.T) {
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
os.Mkdir(testDir+"/repository1/dir4", 0700)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
// Create some directories and files under repository2 that will be deleted during restore
@@ -368,7 +368,7 @@ func TestBackupManager(t *testing.T) {
}
backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1 /*allowFailures*/, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false, 1024, 1024)
backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -533,7 +533,7 @@ func TestPersistRestore(t *testing.T) {
unencBackupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
unencBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)
@@ -543,7 +543,7 @@ func TestPersistRestore(t *testing.T) {
encBackupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
encBackupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false, 1024, 1024)
time.Sleep(time.Duration(delay) * time.Second)

View File

@@ -29,29 +29,29 @@ func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compres
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
maker := CreateChunkMaker(config, false)
maker := CreateFileChunkMaker(config, false)
startTime := float64(time.Now().UnixNano()) / 1e9
numberOfChunks := 0
reader.Seek(0, os.SEEK_SET)
maker.ForEachChunk(reader,
func(chunk *Chunk, final bool) {
if compression {
key := ""
if encryption {
key = "0123456789abcdef0123456789abcdef"
}
err := chunk.Encrypt([]byte(key), "", false)
if err != nil {
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
}
chunkFunc := func(chunk *Chunk) {
if compression {
key := ""
if encryption {
key = "0123456789abcdef0123456789abcdef"
}
config.PutChunk(chunk)
numberOfChunks++
},
func(size int64, hash string) (io.Reader, bool) {
return nil, false
})
err := chunk.Encrypt([]byte(key), "", false)
if err != nil {
LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
}
}
config.PutChunk(chunk)
numberOfChunks++
}
maker.AddData(reader, chunkFunc)
maker.AddData(nil, chunkFunc)
runningTime := float64(time.Now().UnixNano())/1e9 - startTime
speed := int64(float64(fileSize) / runningTime)

View File

@@ -65,8 +65,8 @@ type Chunk struct {
config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
// by the config
isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
// encryption, where a snapshot chunk is not encrypted by RSA
isMetadata bool // Indicates if the chunk is a metadata chunk (instead of a file chunk). This is primarily used by RSA
// encryption, where a metadata chunk is not encrypted by RSA
isBroken bool // Indicates the chunk did not download correctly. This is only used for -persist (allowFailures) mode
}
@@ -127,7 +127,7 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
chunk.hash = nil
chunk.id = ""
chunk.size = 0
chunk.isSnapshot = false
chunk.isMetadata = false
chunk.isBroken = false
}
@@ -186,7 +186,7 @@ func (chunk *Chunk) VerifyID() {
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isMetadata bool) (err error) {
var aesBlock cipher.Block
var gcm cipher.AEAD
@@ -203,8 +203,8 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
key := encryptionKey
usingRSA := false
// Enable RSA encryption only when the chunk is not a snapshot chunk
if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
// Enable RSA encryption only when the chunk is not a metadata chunk
if chunk.config.rsaPublicKey != nil && !isMetadata && !chunk.isMetadata {
randomKey := make([]byte, 32)
_, err := rand.Read(randomKey)
if err != nil {

View File

@@ -62,7 +62,7 @@ func TestChunkBasic(t *testing.T) {
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
maxSize := 1000000
if testRSAEncryption {
if *testRSAEncryption {
privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
if err != nil {
t.Errorf("Failed to generate a random private key: %v", err)
@@ -71,7 +71,7 @@ func TestChunkBasic(t *testing.T) {
config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
}
if testErasureCoding {
if *testErasureCoding {
config.DataShards = 5
config.ParityShards = 2
}
@@ -98,7 +98,7 @@ func TestChunkBasic(t *testing.T) {
encryptedData := make([]byte, chunk.GetLength())
copy(encryptedData, chunk.GetBytes())
if testErasureCoding {
if *testErasureCoding {
offset := 24 + 32 * 7
start := rand.Int() % (len(encryptedData) - offset) + offset
length := (len(encryptedData) - offset) / 7

View File

@@ -5,7 +5,6 @@
package duplicacy
import (
"io"
"sync/atomic"
"time"
)
@@ -20,78 +19,47 @@ type ChunkDownloadTask struct {
isDownloading bool // 'true' means the chunk has been downloaded or is being downloaded
}
// ChunkDownloadCompletion represents the nofication when a chunk has been downloaded.
type ChunkDownloadCompletion struct {
chunkIndex int // The index of this chunk in the chunk list
chunk *Chunk // The chunk that has been downloaded
chunk *Chunk
chunkIndex int
}
// ChunkDownloader is capable of performing multi-threaded downloading. Chunks to be downloaded are first organized
// ChunkDownloader is a wrapper of ChunkOperator and is only used by the restore procedure.capable of performing multi-threaded downloading. Chunks to be downloaded are first organized
// as a list of ChunkDownloadTasks, with only the chunkHash field initialized. When a chunk is needed, the
// corresponding ChunkDownloadTask is sent to the dowloading goroutine. Once a chunk is downloaded, it will be
// inserted in the completed task list.
type ChunkDownloader struct {
operator *ChunkOperator
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
config *Config // Associated config
storage Storage // Download from this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
showStatistics bool // Show a stats log for each chunk if true
threads int // Number of threads
allowFailures bool // Whether to failfast on download error, or continue
taskList []ChunkDownloadTask // The list of chunks to be downloaded
completedTasks map[int]bool // Store downloaded chunks
lastChunkIndex int // a monotonically increasing number indicating the last chunk to be downloaded
taskQueue chan ChunkDownloadTask // Downloading goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop the dowloading goroutines
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
startTime int64 // The time it starts downloading
numberOfDownloadedChunks int // The number of chunks that have been downloaded
numberOfDownloadingChunks int // The number of chunks still being downloaded
numberOfActiveChunks int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
NumberOfFailedChunks int // The number of chunks that can't be downloaded
}
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkDownloader {
func CreateChunkDownloader(operator *ChunkOperator) *ChunkDownloader {
downloader := &ChunkDownloader{
config: config,
storage: storage,
snapshotCache: snapshotCache,
showStatistics: showStatistics,
threads: threads,
allowFailures: allowFailures,
operator: operator,
taskList: nil,
completedTasks: make(map[int]bool),
lastChunkIndex: 0,
taskQueue: make(chan ChunkDownloadTask, threads),
stopChannel: make(chan bool),
completionChannel: make(chan ChunkDownloadCompletion),
startTime: time.Now().Unix(),
}
// Start the downloading goroutines
for i := 0; i < downloader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <-downloader.taskQueue:
downloader.Download(threadIndex, task)
case <-downloader.stopChannel:
return
}
}
}(i)
}
return downloader
}
@@ -127,26 +95,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
maximumChunks = file.EndChunk - file.StartChunk
}
}
}
// AddChunk adds a single chunk the download list.
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
task := ChunkDownloadTask{
chunkIndex: len(downloader.taskList),
chunkHash: chunkHash,
chunkLength: 0,
needed: true,
isDownloading: false,
}
downloader.taskList = append(downloader.taskList, task)
if downloader.numberOfActiveChunks < downloader.threads {
downloader.taskQueue <- task
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.taskList[len(downloader.taskList)-1].isDownloading = true
}
return len(downloader.taskList) - 1
downloader.operator.totalChunkSize = downloader.totalChunkSize
}
// Prefetch adds up to 'threads' chunks needed by a file to the download list
@@ -159,20 +108,22 @@ func (downloader *ChunkDownloader) Prefetch(file *Entry) {
task := &downloader.taskList[i]
if task.needed {
if !task.isDownloading {
if downloader.numberOfActiveChunks >= downloader.threads {
if downloader.numberOfActiveChunks >= downloader.operator.threads {
return
}
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
downloader.operator.config.GetChunkIDFromHash(task.chunkHash))
downloader.operator.DownloadAsync(task.chunkHash, i, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
} else {
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.operator.config.GetChunkIDFromHash(task.chunkHash))
}
}
}
@@ -186,7 +137,7 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
for i := range downloader.completedTasks {
if i < chunkIndex && downloader.taskList[i].chunk != nil {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.operator.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
delete(downloader.completedTasks, i)
downloader.numberOfActiveChunks--
@@ -222,8 +173,10 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
// If we haven't started download the specified chunk, download it now
if !downloader.taskList[chunkIndex].isDownloading {
LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
downloader.taskQueue <- downloader.taskList[chunkIndex]
downloader.operator.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
downloader.operator.DownloadAsync(downloader.taskList[chunkIndex].chunkHash, chunkIndex, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
downloader.taskList[chunkIndex].isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
@@ -231,7 +184,7 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
// We also need to look ahead and prefetch other chunks as many as permitted by the number of threads
for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
if downloader.numberOfActiveChunks >= downloader.threads {
if downloader.numberOfActiveChunks >= downloader.operator.threads {
break
}
task := &downloader.taskList[i]
@@ -240,8 +193,10 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
}
if !task.isDownloading {
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.operator.config.GetChunkIDFromHash(task.chunkHash))
downloader.operator.DownloadAsync(task.chunkHash, task.chunkIndex, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
@@ -255,9 +210,6 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
if completion.chunk.isBroken {
downloader.NumberOfFailedChunks++
}
}
return downloader.taskList[chunkIndex].chunk
}
@@ -281,13 +233,10 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
// Wait for a completion event first
if downloader.numberOfActiveChunks > 0 {
completion := <-downloader.completionChannel
downloader.config.PutChunk(completion.chunk)
downloader.operator.config.PutChunk(completion.chunk)
downloader.numberOfActiveChunks--
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
if completion.chunk.isBroken {
downloader.NumberOfFailedChunks++
}
}
// Pass the tasks one by one to the download queue
@@ -297,222 +246,13 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
downloader.lastChunkIndex++
continue
}
downloader.taskQueue <- *task
downloader.operator.DownloadAsync(task.chunkHash, task.chunkIndex, false, func (chunk *Chunk, chunkIndex int) {
downloader.completionChannel <- ChunkDownloadCompletion { chunk: chunk, chunkIndex: chunkIndex }
})
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.lastChunkIndex++
}
}
}
// Stop terminates all downloading goroutines
func (downloader *ChunkDownloader) Stop() {
for downloader.numberOfDownloadingChunks > 0 {
completion := <-downloader.completionChannel
downloader.completedTasks[completion.chunkIndex] = true
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
if completion.chunk.isBroken {
downloader.NumberOfFailedChunks++
}
}
for i := range downloader.completedTasks {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
downloader.numberOfActiveChunks--
}
for i := 0; i < downloader.threads; i++ {
downloader.stopChannel <- true
}
}
// Download downloads a chunk from the storage.
func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadTask) bool {
cachedPath := ""
chunk := downloader.config.GetChunk()
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
var exist bool
var err error
// Reset the chunk with a hasher -- we're reading from the cache where chunk are not encrypted or compressed
chunk.Reset(true)
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
} else {
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
return false
}
}
}
}
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
// will be set up before the encryption
chunk.Reset(false)
// If failures are allowed, complete the task properly
completeFailedChunk := func(chunk *Chunk) {
if downloader.allowFailures {
chunk.isBroken = true
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
}
}
const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
completeFailedChunk(chunk)
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
// No chunk is found. Have to find it in the fossil pool again.
fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
completeFailedChunk(chunk)
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
retry := false
// Retry for Hubic or WebDAV as it may return 404 even when the chunk exists
if _, ok := downloader.storage.(*HubicStorage); ok {
retry = true
}
if _, ok := downloader.storage.(*WebDAVStorage); ok {
retry = true
}
if retry && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
continue
}
completeFailedChunk(chunk)
// A chunk is not found. This is a serious error and hopefully it will never happen.
if err != nil {
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return false
}
// We can't download the fossil directly. We have to turn it back into a regular chunk and try
// downloading again.
err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
if err != nil {
completeFailedChunk(chunk)
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
return false
}
LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
continue
}
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
_, isHubic := downloader.storage.(*HubicStorage)
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
continue
} else {
completeFailedChunk(chunk)
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
return false
}
}
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
if err != nil {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
continue
} else {
completeFailedChunk(chunk)
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
return false
}
}
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
if downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "The chunk %s has a hash id of %s; retrying", chunkID, actualChunkID)
chunk.Reset(false)
continue
} else {
completeFailedChunk(chunk)
LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
return false
}
}
break
}
if len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}
}
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
now := time.Now().Unix()
if now <= downloader.startTime {
now = downloader.startTime + 1
}
speed := downloadedChunkSize / (now - downloader.startTime)
remainingTime := int64(0)
if speed > 0 {
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
}
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
task.chunkIndex+1, chunk.GetLength(),
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
} else {
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
}
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
return true
}
}

View File

@@ -25,15 +25,20 @@ type ChunkMaker struct {
bufferSize int
bufferStart int
minimumReached bool
hashSum uint64
chunk *Chunk
config *Config
hashOnly bool
hashOnlyChunk *Chunk
}
// CreateChunkMaker creates a chunk maker. 'randomSeed' is used to generate the character-to-integer table needed by
// buzhash.
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
func CreateFileChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
size := 1
for size*2 <= config.AverageChunkSize {
size *= 2
@@ -67,6 +72,33 @@ func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
}
maker.buffer = make([]byte, 2*config.MinimumChunkSize)
maker.bufferStart = 0
maker.bufferSize = 0
maker.startNewChunk()
return maker
}
// CreateMetaDataChunkMaker creates a chunk maker that always uses the variable-sized chunking algorithm
func CreateMetaDataChunkMaker(config *Config, chunkSize int) *ChunkMaker {
size := 1
for size*2 <= chunkSize {
size *= 2
}
if size != chunkSize {
LOG_FATAL("CHUNK_SIZE", "Invalid metadata chunk size: %d is not a power of 2", chunkSize)
return nil
}
maker := CreateFileChunkMaker(config, false)
maker.hashMask = uint64(chunkSize - 1)
maker.maximumChunkSize = chunkSize * 4
maker.minimumChunkSize = chunkSize / 4
maker.bufferCapacity = 2 * maker.minimumChunkSize
maker.buffer = make([]byte, maker.bufferCapacity)
return maker
}
@@ -90,62 +122,50 @@ func (maker *ChunkMaker) buzhashUpdate(sum uint64, out byte, in byte, length int
return rotateLeftByOne(sum) ^ rotateLeft(maker.randomTable[out], uint(length)) ^ maker.randomTable[in]
}
// ForEachChunk reads data from 'reader'. If EOF is encountered, it will call 'nextReader' to ask for next file. If
// 'nextReader' returns false, it will process remaining data in the buffer and then quit. When a chunk is identified,
// it will call 'endOfChunk' to return the chunk size and a boolean flag indicating if it is the last chunk.
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
nextReader func(size int64, hash string) (io.Reader, bool)) {
func (maker *ChunkMaker) startNewChunk() (chunk *Chunk) {
maker.hashSum = 0
maker.minimumReached = false
if maker.hashOnly {
maker.chunk = maker.hashOnlyChunk
maker.chunk.Reset(true)
} else {
maker.chunk = maker.config.GetChunk()
maker.chunk.Reset(true)
}
return
}
maker.bufferStart = 0
maker.bufferSize = 0
var minimumReached bool
var hashSum uint64
var chunk *Chunk
func (maker *ChunkMaker) AddData(reader io.Reader, sendChunk func(*Chunk)) (int64, string) {
isEOF := false
fileSize := int64(0)
fileHasher := maker.config.NewFileHasher()
// Start a new chunk.
startNewChunk := func() {
hashSum = 0
minimumReached = false
if maker.hashOnly {
chunk = maker.hashOnlyChunk
chunk.Reset(true)
} else {
chunk = maker.config.GetChunk()
chunk.Reset(true)
}
}
// Move data from the buffer to the chunk.
fill := func(count int) {
if maker.bufferStart+count < maker.bufferCapacity {
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
maker.chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
maker.bufferStart += count
maker.bufferSize -= count
} else {
chunk.Write(maker.buffer[maker.bufferStart:])
chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
maker.chunk.Write(maker.buffer[maker.bufferStart:])
maker.chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
maker.bufferSize -= count
}
}
startNewChunk()
var err error
isEOF := false
if maker.minimumChunkSize == maker.maximumChunkSize {
if maker.bufferCapacity < maker.minimumChunkSize {
maker.buffer = make([]byte, maker.minimumChunkSize)
if reader == nil {
return 0, ""
}
for {
maker.startNewChunk()
maker.bufferStart = 0
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
count, err := reader.Read(maker.buffer[maker.bufferStart:maker.minimumChunkSize])
@@ -153,7 +173,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
if err != nil {
if err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
return 0, ""
} else {
isEOF = true
}
@@ -161,26 +181,15 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
maker.bufferStart += count
}
fileHasher.Write(maker.buffer[:maker.bufferStart])
fileSize += int64(maker.bufferStart)
chunk.Write(maker.buffer[:maker.bufferStart])
if maker.bufferStart > 0 {
fileHasher.Write(maker.buffer[:maker.bufferStart])
fileSize += int64(maker.bufferStart)
maker.chunk.Write(maker.buffer[:maker.bufferStart])
sendChunk(maker.chunk)
}
if isEOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
endOfChunk(chunk, true)
return
} else {
endOfChunk(chunk, false)
startNewChunk()
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
} else {
endOfChunk(chunk, false)
startNewChunk()
return fileSize, hex.EncodeToString(fileHasher.Sum(nil))
}
}
@@ -189,7 +198,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
for {
// If the buffer still has some space left and EOF is not seen, read more data.
for maker.bufferSize < maker.bufferCapacity && !isEOF {
for maker.bufferSize < maker.bufferCapacity && !isEOF && reader != nil {
start := maker.bufferStart + maker.bufferSize
count := maker.bufferCapacity - start
if start >= maker.bufferCapacity {
@@ -201,7 +210,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
if err != nil && err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
return 0, ""
}
maker.bufferSize += count
@@ -210,54 +219,55 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
// if EOF is seen, try to switch to next file and continue
if err == io.EOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
isEOF = true
} else {
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
isEOF = true
break
}
}
// No eough data to meet the minimum chunk size requirement, so just return as a chunk.
if maker.bufferSize < maker.minimumChunkSize {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
if reader == nil {
fill(maker.bufferSize)
if maker.chunk.GetLength() > 0 {
sendChunk(maker.chunk)
}
return 0, ""
} else if isEOF {
return fileSize, hex.EncodeToString(fileHasher.Sum(nil))
} else {
continue
}
}
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
if !minimumReached {
if !maker.minimumReached {
bytes := maker.minimumChunkSize
if maker.bufferStart+bytes < maker.bufferCapacity {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
maker.hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
} else {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
hashSum = maker.buzhashSum(hashSum,
maker.hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
maker.hashSum = maker.buzhashSum(maker.hashSum,
maker.buffer[:bytes-(maker.bufferCapacity-maker.bufferStart)])
}
if (hashSum & maker.hashMask) == 0 {
if (maker.hashSum & maker.hashMask) == 0 {
// This is a minimum size chunk
fill(bytes)
endOfChunk(chunk, false)
startNewChunk()
sendChunk(maker.chunk)
maker.startNewChunk()
continue
}
minimumReached = true
maker.minimumReached = true
}
// Now check the buzhash of the data in the buffer, shifting one byte at a time.
bytes := maker.bufferSize - maker.minimumChunkSize
isEOC := false
maxSize := maker.maximumChunkSize - chunk.GetLength()
for i := 0; i < maker.bufferSize-maker.minimumChunkSize; i++ {
isEOC := false // chunk boundary found
maxSize := maker.maximumChunkSize - maker.chunk.GetLength()
for i := 0; i < bytes; i++ {
out := maker.bufferStart + i
if out >= maker.bufferCapacity {
out -= maker.bufferCapacity
@@ -267,8 +277,8 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
in -= maker.bufferCapacity
}
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
if (hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
maker.hashSum = maker.buzhashUpdate(maker.hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
if (maker.hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
// A chunk is completed.
bytes = i + 1 + maker.minimumChunkSize
isEOC = true
@@ -277,21 +287,20 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
}
fill(bytes)
if isEOC {
if isEOF && maker.bufferSize == 0 {
endOfChunk(chunk, true)
return
sendChunk(maker.chunk)
maker.startNewChunk()
} else {
if reader == nil {
fill(maker.minimumChunkSize)
sendChunk(maker.chunk)
maker.startNewChunk()
return 0, ""
}
endOfChunk(chunk, false)
startNewChunk()
continue
}
if isEOF {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
return fileSize, hex.EncodeToString(fileHasher.Sum(nil))
}
}
}

View File

@@ -7,14 +7,12 @@ package duplicacy
import (
"bytes"
crypto_rand "crypto/rand"
"io"
"math/rand"
"sort"
"testing"
)
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
bufferCapacity int) ([]string, int) {
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize int) ([]string, int) {
config := CreateConfig()
@@ -27,14 +25,12 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
maker := CreateChunkMaker(config, false)
maker := CreateFileChunkMaker(config, false)
var chunks []string
totalChunkSize := 0
totalFileSize := int64(0)
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
buffers := make([]*bytes.Buffer, n)
sizes := make([]int, n)
sizes[0] = 0
@@ -42,7 +38,7 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
same := true
for same {
same = false
sizes[i] = rand.Int() % n
sizes[i] = rand.Int() % len(content)
for j := 0; j < i; j++ {
if sizes[i] == sizes[j] {
same = true
@@ -59,22 +55,17 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
}
buffers[n-1] = bytes.NewBuffer(content[sizes[n-1]:])
i := 0
chunkFunc := func(chunk *Chunk) {
chunks = append(chunks, chunk.GetHash())
totalChunkSize += chunk.GetLength()
config.PutChunk(chunk)
}
maker.ForEachChunk(buffers[0],
func(chunk *Chunk, final bool) {
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
chunks = append(chunks, chunk.GetHash())
totalChunkSize += chunk.GetLength()
},
func(size int64, hash string) (io.Reader, bool) {
totalFileSize += size
i++
if i >= len(buffers) {
return nil, false
}
return buffers[i], true
})
for _, buffer := range buffers {
fileSize, _ := maker.AddData(buffer, chunkFunc)
totalFileSize += fileSize
}
maker.AddData(nil, chunkFunc)
if totalFileSize != int64(totalChunkSize) {
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
@@ -96,35 +87,28 @@ func TestChunkMaker(t *testing.T) {
continue
}
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16)
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
32, 48, 64, 128, 256, 512, 1024, 2048}
//capacities := [...]int { 32 }
for _, n := range [...]int{6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} {
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16)
for _, capacity := range capacities {
if totalSize1 != totalSize2 {
t.Errorf("[size %d] total size is %d instead of %d",
size, totalSize2, totalSize1)
}
for _, n := range [...]int{6, 7, 8, 9, 10} {
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
if totalSize1 != totalSize2 {
t.Errorf("[size %d, capacity %d] total size is %d instead of %d",
size, capacity, totalSize2, totalSize1)
}
if len(chunkArray1) != len(chunkArray2) {
t.Errorf("[size %d, capacity %d] number of chunks is %d instead of %d",
size, capacity, len(chunkArray2), len(chunkArray1))
} else {
for i := 0; i < len(chunkArray1); i++ {
if chunkArray1[i] != chunkArray2[i] {
t.Errorf("[size %d, capacity %d, chunk %d] chunk is different", size, capacity, i)
}
if len(chunkArray1) != len(chunkArray2) {
t.Errorf("[size %d] number of chunks is %d instead of %d",
size, len(chunkArray2), len(chunkArray1))
} else {
for i := 0; i < len(chunkArray1); i++ {
if chunkArray1[i] != chunkArray2[i] {
t.Errorf("[size %d, chunk %d] chunk is different", size, i)
}
}
}
}
}

View File

@@ -5,6 +5,7 @@
package duplicacy
import (
"io"
"sync"
"sync/atomic"
"time"
@@ -12,42 +13,69 @@ import (
// These are operations that ChunkOperator will perform.
const (
ChunkOperationFind = 0
ChunkOperationDelete = 1
ChunkOperationFossilize = 2
ChunkOperationResurrect = 3
ChunkOperationDownload = 0
ChunkOperationUpload = 1
ChunkOperationDelete = 2
ChunkOperationFossilize = 3
ChunkOperationResurrect = 4
ChunkOperationFind = 5
)
// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
type ChunkOperatorTask struct {
operation int // The type of operation
chunkID string // The chunk id
filePath string // The path of the chunk file; it may be empty
// ChunkTask is used to pass parameters for different kinds of chunk operations.
type ChunkTask struct {
operation int // The type of operation
chunkID string // The chunk id
chunkHash string // The chunk hash
chunkIndex int // The chunk index
filePath string // The path of the chunk file; it may be empty
isMetadata bool
chunk *Chunk
completionFunc func(chunk *Chunk, chunkIndex int)
}
// ChunkOperator is capable of performing multi-threaded operations on chunks.
type ChunkOperator struct {
numberOfActiveTasks int64 // The number of chunks that are being operated on
storage Storage // This storage
threads int // Number of threads
taskQueue chan ChunkOperatorTask // Operating goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop all the goroutines
config *Config // Associated config
storage Storage // This storage
snapshotCache *FileStorage
showStatistics bool
threads int // Number of threads
taskQueue chan ChunkTask // Operating goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop all the goroutines
fossils []string // For fossilize operation, the paths of the fossils are stored in this slice
fossilsLock *sync.Mutex // The lock for 'fossils'
numberOfActiveTasks int64 // The number of chunks that are being operated on
fossils []string // For fossilize operation, the paths of the fossils are stored in this slice
collectionLock *sync.Mutex // The lock for accessing 'fossils'
startTime int64 // The time it starts downloading
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
allowFailures bool // Whether to fail on download error, or continue
NumberOfFailedChunks int64 // The number of chunks that can't be downloaded
UploadCompletionFunc func(chunk *Chunk, chunkIndex int, inCache bool, chunkSize int, uploadSize int)
}
// CreateChunkOperator creates a new ChunkOperator.
func CreateChunkOperator(storage Storage, threads int) *ChunkOperator {
func CreateChunkOperator(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkOperator {
operator := &ChunkOperator{
config: config,
storage: storage,
snapshotCache: snapshotCache,
showStatistics: showStatistics,
threads: threads,
taskQueue: make(chan ChunkOperatorTask, threads*4),
taskQueue: make(chan ChunkTask, threads),
stopChannel: make(chan bool),
fossils: make([]string, 0),
fossilsLock: &sync.Mutex{},
collectionLock: &sync.Mutex{},
startTime: time.Now().Unix(),
allowFailures: allowFailures,
}
// Start the operator goroutines
@@ -84,38 +112,78 @@ func (operator *ChunkOperator) Stop() {
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
}
func (operator *ChunkOperator) AddTask(operation int, chunkID string, filePath string) {
func (operator *ChunkOperator) WaitForCompletion() {
task := ChunkOperatorTask{
operation: operation,
chunkID: chunkID,
filePath: filePath,
for atomic.LoadInt64(&operator.numberOfActiveTasks) > 0 {
time.Sleep(100 * time.Millisecond)
}
operator.taskQueue <- task
atomic.AddInt64(&operator.numberOfActiveTasks, int64(1))
}
func (operator *ChunkOperator) Find(chunkID string) {
operator.AddTask(ChunkOperationFind, chunkID, "")
func (operator *ChunkOperator) AddTask(operation int, chunkID string, chunkHash string, filePath string, chunkIndex int, chunk *Chunk, isMetadata bool, completionFunc func(*Chunk, int)) {
task := ChunkTask {
operation: operation,
chunkID: chunkID,
chunkHash: chunkHash,
chunkIndex: chunkIndex,
filePath: filePath,
chunk: chunk,
isMetadata: isMetadata,
completionFunc: completionFunc,
}
operator.taskQueue <- task
atomic.AddInt64(&operator.numberOfActiveTasks, int64(1))
return
}
// Download synchronously downloads the chunk identified by chunkHash and
// returns it.  It blocks until one of the operator's goroutines finishes
// the task and hands the chunk back through the completion channel.
func (operator *ChunkOperator) Download(chunkHash string, chunkIndex int, isMetadata bool) *Chunk {
	chunkID := operator.config.GetChunkIDFromHash(chunkHash)

	// The completion callback simply forwards the finished chunk back to us.
	completionChannel := make(chan *Chunk)
	completionFunc := func(chunk *Chunk, chunkIndex int) {
		completionChannel <- chunk
	}

	operator.AddTask(ChunkOperationDownload, chunkID, chunkHash, "", chunkIndex, nil, isMetadata, completionFunc)
	return <-completionChannel
}
// DownloadAsync queues a download of the chunk identified by chunkHash and
// returns immediately; completionFunc is invoked with the chunk when done.
func (operator *ChunkOperator) DownloadAsync(chunkHash string, chunkIndex int, isMetadata bool, completionFunc func(*Chunk, int)) {
	chunkID := operator.config.GetChunkIDFromHash(chunkHash)
	operator.AddTask(ChunkOperationDownload, chunkID, chunkHash, "", chunkIndex, nil, isMetadata, completionFunc)
}
// Upload queues the given chunk for uploading.  Completion is reported
// through operator.UploadCompletionFunc rather than a per-task callback.
func (operator *ChunkOperator) Upload(chunk *Chunk, chunkIndex int, isMetadata bool) {
	chunkHash := chunk.GetHash()
	chunkID := operator.config.GetChunkIDFromHash(chunkHash)
	operator.AddTask(ChunkOperationUpload, chunkID, chunkHash, "", chunkIndex, chunk, isMetadata, nil)
}
func (operator *ChunkOperator) Delete(chunkID string, filePath string) {
operator.AddTask(ChunkOperationDelete, chunkID, filePath)
operator.AddTask(ChunkOperationDelete, chunkID, "", filePath, 0, nil, false, nil)
}
func (operator *ChunkOperator) Fossilize(chunkID string, filePath string) {
operator.AddTask(ChunkOperationFossilize, chunkID, filePath)
operator.AddTask(ChunkOperationFossilize, chunkID, "", filePath, 0, nil, false, nil)
}
func (operator *ChunkOperator) Resurrect(chunkID string, filePath string) {
operator.AddTask(ChunkOperationResurrect, chunkID, filePath)
operator.AddTask(ChunkOperationResurrect, chunkID, "", filePath, 0, nil, false, nil)
}
func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
func (operator *ChunkOperator) Run(threadIndex int, task ChunkTask) {
defer func() {
atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1))
}()
if task.operation == ChunkOperationDownload {
operator.DownloadChunk(threadIndex, task)
return
} else if task.operation == ChunkOperationUpload {
operator.UploadChunk(threadIndex, task)
return
}
// task.filePath may be empty. If so, find the chunk first.
if task.operation == ChunkOperationDelete || task.operation == ChunkOperationFossilize {
if task.filePath == "" {
@@ -132,9 +200,9 @@ func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
fossilPath, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true)
if exist {
LOG_WARN("CHUNK_FOSSILIZE", "Chunk %s is already a fossil", task.chunkID)
operator.fossilsLock.Lock()
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
operator.collectionLock.Unlock()
} else {
LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID)
}
@@ -175,17 +243,17 @@ func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
if err == nil {
LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s as the fossil already exists", task.chunkID)
}
operator.fossilsLock.Lock()
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
operator.collectionLock.Unlock()
} else {
LOG_ERROR("CHUNK_DELETE", "Failed to fossilize the chunk %s: %v", task.chunkID, err)
}
} else {
LOG_TRACE("CHUNK_FOSSILIZE", "The chunk %s has been marked as a fossil", task.chunkID)
operator.fossilsLock.Lock()
operator.collectionLock.Lock()
operator.fossils = append(operator.fossils, fossilPath)
operator.fossilsLock.Unlock()
operator.collectionLock.Unlock()
}
} else if task.operation == ChunkOperationResurrect {
chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false)
@@ -207,3 +275,267 @@ func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) {
}
}
}
// DownloadChunk is executed by a task goroutine to download a single chunk.
// For metadata chunks it first consults the local snapshot cache; otherwise
// it locates the chunk in the storage, resurrecting a fossil if necessary,
// and retries transient failures up to MaxDownloadAttempts times.  On
// success the chunk is passed to task.completionFunc; ownership transfers
// to the callback, which is signalled by setting chunk to nil so the
// deferred PutChunk is skipped.
func (operator *ChunkOperator) DownloadChunk(threadIndex int, task ChunkTask) {
	cachedPath := ""
	chunk := operator.config.GetChunk()
	chunk.isMetadata = task.isMetadata
	chunkID := task.chunkID

	// Return the chunk to the pool unless it was handed off to the callback.
	defer func() {
		if chunk != nil {
			operator.config.PutChunk(chunk)
		}
	}()

	// Fast path: metadata chunks may already be in the local snapshot cache.
	if task.isMetadata && operator.snapshotCache != nil {
		var exist bool
		var err error
		// Reset the chunk with a hasher -- we're reading from the cache where chunks are not encrypted or compressed
		chunk.Reset(true)
		cachedPath, exist, _, err = operator.snapshotCache.FindChunk(threadIndex, chunkID, false)
		if err != nil {
			LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
		} else if exist {
			err = operator.snapshotCache.DownloadFile(0, cachedPath, chunk)
			if err != nil {
				LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
			} else {
				// Verify the cached copy before trusting it.
				actualChunkID := chunk.GetID()
				if actualChunkID != chunkID {
					LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
						"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
				} else {
					LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
					task.completionFunc(chunk, task.chunkIndex)
					chunk = nil
					return
				}
			}
		}
	}

	// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
	// will be set up before the encryption
	chunk.Reset(false)
	chunk.isMetadata = task.isMetadata

	// If failures are allowed, complete the task properly
	completeFailedChunk := func() {
		atomic.AddInt64(&operator.NumberOfFailedChunks, 1)
		if operator.allowFailures {
			task.completionFunc(chunk, task.chunkIndex)
		}
	}

	const MaxDownloadAttempts = 3
	for downloadAttempt := 0; ; downloadAttempt++ {
		// Find the chunk by ID first.
		chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, false)
		if err != nil {
			completeFailedChunk()
			LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
			return
		}

		if !exist {
			// No chunk is found. Have to find it in the fossil pool again.
			fossilPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, true)
			if err != nil {
				completeFailedChunk()
				LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
				return
			}

			if !exist {
				retry := false
				// Retry for Hubic or WebDAV as it may return 404 even when the chunk exists
				if _, ok := operator.storage.(*HubicStorage); ok {
					retry = true
				}
				if _, ok := operator.storage.(*WebDAVStorage); ok {
					retry = true
				}
				if retry && downloadAttempt < MaxDownloadAttempts {
					LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
					continue
				}

				// A chunk is not found. This is a serious error and hopefully it will never happen.
				completeFailedChunk()
				if err != nil {
					LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
				} else {
					LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
				}
				return
			}

			// We can't download the fossil directly. We have to turn it back into a regular chunk and try
			// downloading again.
			err = operator.storage.MoveFile(threadIndex, fossilPath, chunkPath)
			if err != nil {
				completeFailedChunk()
				LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
				return
			}

			LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
			continue
		}

		err = operator.storage.DownloadFile(threadIndex, chunkPath, chunk)
		if err != nil {
			_, isHubic := operator.storage.(*HubicStorage)
			// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
			if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
				LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
				// Discard the partial content before retrying.
				chunk.Reset(false)
				chunk.isMetadata = task.isMetadata
				continue
			} else {
				completeFailedChunk()
				LOG_WERROR(operator.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
				return
			}
		}

		err = chunk.Decrypt(operator.config.ChunkKey, task.chunkHash)
		if err != nil {
			if downloadAttempt < MaxDownloadAttempts {
				LOG_WARN("DOWNLOAD_RETRY", "Failed to decrypt the chunk %s: %v; retrying", chunkID, err)
				chunk.Reset(false)
				chunk.isMetadata = task.isMetadata
				continue
			} else {
				completeFailedChunk()
				LOG_WERROR(operator.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
				return
			}
		}

		// Final integrity check: the decrypted content must hash back to the
		// requested chunk id.
		actualChunkID := chunk.GetID()
		if actualChunkID != chunkID {
			if downloadAttempt < MaxDownloadAttempts {
				LOG_WARN("DOWNLOAD_RETRY", "The chunk %s has a hash id of %s; retrying", chunkID, actualChunkID)
				chunk.Reset(false)
				chunk.isMetadata = task.isMetadata
				continue
			} else {
				completeFailedChunk()
				LOG_WERROR(operator.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
				return
			}
		}

		break
	}

	if chunk.isMetadata && len(cachedPath) > 0 {
		// Save a copy to the local snapshot cache
		err := operator.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
		if err != nil {
			LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
		}
	}

	// Progress reporting: derive speed, ETA and percentage from the running
	// totals; startTime is clamped so the elapsed time is at least 1 second.
	downloadedChunkSize := atomic.AddInt64(&operator.downloadedChunkSize, int64(chunk.GetLength()))
	if (operator.showStatistics || IsTracing()) && operator.totalChunkSize > 0 {
		now := time.Now().Unix()
		if now <= operator.startTime {
			now = operator.startTime + 1
		}
		speed := downloadedChunkSize / (now - operator.startTime)
		remainingTime := int64(0)
		if speed > 0 {
			remainingTime = (operator.totalChunkSize-downloadedChunkSize)/speed + 1
		}
		percentage := float32(downloadedChunkSize * 1000 / operator.totalChunkSize)
		LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
			task.chunkIndex+1, chunk.GetLength(),
			PrettySize(speed), PrettyTime(remainingTime), percentage/10)
	} else {
		LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
	}

	task.completionFunc(chunk, task.chunkIndex)
	chunk = nil
	return
}
// UploadChunk is called by the task goroutines to perform the actual uploading.
// Metadata chunks are verified and mirrored to the local snapshot cache when
// the storage requires one.  If a chunk with the same id already exists in
// the storage the upload is skipped (deduplication by name).  Returns true
// only when the chunk was actually (or would have been, in dry-run) uploaded.
func (operator *ChunkOperator) UploadChunk(threadIndex int, task ChunkTask) bool {
	chunk := task.chunk
	chunkID := task.chunkID
	chunkSize := chunk.GetLength()

	// For a snapshot chunk, verify that its chunk id is correct
	if task.isMetadata {
		chunk.VerifyID()
	}

	if task.isMetadata && operator.snapshotCache != nil && operator.storage.IsCacheNeeded() {
		// Save a copy to the local snapshot.
		chunkPath, exist, _, err := operator.snapshotCache.FindChunk(threadIndex, chunkID, false)
		if err != nil {
			LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
		} else if exist {
			LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
		} else if err = operator.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
			LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
		} else {
			LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
		}
	}

	// This returns the path the chunk file should be at.
	chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, chunkID, false)
	if err != nil {
		LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
		return false
	}

	if exist {
		// Chunk deduplication by name in effect here.
		// NOTE(review): the third argument here is `false` while the old
		// ChunkUploader passed `true` (skipped) in the same situation --
		// confirm whether the `inCache` flag should be true for duplicates.
		LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
		operator.UploadCompletionFunc(chunk, task.chunkIndex, false, chunkSize, 0)
		return false
	}

	// Encrypt the chunk only after we know that it must be uploaded.
	err = chunk.Encrypt(operator.config.ChunkKey, chunk.GetHash(), task.isMetadata)
	if err != nil {
		LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
		return false
	}

	// In dry-run mode everything up to the actual network write is performed.
	if !operator.config.dryRun {
		err = operator.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
		if err != nil {
			LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
			return false
		}
		LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
	} else {
		LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
	}

	operator.UploadCompletionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
	return true
}

View File

@@ -15,11 +15,11 @@ import (
"math/rand"
)
func TestUploaderAndDownloader(t *testing.T) {
func TestChunkOperator(t *testing.T) {
rand.Seed(time.Now().UnixNano())
setTestingT(t)
SetLoggingLevel(INFO)
SetLoggingLevel(DEBUG)
defer func() {
if r := recover(); r != nil {
@@ -38,7 +38,7 @@ func TestUploaderAndDownloader(t *testing.T) {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
t.Logf("storage: %s", testStorageName)
t.Logf("storage: %s", *testStorageName)
storage, err := loadStorage(testDir, 1)
if err != nil {
@@ -46,7 +46,7 @@ func TestUploaderAndDownloader(t *testing.T) {
return
}
storage.EnableTestMode()
storage.SetRateLimits(testRateLimit, testRateLimit)
storage.SetRateLimits(*testRateLimit, *testRateLimit)
for _, dir := range []string{"chunks", "snapshots"} {
err = storage.CreateDirectory(0, dir)
@@ -59,7 +59,7 @@ func TestUploaderAndDownloader(t *testing.T) {
numberOfChunks := 100
maxChunkSize := 64 * 1024
if testQuickMode {
if *testQuickMode {
numberOfChunks = 10
}
@@ -87,35 +87,25 @@ func TestUploaderAndDownloader(t *testing.T) {
totalFileSize += chunk.GetLength()
}
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
chunkOperator := CreateChunkOperator(config, storage, nil, false, *testThreads, false)
chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
}
chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
for i, chunk := range chunks {
chunkUploader.StartChunk(chunk, i)
chunkOperator.Upload(chunk, i, false)
}
chunkUploader.Stop()
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads, false)
chunkDownloader.totalChunkSize = int64(totalFileSize)
for _, chunk := range chunks {
chunkDownloader.AddChunk(chunk.GetHash())
}
chunkOperator.WaitForCompletion()
for i, chunk := range chunks {
downloaded := chunkDownloader.WaitForChunk(i)
downloaded := chunkOperator.Download(chunk.GetHash(), i, false)
if downloaded.GetID() != chunk.GetID() {
t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
}
}
chunkDownloader.Stop()
chunkOperator.Stop()
for _, file := range listChunks(storage) {
err = storage.DeleteFile(0, "chunks/"+file)

View File

@@ -1,151 +0,0 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"sync/atomic"
"time"
)
// ChunkUploadTask represents a chunk to be uploaded.
type ChunkUploadTask struct {
	chunk      *Chunk // The chunk to be uploaded
	chunkIndex int    // Position of the chunk in the sequence being uploaded
}
// ChunkUploader uploads chunks to the storage using one or more uploading goroutines. Chunks are added
// by the call to StartChunk(), and then passed to the uploading goroutines. The completion function is
// called when the uploading is completed. Note that ChunkUploader does not release chunks to the
// chunk pool; that is left to the completion function.
type ChunkUploader struct {
	config        *Config      // Associated config
	storage       Storage      // Upload to this storage
	snapshotCache *FileStorage // Used as cache if not nil; usually for uploading snapshot chunks

	threads     int                  // Number of uploading goroutines
	taskQueue   chan ChunkUploadTask // Uploading goroutines are listening on this channel for upload jobs
	stopChannel chan bool            // Used to terminate uploading goroutines

	numberOfUploadingTasks int32 // The number of uploading tasks still pending (atomic)

	// Uploading goroutines call this function after having uploaded chunks
	completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)
}
// CreateChunkUploader constructs a ChunkUploader that will upload chunks to
// the given storage with the requested number of goroutines, invoking
// completionFunc for every finished chunk.  Call Start() to launch the
// goroutines.
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
	completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
	return &ChunkUploader{
		config:         config,
		storage:        storage,
		snapshotCache:  snapshotCache,
		threads:        threads,
		taskQueue:      make(chan ChunkUploadTask, 1),
		stopChannel:    make(chan bool),
		completionFunc: completionFunc,
	}
}
// Start starts the uploading goroutines.  Each goroutine loops, serving
// tasks from taskQueue until a value arrives on stopChannel.
func (uploader *ChunkUploader) Start() {
	for i := 0; i < uploader.threads; i++ {
		go func(threadIndex int) {
			defer CatchLogException()
			for {
				select {
				case task := <-uploader.taskQueue:
					uploader.Upload(threadIndex, task)
				case <-uploader.stopChannel:
					return
				}
			}
		}(i)
	}
}
// StartChunk hands a chunk to an idle uploading goroutine.  The pending-task
// counter is bumped before the send; the call blocks while every goroutine
// is busy.
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
	atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
	task := ChunkUploadTask{
		chunk:      chunk,
		chunkIndex: chunkIndex,
	}
	uploader.taskQueue <- task
}
// Stop waits for all pending uploads to finish and then terminates every
// uploading goroutine by sending one value per thread on stopChannel.
func (uploader *ChunkUploader) Stop() {
	// Poll the atomic task counter until it drops to zero.
	for atomic.LoadInt32(&uploader.numberOfUploadingTasks) > 0 {
		time.Sleep(100 * time.Millisecond)
	}

	for i := 0; i < uploader.threads; i++ {
		uploader.stopChannel <- false
	}
}
// Upload is called by the uploading goroutines to perform the actual uploading.
// If a chunk with the same id already exists in the storage the upload is
// skipped and completionFunc is told skipped=true.  Returns true only when
// the chunk was actually (or would have been, in dry-run) uploaded.
// NOTE(review): the early-return error paths do not decrement
// numberOfUploadingTasks; this appears to rely on LOG_ERROR unwinding via
// CatchLogException -- confirm.
func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) bool {
	chunk := task.chunk
	chunkSize := chunk.GetLength()
	chunkID := chunk.GetID()

	// For a snapshot chunk, verify that its chunk id is correct
	if uploader.snapshotCache != nil {
		chunk.VerifyID()
	}

	if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
		// Save a copy to the local snapshot.
		chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
		if err != nil {
			LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
		} else if exist {
			LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
		} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
			LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
		} else {
			LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
		}
	}

	// This returns the path the chunk file should be at.
	chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
	if err != nil {
		LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
		return false
	}

	if exist {
		// Chunk deduplication by name in effect here.
		LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
		uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
		atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
		return false
	}

	// Encrypt the chunk only after we know that it must be uploaded.
	err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
	if err != nil {
		LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
		return false
	}

	// In dry-run mode everything up to the actual network write is performed.
	if !uploader.config.dryRun {
		err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
		if err != nil {
			LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
			return false
		}
		LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
	} else {
		LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
	}

	uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
	atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
	return true
}

View File

@@ -21,11 +21,11 @@ type DropboxStorage struct {
}
// CreateDropboxStorage creates a dropbox storage object.
func CreateDropboxStorage(accessToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
func CreateDropboxStorage(refreshToken string, storageDir string, minimumNesting int, threads int) (storage *DropboxStorage, err error) {
var clients []*dropbox.Files
for i := 0; i < threads; i++ {
client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
client := dropbox.NewFiles(dropbox.NewConfig("", refreshToken, "https://duplicacy.com/dropbox_refresh"))
clients = append(clients, client)
}

View File

@@ -16,6 +16,11 @@ import (
"strconv"
"strings"
"time"
"bytes"
"crypto/sha256"
"github.com/vmihailenco/msgpack"
)
// This is the hidden directory in the repository for storing various files.
@@ -45,7 +50,7 @@ type Entry struct {
EndChunk int
EndOffset int
Attributes map[string][]byte
Attributes *map[string][]byte
}
// CreateEntry creates an entry from file properties.
@@ -93,6 +98,27 @@ func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {
return entry
}
// Copy returns a shallow copy of the entry.  Note that Attributes is a
// pointer, so the attribute map is shared between the original and the copy.
func (entry *Entry) Copy() *Entry {
	return &Entry{
		Path: entry.Path,
		Size: entry.Size,
		Time: entry.Time,
		Mode: entry.Mode,
		Link: entry.Link,
		Hash: entry.Hash,

		UID: entry.UID,
		GID: entry.GID,

		StartChunk:  entry.StartChunk,
		StartOffset: entry.StartOffset,
		EndChunk:    entry.EndChunk,
		EndOffset:   entry.EndOffset,

		Attributes: entry.Attributes,
	}
}
// CreateEntryFromJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
@@ -175,17 +201,17 @@ func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
if attributes, ok := value.(map[string]interface{}); !ok {
return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
} else {
entry.Attributes = make(map[string][]byte)
entry.Attributes = &map[string][]byte{}
for name, object := range attributes {
if object == nil {
entry.Attributes[name] = []byte("")
(*entry.Attributes)[name] = []byte("")
} else if attributeInBase64, ok := object.(string); !ok {
return fmt.Errorf("Attribute '%s' is invalid for file '%s' in the snapshot", name, entry.Path)
} else if attribute, err := base64.StdEncoding.DecodeString(attributeInBase64); err != nil {
return fmt.Errorf("Failed to decode attribute '%s' for file '%s' in the snapshot: %v",
name, entry.Path, err)
} else {
entry.Attributes[name] = attribute
(*entry.Attributes)[name] = attribute
}
}
}
@@ -244,7 +270,7 @@ func (entry *Entry) convertToObject(encodeName bool) map[string]interface{} {
object["gid"] = entry.GID
}
if len(entry.Attributes) > 0 {
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
object["attributes"] = entry.Attributes
}
@@ -259,6 +285,197 @@ func (entry *Entry) MarshalJSON() ([]byte, error) {
return description, err
}
var _ msgpack.CustomEncoder = (*Entry)(nil)
var _ msgpack.CustomDecoder = (*Entry)(nil)
// EncodeMsgpack implements msgpack.CustomEncoder.  Fields are written in a
// fixed order (path, size, time, mode, link, hash, start/end chunk and
// offset, uid, gid, attribute count, then sorted attribute key/value pairs);
// DecodeMsgpack must read them in exactly the same order.  Attributes are
// sorted by name so the encoding is deterministic.
func (entry *Entry) EncodeMsgpack(encoder *msgpack.Encoder) error {
	err := encoder.EncodeString(entry.Path)
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(entry.Size)
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(entry.Time)
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.Mode))
	if err != nil {
		return err
	}
	err = encoder.EncodeString(entry.Link)
	if err != nil {
		return err
	}
	err = encoder.EncodeString(entry.Hash)
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.StartChunk))
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.StartOffset))
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.EndChunk))
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.EndOffset))
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.UID))
	if err != nil {
		return err
	}
	err = encoder.EncodeInt(int64(entry.GID))
	if err != nil {
		return err
	}

	// A nil Attributes pointer is encoded as a count of zero.
	var numberOfAttributes int64
	if entry.Attributes != nil {
		numberOfAttributes = int64(len(*entry.Attributes))
	}
	err = encoder.EncodeInt(numberOfAttributes)
	if err != nil {
		return err
	}

	if entry.Attributes != nil {
		// Sort the attribute names for a deterministic encoding.
		attributes := make([]string, numberOfAttributes)
		i := 0
		for attribute := range *entry.Attributes {
			attributes[i] = attribute
			i++
		}
		sort.Strings(attributes)

		for _, attribute := range attributes {
			err = encoder.EncodeString(attribute)
			if err != nil {
				return err
			}
			err = encoder.EncodeString(string((*entry.Attributes)[attribute]))
			if err != nil {
				return err
			}
		}
	}

	return nil
}
// DecodeMsgpack implements msgpack.CustomDecoder.  It reads the fields in
// the exact order written by EncodeMsgpack: path, size, time, mode, link,
// hash, start/end chunk and offset, uid, gid, attribute count, and then the
// attribute key/value pairs.  Attributes is left nil when the count is zero.
func (entry *Entry) DecodeMsgpack(decoder *msgpack.Decoder) error {
	var err error
	entry.Path, err = decoder.DecodeString()
	if err != nil {
		return err
	}
	entry.Size, err = decoder.DecodeInt64()
	if err != nil {
		return err
	}
	entry.Time, err = decoder.DecodeInt64()
	if err != nil {
		return err
	}
	mode, err := decoder.DecodeInt64()
	if err != nil {
		return err
	}
	entry.Mode = uint32(mode)
	entry.Link, err = decoder.DecodeString()
	if err != nil {
		return err
	}
	entry.Hash, err = decoder.DecodeString()
	if err != nil {
		return err
	}
	startChunk, err := decoder.DecodeInt()
	if err != nil {
		return err
	}
	entry.StartChunk = int(startChunk)
	startOffset, err := decoder.DecodeInt()
	if err != nil {
		return err
	}
	entry.StartOffset = int(startOffset)
	endChunk, err := decoder.DecodeInt()
	if err != nil {
		return err
	}
	entry.EndChunk = int(endChunk)
	endOffset, err := decoder.DecodeInt()
	if err != nil {
		return err
	}
	entry.EndOffset = int(endOffset)
	uid, err := decoder.DecodeInt()
	if err != nil {
		return err
	}
	entry.UID = int(uid)
	gid, err := decoder.DecodeInt()
	if err != nil {
		return err
	}
	entry.GID = int(gid)

	numberOfAttributes, err := decoder.DecodeInt()
	if err != nil {
		return err
	}

	if numberOfAttributes > 0 {
		entry.Attributes = &map[string][]byte{}
		for i := 0; i < numberOfAttributes; i++ {
			attribute, err := decoder.DecodeString()
			if err != nil {
				return err
			}
			value, err := decoder.DecodeString()
			if err != nil {
				return err
			}
			(*entry.Attributes)[attribute] = []byte(value)
		}
	}

	return nil
}
// IsFile reports whether the entry is a regular file (no os.ModeType bits set).
func (entry *Entry) IsFile() bool {
	return entry.Mode&uint32(os.ModeType) == 0
}
@@ -271,10 +488,27 @@ func (entry *Entry) IsLink() bool {
return entry.Mode&uint32(os.ModeSymlink) != 0
}
// IsComplete reports whether the entry's content has been fully recorded;
// a negative size marks an entry that is not yet complete.
func (entry *Entry) IsComplete() bool {
	return entry.Size >= 0
}
// GetPermissions returns the entry's mode restricted to fileModeMask bits.
func (entry *Entry) GetPermissions() os.FileMode {
	return os.FileMode(entry.Mode) & fileModeMask
}
// GetParent returns the path of the entry's parent directory, or "" when the
// entry has no parent.  A single trailing '/' on the entry path is ignored.
func (entry *Entry) GetParent() string {
	path := strings.TrimSuffix(entry.Path, "/")
	if i := strings.LastIndex(path, "/"); i != -1 {
		return path[:i]
	}
	return ""
}
// IsSameAs reports whether the other entry looks unchanged: the sizes are
// identical and the modification times differ by at most one second.
func (entry *Entry) IsSameAs(other *Entry) bool {
	if entry.Size != other.Size {
		return false
	}
	return entry.Time >= other.Time-1 && entry.Time <= other.Time+1
}
@@ -326,7 +560,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
}
}
if len(entry.Attributes) > 0 {
if entry.Attributes != nil && len(*entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
@@ -335,47 +569,62 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
// Files are always arranged before subdirectories under the same parent directory.
func (left *Entry) Compare(right *Entry) int {
path1 := left.Path
path2 := right.Path
func ComparePaths(left string, right string) int {
p := 0
for ; p < len(path1) && p < len(path2); p++ {
if path1[p] != path2[p] {
for ; p < len(left) && p < len(right); p++ {
if left[p] != right[p] {
break
}
}
// c1, c2 is the first byte that differs
// c1, c2 are the first bytes that differ
var c1, c2 byte
if p < len(path1) {
c1 = path1[p]
if p < len(left) {
c1 = left[p]
}
if p < len(path2) {
c2 = path2[p]
if p < len(right) {
c2 = right[p]
}
// c3, c4 indicates how the current component ends
// c3 == '/': the current component is a directory
// c3 != '/': the current component is the last one
// c3, c4 indicate how the current component ends
// c3 == '/': the current component is a directory; c3 != '/': the current component is the last one
c3 := c1
for i := p; c3 != '/' && i < len(path1); i++ {
c3 = path1[i]
// last1, last2 means if the current compoent is the last component
last1 := true
for i := p; i < len(left); i++ {
c3 = left[i]
if c3 == '/' {
last1 = i == len(left) - 1
break
}
}
c4 := c2
for i := p; c4 != '/' && i < len(path2); i++ {
c4 = path2[i]
last2 := true
for i := p; i < len(right); i++ {
c4 = right[i]
if c4 == '/' {
last2 = i == len(right) - 1
break
}
}
if last1 != last2 {
if last1 {
return -1
} else {
return 1
}
}
if c3 == '/' {
if c4 == '/' {
// We are comparing two directory components
if c1 == '/' {
// left is shorter
// Note that c2 maybe smaller than c1 but c1 is '/' which is counted
// as 0
// left is shorter; note that c2 maybe smaller than c1 but c1 should be treated as 0 therefore
// this is a special case that must be handled separately
return -1
} else if c2 == '/' {
// right is shorter
@@ -397,6 +646,10 @@ func (left *Entry) Compare(right *Entry) int {
}
}
// Compare orders two entries by their paths via ComparePaths.
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
func (left *Entry) Compare(right *Entry) int {
	return ComparePaths(left.Path, right.Path)
}
// This is used to sort entries by their names.
type ByName []*Entry
@@ -443,7 +696,7 @@ func (files FileInfoCompare) Less(i, j int) bool {
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, nobackupFile string, discardAttributes bool, excludeByAttribute bool) (directoryList []*Entry,
func ListEntries(top string, path string, patterns []string, nobackupFile string, excludeByAttribute bool, listingChannel chan *Entry) (directoryList []*Entry,
skippedFiles []string, err error) {
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
@@ -478,8 +731,6 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
sort.Sort(FileInfoCompare(files))
entries := make([]*Entry, 0, 4)
for _, f := range files {
if f.Name() == DUPLICACY_DIRECTORY {
continue
@@ -520,11 +771,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
}
}
if !discardAttributes {
entry.ReadAttributes(top)
}
entry.ReadAttributes(top)
if excludeByAttribute && excludedByAttribute(entry.Attributes) {
if excludeByAttribute && entry.Attributes != nil && excludedByAttribute(*entry.Attributes) {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded by attribute", entry.Path)
continue
}
@@ -535,20 +784,20 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
continue
}
entries = append(entries, entry)
if entry.IsDir() {
directoryList = append(directoryList, entry)
} else {
listingChannel <- entry
}
}
// For top level directory we need to sort again because symlinks may have been changed
if path == "" {
sort.Sort(ByName(entries))
sort.Sort(ByName(directoryList))
}
for _, entry := range entries {
if entry.IsDir() {
directoryList = append(directoryList, entry)
} else {
*fileList = append(*fileList, entry)
}
for _, entry := range directoryList {
listingChannel <- entry
}
for i, j := 0, len(directoryList)-1; i < j; i, j = i+1, j-1 {
@@ -597,3 +846,100 @@ func (entry *Entry) Diff(chunkHashes []string, chunkLengths []int,
return modifiedLength
}
// EncodeWithHash writes the msgpack serialization of the entry followed by the
// SHA-256 digest of that serialization, so DecodeEntryWithHash can detect
// on-disk corruption when the entry is read back.
func (entry *Entry) EncodeWithHash(encoder *msgpack.Encoder) error {
	serialized, err := msgpack.Marshal(entry)
	if err != nil {
		return err
	}
	digest := sha256.Sum256(serialized)
	if err = encoder.EncodeBytes(serialized); err != nil {
		return err
	}
	return encoder.EncodeBytes(digest[:])
}
// DecodeEntryWithHash reads an entry previously written by EncodeWithHash:
// first the msgpack-serialized entry bytes, then their SHA-256 digest. The
// digest is recomputed and verified before the entry is unmarshaled, so a
// "corrupted file metadata" error is returned on any mismatch.
func DecodeEntryWithHash(decoder *msgpack.Decoder) (*Entry, error) {
	entryBytes, err := decoder.DecodeBytes()
	if err != nil {
		return nil, err
	}
	hashBytes, err := decoder.DecodeBytes()
	if err != nil {
		return nil, err
	}
	expectedHash := sha256.Sum256(entryBytes)
	// bytes.Equal is the idiomatic form of bytes.Compare(...) != 0 (staticcheck S1004).
	if !bytes.Equal(expectedHash[:], hashBytes) {
		return nil, fmt.Errorf("corrupted file metadata")
	}
	var entry Entry
	if err = msgpack.Unmarshal(entryBytes, &entry); err != nil {
		return nil, err
	}
	return &entry, nil
}
// check validates the chunk references of the entry against the snapshot's
// chunk length list: chunk indexes must be in range, start/end offsets must
// fall inside their chunks, and the sum of the referenced chunk spans must
// equal the recorded file size. Non-files and empty files carry no chunk
// references and always pass.
func (entry *Entry) check(chunkLengths []int) error {
	if entry.Size < 0 {
		// Fixed typo in the error message: "hash" -> "has".
		return fmt.Errorf("The file %s has an invalid size (%d)", entry.Path, entry.Size)
	}
	if !entry.IsFile() || entry.Size == 0 {
		return nil
	}
	if entry.StartChunk < 0 {
		return fmt.Errorf("The file %s starts at chunk %d", entry.Path, entry.StartChunk)
	}
	if entry.EndChunk >= len(chunkLengths) {
		return fmt.Errorf("The file %s ends at chunk %d while the number of chunks is %d",
			entry.Path, entry.EndChunk, len(chunkLengths))
	}
	if entry.EndChunk < entry.StartChunk {
		return fmt.Errorf("The file %s starts at chunk %d and ends at chunk %d",
			entry.Path, entry.StartChunk, entry.EndChunk)
	}
	if entry.StartOffset >= chunkLengths[entry.StartChunk] {
		return fmt.Errorf("The file %s starts at offset %d of chunk %d of length %d",
			entry.Path, entry.StartOffset, entry.StartChunk, chunkLengths[entry.StartChunk])
	}
	if entry.EndOffset > chunkLengths[entry.EndChunk] {
		return fmt.Errorf("The file %s ends at offset %d of chunk %d of length %d",
			entry.Path, entry.EndOffset, entry.EndChunk, chunkLengths[entry.EndChunk])
	}
	// Sum the byte spans of the referenced chunks; the first and last chunks
	// are only partially covered by this file.
	fileSize := int64(0)
	for i := entry.StartChunk; i <= entry.EndChunk; i++ {
		start := 0
		if i == entry.StartChunk {
			start = entry.StartOffset
		}
		end := chunkLengths[i]
		if i == entry.EndChunk {
			end = entry.EndOffset
		}
		fileSize += int64(end - start)
	}
	if entry.Size != fileSize {
		return fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
			entry.Path, entry.Size, fileSize)
	}
	return nil
}

View File

@@ -13,8 +13,11 @@ import (
"sort"
"strings"
"testing"
"bytes"
"encoding/json"
"github.com/gilbertchen/xattr"
"github.com/vmihailenco/msgpack"
)
func TestEntrySort(t *testing.T) {
@@ -27,19 +30,19 @@ func TestEntrySort(t *testing.T) {
"\xBB\xDDfile",
"\xFF\xDDfile",
"ab/",
"ab-/",
"ab0/",
"ab1/",
"ab/c",
"ab+/c-",
"ab+/c0",
"ab+/c/",
"ab+/c/d",
"ab+/c+/",
"ab+/c+/d",
"ab+/c0/",
"ab+/c/d",
"ab+/c+/d",
"ab+/c0/d",
"ab-/",
"ab-/c",
"ab0/",
"ab1/",
"ab1/c",
"ab1/\xBB\xDDfile",
"ab1/\xFF\xDDfile",
@@ -86,7 +89,7 @@ func TestEntrySort(t *testing.T) {
}
}
func TestEntryList(t *testing.T) {
func TestEntryOrder(t *testing.T) {
testDir := filepath.Join(os.TempDir(), "duplicacy_test")
os.RemoveAll(testDir)
@@ -98,16 +101,16 @@ func TestEntryList(t *testing.T) {
"ab0",
"ab1",
"ab+/",
"ab2/",
"ab3/",
"ab+/c",
"ab+/c+",
"ab+/c1",
"ab+/c-/",
"ab+/c-/d",
"ab+/c0/",
"ab+/c-/d",
"ab+/c0/d",
"ab2/",
"ab2/c",
"ab3/",
"ab3/c",
}
@@ -172,18 +175,24 @@ func TestEntryList(t *testing.T) {
directories = append(directories, CreateEntry("", 0, 0, 0))
entries := make([]*Entry, 0, 4)
entryChannel := make(chan *Entry, 1024)
entries = append(entries, CreateEntry("", 0, 0, 0))
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
entries = append(entries, directory)
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false, false)
subdirectories, _, err := ListEntries(testDir, directory.Path, nil, "", false, entryChannel)
if err != nil {
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
}
directories = append(directories, subdirectories...)
}
close(entryChannel)
for entry := range entryChannel {
entries = append(entries, entry)
}
entries = entries[1:]
for _, entry := range entries {
@@ -274,18 +283,25 @@ func TestEntryExcludeByAttribute(t *testing.T) {
directories = append(directories, CreateEntry("", 0, 0, 0))
entries := make([]*Entry, 0, 4)
entryChannel := make(chan *Entry, 1024)
entries = append(entries, CreateEntry("", 0, 0, 0))
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
entries = append(entries, directory)
subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, "", false, excludeByAttribute)
subdirectories, _, err := ListEntries(testDir, directory.Path, nil, "", excludeByAttribute, entryChannel)
if err != nil {
t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
}
directories = append(directories, subdirectories...)
}
close(entryChannel)
for entry := range entryChannel {
entries = append(entries, entry)
}
entries = entries[1:]
for _, entry := range entries {
@@ -327,3 +343,33 @@ func TestEntryExcludeByAttribute(t *testing.T) {
}
}
func TestEntryEncoding(t *testing.T) {
buffer := new(bytes.Buffer)
encoder := msgpack.NewEncoder(buffer)
entry1 := CreateEntry("abcd", 1, 2, 0700)
err := encoder.Encode(entry1)
if err != nil {
t.Errorf("Failed to encode the entry: %v", err)
return
}
t.Logf("msgpack size: %d\n", len(buffer.Bytes()))
decoder := msgpack.NewDecoder(buffer)
description, _ := json.Marshal(entry1)
t.Logf("json size: %d\n", len(description))
var entry2 Entry
err = decoder.Decode(&entry2)
if err != nil {
t.Errorf("Failed to decode the entry: %v", err)
return
}
if entry1.Path != entry2.Path || entry1.Size != entry2.Size || entry1.Time != entry2.Time {
t.Error("Decoded entry is different than the original one")
}
}

574
src/duplicacy_entrylist.go Normal file
View File

@@ -0,0 +1,574 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"encoding/hex"
"encoding/binary"
"fmt"
"os"
"io"
"path"
"crypto/sha256"
"crypto/rand"
"sync"
"github.com/vmihailenco/msgpack"
)
// This struct stores information about a file entry that has been modified
// and therefore needs its content uploaded.
type ModifiedEntry struct {
	Path string // path of the modified file
	Size int64  // file size; set to -1 by AddEntry until the real size is known
	Hash string // content hash; presumably filled in after the file is read -- confirm with caller
}
// EntryList is basically a list of entries, which can be kept in the memory, or serialized to a disk file,
// depending on if maximumInMemoryEntries is reached.
//
// The idea behind the on-disk entry list is that entries are written to a disk file as they are coming in.
// Entries that have been modified and thus need to be uploaded will have their Incomplete bit set (i.e.,
// with a size of -1). When the limit is reached, entries are moved to a disk file but ModifiedEntries and
// UploadedChunks are still kept in memory. When later entries are read from the entry list, incomplete
// entries are back-annotated with info from ModifiedEntries and UploadedChunk* before sending them out.
type EntryList struct {
	onDiskFile *os.File         // the file to store entries
	encoder    *msgpack.Encoder // msgpack encoder for entry serialization
	entries    []*Entry         // in-memory entry list

	SnapshotID string // the snapshot id
	Token      string // this unique random token makes sure we read/write
	// the same entry list

	ModifiedEntries []ModifiedEntry // entries that will be uploaded

	UploadedChunkHashes  []string   // chunks from entries that have been uploaded
	UploadedChunkLengths []int      // chunk lengths from entries that have been uploaded
	uploadedChunkLock    sync.Mutex // lock for UploadedChunkHashes and UploadedChunkLengths

	PreservedChunkHashes  []string // chunks from entries not changed
	PreservedChunkLengths []int    // chunk lengths from entries not changed

	Checksum               string // checksum of all entries to detect disk corruption
	maximumInMemoryEntries int    // max in-memory entries before spilling to disk
	NumberOfEntries        int64  // number of entries (not including directories and links)
	cachePath              string // the directory for the on-disk file

	// These 3 variables are used in entry information back-annotation (see fillAndSendEntry)
	modifiedEntryIndex  int // points to the current modified entry
	uploadedChunkIndex  int // counter for upload chunks
	uploadedChunkOffset int // the start offset for the current modified entry
}
// Create a new entry list. The 16-byte random token uniquely identifies this
// list so that the on-disk file can later be matched against the in-memory
// state when an incomplete snapshot is reloaded.
func CreateEntryList(snapshotID string, cachePath string, maximumInMemoryEntries int) (*EntryList, error) {
	tokenBytes := make([]byte, 16)
	if _, err := rand.Read(tokenBytes); err != nil {
		return nil, fmt.Errorf("Failed to create a random token: %v", err)
	}

	return &EntryList{
		SnapshotID:             snapshotID,
		maximumInMemoryEntries: maximumInMemoryEntries,
		cachePath:              cachePath,
		Token:                  string(tokenBytes),
	}, nil
}
// Create the on-disk entry list file ('incomplete_files' under cachePath).
// The file starts with the random Token, so a later reader can verify it
// matches this EntryList, followed by every entry currently held in memory.
// Subsequent entries are appended through the same encoder by AddEntry.
// Note: the in-memory 'entries' slice is not cleared here.
func (entryList *EntryList)createOnDiskFile() error {
	file, err := os.OpenFile(path.Join(entryList.cachePath, "incomplete_files"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return fmt.Errorf("Failed to create on disk entry list: %v", err)
	}
	entryList.onDiskFile = file
	entryList.encoder = msgpack.NewEncoder(file)
	// Write the token first so readers can detect a mismatched file.
	err = entryList.encoder.EncodeString(entryList.Token)
	if err != nil {
		return fmt.Errorf("Failed to create on disk entry list: %v", err)
	}
	// Flush all entries accumulated in memory so far.
	for _, entry := range entryList.entries {
		err = entry.EncodeWithHash(entryList.encoder)
		if err != nil {
			return err
		}
	}
	return nil
}
// Add an entry to the entry list. Incomplete file entries are also recorded
// in ModifiedEntries (with a size of -1) so their final size/hash can be
// filled in later. When the in-memory limit is exceeded the list spills to
// the on-disk file and all further entries are encoded directly.
func (entryList *EntryList) AddEntry(entry *Entry) error {
	// Directories and links don't count toward the number of file entries.
	if !entry.IsDir() && !entry.IsLink() {
		entryList.NumberOfEntries++
	}

	if !entry.IsComplete() {
		if entry.IsDir() || entry.IsLink() {
			entry.Size = 0
		} else {
			entryList.ModifiedEntries = append(entryList.ModifiedEntries,
				ModifiedEntry{Path: entry.Path, Size: -1})
		}
	}

	if entryList.onDiskFile != nil {
		return entry.EncodeWithHash(entryList.encoder)
	}
	entryList.entries = append(entryList.entries, entry)
	if entryList.maximumInMemoryEntries >= 0 && len(entryList.entries) > entryList.maximumInMemoryEntries {
		if err := entryList.createOnDiskFile(); err != nil {
			return err
		}
	}
	return nil
}
// Add a preserved chunk that belongs to files that have not been modified.
// Preserved chunks precede uploaded chunks in the combined chunk index space
// (see getChunkLength).
func (entryList *EntryList)AddPreservedChunk(chunkHash string, chunkSize int) {
	entryList.PreservedChunkHashes = append(entryList.PreservedChunkHashes, chunkHash)
	entryList.PreservedChunkLengths = append(entryList.PreservedChunkLengths, chunkSize)
}
// Add a chunk just uploaded (that belongs to files that have been modified).
// The slot is addressed by 'chunkIndex'; both slices are grown with
// placeholders as needed, so chunks can be reported out of order. The mutex
// makes this safe to call from multiple uploader threads.
func (entryList *EntryList) AddUploadedChunk(chunkIndex int, chunkHash string, chunkSize int) {
	entryList.uploadedChunkLock.Lock()
	defer entryList.uploadedChunkLock.Unlock()

	for i := len(entryList.UploadedChunkHashes); i <= chunkIndex; i++ {
		entryList.UploadedChunkHashes = append(entryList.UploadedChunkHashes, "")
	}
	for i := len(entryList.UploadedChunkLengths); i <= chunkIndex; i++ {
		entryList.UploadedChunkLengths = append(entryList.UploadedChunkLengths, 0)
	}

	entryList.UploadedChunkHashes[chunkIndex] = chunkHash
	entryList.UploadedChunkLengths[chunkIndex] = chunkSize
}
// CloseOnDiskFile flushes and closes the on-disk entry file if one is open;
// it is a no-op when everything is still in memory.
func (entryList *EntryList) CloseOnDiskFile() error {
	if entryList.onDiskFile == nil {
		return nil
	}
	if err := entryList.onDiskFile.Sync(); err != nil {
		return err
	}
	if err := entryList.onDiskFile.Close(); err != nil {
		return err
	}
	entryList.onDiskFile = nil
	return nil
}
// Return the length of the `index`th chunk. Chunk indexes cover the preserved
// chunks first, followed by the uploaded chunks.
func (entryList *EntryList) getChunkLength(index int) int {
	preservedCount := len(entryList.PreservedChunkLengths)
	if index >= preservedCount {
		return entryList.UploadedChunkLengths[index-preservedCount]
	}
	return entryList.PreservedChunkLengths[index]
}
// Sanity check for each entry: chunk indexes must be within the combined
// preserved+uploaded chunk list, start/end offsets must fall inside their
// chunks, and the referenced chunk spans must sum to the entry's size.
// Non-files and empty files carry no chunk references and always pass.
func (entryList *EntryList) checkEntry(entry *Entry) error {
	if entry.Size < 0 {
		// Fixed typo in the error message: "hash" -> "has".
		return fmt.Errorf("the file %s has an invalid size (%d)", entry.Path, entry.Size)
	}
	if !entry.IsFile() || entry.Size == 0 {
		return nil
	}
	numberOfChunks := len(entryList.PreservedChunkLengths) + len(entryList.UploadedChunkLengths)
	if entry.StartChunk < 0 {
		return fmt.Errorf("the file %s starts at chunk %d", entry.Path, entry.StartChunk)
	}
	if entry.EndChunk >= numberOfChunks {
		return fmt.Errorf("the file %s ends at chunk %d while the number of chunks is %d",
			entry.Path, entry.EndChunk, numberOfChunks)
	}
	if entry.EndChunk < entry.StartChunk {
		return fmt.Errorf("the file %s starts at chunk %d and ends at chunk %d",
			entry.Path, entry.StartChunk, entry.EndChunk)
	}
	if entry.StartOffset >= entryList.getChunkLength(entry.StartChunk) {
		return fmt.Errorf("the file %s starts at offset %d of chunk %d with a length of %d",
			entry.Path, entry.StartOffset, entry.StartChunk, entryList.getChunkLength(entry.StartChunk))
	}
	if entry.EndOffset > entryList.getChunkLength(entry.EndChunk) {
		return fmt.Errorf("the file %s ends at offset %d of chunk %d with a length of %d",
			entry.Path, entry.EndOffset, entry.EndChunk, entryList.getChunkLength(entry.EndChunk))
	}
	// Sum the byte spans of the referenced chunks; the first and last chunks
	// are only partially covered by this file.
	fileSize := int64(0)
	for i := entry.StartChunk; i <= entry.EndChunk; i++ {
		start := 0
		if i == entry.StartChunk {
			start = entry.StartOffset
		}
		end := entryList.getChunkLength(i)
		if i == entry.EndChunk {
			end = entry.EndOffset
		}
		fileSize += int64(end - start)
	}
	if entry.Size != fileSize {
		return fmt.Errorf("the file %s has a size of %d but the total size of chunks is %d",
			entry.Path, entry.Size, fileSize)
	}
	return nil
}
// An incomplete entry (with a size of -1) does not have 'startChunk', 'startOffset', 'endChunk', and 'endOffset'. This function
// is to fill in these information before sending the entry out.
//
// Complete entries are validated and passed straight to 'entryOut'. Incomplete
// entries are matched in order against ModifiedEntries, given their final size
// and hash, and assigned a chunk range by walking UploadedChunkLengths from
// the current cursor (uploadedChunkIndex/uploadedChunkOffset). Returns
// skipped=true when the modified entry has a non-positive size, in which case
// nothing is sent.
func (entryList *EntryList) fillAndSendEntry(entry *Entry, entryOut func(*Entry)error) (skipped bool, err error) {
	if entry.IsComplete() {
		err := entryList.checkEntry(entry)
		if err != nil {
			return false, err
		}
		return false, entryOut(entry)
	}

	// Incomplete entries must appear in exactly the order they were appended
	// to ModifiedEntries by AddEntry.
	if entryList.modifiedEntryIndex >= len(entryList.ModifiedEntries) {
		return false, fmt.Errorf("Unexpected file index %d (%d modified files)", entryList.modifiedEntryIndex, len(entryList.ModifiedEntries))
	}

	modifiedEntry := &entryList.ModifiedEntries[entryList.modifiedEntryIndex]
	entryList.modifiedEntryIndex++
	if modifiedEntry.Path != entry.Path {
		return false, fmt.Errorf("Unexpected file path %s when expecting %s", modifiedEntry.Path, entry.Path)
	}

	// A non-positive size means the file is to be dropped from the output --
	// presumably it was skipped during the backup; confirm with callers.
	if modifiedEntry.Size <= 0 {
		return true, nil
	}

	entry.Size = modifiedEntry.Size
	entry.Hash = modifiedEntry.Hash
	// Uploaded chunks are indexed after all preserved chunks.
	entry.StartChunk = entryList.uploadedChunkIndex + len(entryList.PreservedChunkHashes)
	entry.StartOffset = entryList.uploadedChunkOffset
	entry.EndChunk = entry.StartChunk

	// Walk forward through uploaded chunks until the remaining bytes fit in
	// the current chunk; that chunk/offset marks the end of this file.
	endOffset := int64(entry.StartOffset) + entry.Size
	for entryList.uploadedChunkIndex < len(entryList.UploadedChunkLengths) && endOffset > int64(entryList.UploadedChunkLengths[entryList.uploadedChunkIndex]) {
		endOffset -= int64(entryList.UploadedChunkLengths[entryList.uploadedChunkIndex])
		entry.EndChunk++
		entryList.uploadedChunkIndex++
	}

	if entryList.uploadedChunkIndex >= len(entryList.UploadedChunkLengths) {
		return false, fmt.Errorf("File %s has not been completely uploaded", entry.Path)
	}

	entry.EndOffset = int(endOffset)
	entryList.uploadedChunkOffset = entry.EndOffset
	// If the file ends exactly on a chunk boundary, the next file starts at
	// offset 0 of the following chunk.
	if entry.EndOffset == entryList.UploadedChunkLengths[entryList.uploadedChunkIndex] {
		entryList.uploadedChunkIndex++
		entryList.uploadedChunkOffset = 0
	}

	err = entryList.checkEntry(entry)
	if err != nil {
		return false, err
	}

	return false, entryOut(entry)
}
// Iterate through the entries in this entry list, completing incomplete
// entries from ModifiedEntries/UploadedChunk* before passing each one to
// 'entryOut'. Entries come either from the in-memory list or, if the list
// spilled, from the on-disk file.
func (entryList *EntryList) ReadEntries(entryOut func(*Entry) error) error {
	// Reset the back-annotation cursors so iteration can be repeated.
	entryList.modifiedEntryIndex = 0
	entryList.uploadedChunkIndex = 0
	entryList.uploadedChunkOffset = 0

	if entryList.onDiskFile == nil {
		for _, entry := range entryList.entries {
			// A copy is passed so back-annotation doesn't mutate the stored entry.
			if _, err := entryList.fillAndSendEntry(entry.Copy(), entryOut); err != nil {
				return err
			}
		}
		return nil
	}

	// io.SeekStart replaces the deprecated os.SEEK_SET constant.
	if _, err := entryList.onDiskFile.Seek(0, io.SeekStart); err != nil {
		return err
	}
	decoder := msgpack.NewDecoder(entryList.onDiskFile)
	// The file begins with the random token written by createOnDiskFile.
	if _, err := decoder.DecodeString(); err != nil {
		return err
	}

	// PeekCode reports io.EOF once all entries have been consumed; separate
	// error variables avoid the shadowing in the previous version.
	var peekErr error
	for _, peekErr = decoder.PeekCode(); peekErr == nil; _, peekErr = decoder.PeekCode() {
		entry, err := DecodeEntryWithHash(decoder)
		if err != nil {
			return err
		}
		if _, err := entryList.fillAndSendEntry(entry, entryOut); err != nil {
			return err
		}
	}
	if peekErr != io.EOF {
		return peekErr
	}
	return nil
}
// When saving an incomplete snapshot, the on-disk entry list ('incomplete_files') is renamed to
// 'incomplete_snapshot', and this EntryList struct is saved as 'incomplete_chunks'.
// The checksum stored in the struct lets loadIncompleteSnapshot detect corruption.
func (entryList *EntryList) SaveIncompleteSnapshot() {
	entryList.uploadedChunkLock.Lock()
	defer entryList.uploadedChunkLock.Unlock()

	if entryList.onDiskFile == nil {
		// createOnDiskFile already writes the token and every in-memory entry
		// to the file; re-encoding the entries here (as the previous version
		// did) would store each entry twice.
		err := entryList.createOnDiskFile()
		if err != nil {
			LOG_WARN("INCOMPLETE_SAVE", "Failed to create the incomplete snapshot file: %v", err)
			return
		}
	}

	err := entryList.onDiskFile.Close()
	if err != nil {
		LOG_WARN("INCOMPLETE_SAVE", "Failed to close the on-disk file: %v", err)
		return
	}

	// Replace any previous incomplete snapshot with the current entry file.
	filePath := path.Join(entryList.cachePath, "incomplete_snapshot")
	if _, err := os.Stat(filePath); err == nil {
		err = os.Remove(filePath)
		if err != nil {
			LOG_WARN("INCOMPLETE_REMOVE", "Failed to remove previous incomplete snapshot: %v", err)
		}
	}
	err = os.Rename(path.Join(entryList.cachePath, "incomplete_files"), filePath)
	if err != nil {
		LOG_WARN("INCOMPLETE_SAVE", "Failed to rename the incomplete snapshot file: %v", err)
		return
	}

	// Serialize the EntryList itself (chunk lists, modified entries, token,
	// checksum) so the next run can resume from it.
	chunkFile := path.Join(entryList.cachePath, "incomplete_chunks")
	file, err := os.OpenFile(chunkFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		LOG_WARN("INCOMPLETE_SAVE", "Failed to create the incomplete chunk file: %v", err)
		return
	}
	defer file.Close()

	encoder := msgpack.NewEncoder(file)
	entryList.Checksum = entryList.CalculateChecksum()
	err = encoder.Encode(entryList)
	if err != nil {
		LOG_WARN("INCOMPLETE_SAVE", "Failed to save the incomplete snapshot: %v", err)
		return
	}
	LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", filePath)
}
// Calculate a checksum for this entry list: a hex-encoded SHA-256 digest over
// the uploaded chunk hashes/lengths, the preserved chunk hashes/lengths, and
// the sizes and hashes of the modified entries.
func (entryList *EntryList) CalculateChecksum() string {
	digest := sha256.New()
	writeInt := func(value int64) {
		var scratch [8]byte
		binary.LittleEndian.PutUint64(scratch[:], uint64(value))
		digest.Write(scratch[:])
	}

	for _, hash := range entryList.UploadedChunkHashes {
		digest.Write([]byte(hash))
	}
	for _, length := range entryList.UploadedChunkLengths {
		writeInt(int64(length))
	}
	for _, hash := range entryList.PreservedChunkHashes {
		digest.Write([]byte(hash))
	}
	for _, length := range entryList.PreservedChunkLengths {
		writeInt(int64(length))
	}
	for _, modified := range entryList.ModifiedEntries {
		writeInt(modified.Size)
		digest.Write([]byte(modified.Hash))
	}
	return hex.EncodeToString(digest.Sum(nil))
}
// Check if all chunks referenced by this entry list (uploaded and preserved)
// exist in 'chunkCache', keyed by chunk ID.
func (entryList *EntryList) CheckChunks(config *Config, chunkCache map[string]bool) bool {
	for _, hashes := range [][]string{entryList.UploadedChunkHashes, entryList.PreservedChunkHashes} {
		for _, chunkHash := range hashes {
			if _, found := chunkCache[config.GetChunkIDFromHash(chunkHash)]; !found {
				return false
			}
		}
	}
	return true
}
// Recover the on disk file from 'incomplete_snapshot', and restore the EntryList struct
// from 'incomplete_chunks'. Returns nil (after logging a warning) if either
// file is missing, unreadable, corrupted (checksum mismatch), or belongs to a
// different run (token mismatch).
// NOTE(review): the snapshotID parameter is currently unused here.
func loadIncompleteSnapshot(snapshotID string, cachePath string) *EntryList {
	onDiskFilePath := path.Join(cachePath, "incomplete_snapshot")
	entryListFilePath := path.Join(cachePath, "incomplete_chunks")
	if _, err := os.Stat(onDiskFilePath); os.IsNotExist(err) {
		return nil
	}
	if _, err := os.Stat(entryListFilePath); os.IsNotExist(err) {
		return nil
	}

	entryList := &EntryList{}
	entryListFile, err := os.OpenFile(entryListFilePath, os.O_RDONLY, 0600)
	if err != nil {
		LOG_WARN("INCOMPLETE_LOAD", "Failed to open the incomplete snapshot: %v", err)
		return nil
	}
	defer entryListFile.Close()

	decoder := msgpack.NewDecoder(entryListFile)
	err = decoder.Decode(&entryList)
	if err != nil {
		LOG_WARN("INCOMPLETE_LOAD", "Failed to load the incomplete snapshot: %v", err)
		return nil
	}

	// Verify the checksum to detect corruption of 'incomplete_chunks'.
	checksum := entryList.CalculateChecksum()
	if checksum != entryList.Checksum {
		LOG_WARN("INCOMPLETE_LOAD", "Failed to load the incomplete snapshot: checksum mismatched")
		return nil
	}

	onDiskFile, err := os.OpenFile(onDiskFilePath, os.O_RDONLY, 0600)
	if err != nil {
		LOG_WARN("INCOMPLETE_LOAD", "Failed to open the on disk file for the incomplete snapshot: %v", err)
		return nil
	}
	decoder = msgpack.NewDecoder(onDiskFile)
	token, err := decoder.DecodeString()
	if err != nil {
		LOG_WARN("INCOMPLETE_LOAD", "Failed to read the token for the incomplete snapshot: %v", err)
		onDiskFile.Close()
		return nil
	}
	// The token ties 'incomplete_snapshot' to 'incomplete_chunks'; a mismatch
	// means the two files come from different backup runs.
	if token != entryList.Token {
		LOG_WARN("INCOMPLETE_LOAD", "Mismatched tokens in the incomplete snapshot")
		onDiskFile.Close()
		return nil
	}

	entryList.onDiskFile = onDiskFile

	// Truncate the uploaded chunk lists at the first gap: an empty hash means
	// the chunk was not uploaded in the previous run.
	for i, hash := range entryList.UploadedChunkHashes {
		if len(hash) == 0 {
			entryList.UploadedChunkHashes = entryList.UploadedChunkHashes[0:i]
			entryList.UploadedChunkLengths = entryList.UploadedChunkLengths[0:i]
			break
		}
	}

	// Fixed typo in the log message: "incomlete" -> "incomplete".
	LOG_INFO("INCOMPLETE_LOAD", "Previous incomplete backup contains %d files and %d chunks",
		entryList.NumberOfEntries, len(entryList.PreservedChunkLengths)+len(entryList.UploadedChunkHashes))
	return entryList
}
// Delete the two incomplete files ('incomplete_snapshot' and
// 'incomplete_chunks') from the cache directory, if they exist.
func deleteIncompleteSnapshot(cachePath string) {
	for _, name := range []string{"incomplete_snapshot", "incomplete_chunks"} {
		target := path.Join(cachePath, name)
		if _, err := os.Stat(target); err != nil {
			// The file doesn't exist (or can't be stat'ed); nothing to delete.
			continue
		}
		if err := os.Remove(target); err != nil {
			LOG_WARN("INCOMPLETE_REMOVE", "Failed to remove the incomplete snapshot: %v", err)
			return
		}
	}
}

View File

@@ -0,0 +1,179 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
"os"
"path"
"time"
"testing"
"math/rand"
)
// generateRandomString returns a random string of the given length composed
// of lowercase ASCII letters.
func generateRandomString(length int) string {
	const alphabet = "abcdefghijklmnopqrstuvwxyz"
	result := make([]rune, length)
	for i := range result {
		result[i] = rune(alphabet[rand.Intn(len(alphabet))])
	}
	return string(result)
}
// fileSizeGenerator draws from a Zipf distribution over [0, 1024], seeded by
// the current time so each test run uses different sizes.
var fileSizeGenerator = rand.NewZipf(rand.New(rand.NewSource(time.Now().UnixNano())), 1.2, 1.0, 1024)

// generateRandomFileSize returns a random file size in the range [1, 1025].
func generateRandomFileSize() int64 {
	return 1 + int64(fileSizeGenerator.Uint64())
}
// generateRandomChunks produces random chunk hashes with lengths (1..64 each)
// whose total equals exactly totalFileSize.
func generateRandomChunks(totalFileSize int64) (chunks []string, lengths []int) {
	remaining := totalFileSize
	for remaining > 0 {
		chunks = append(chunks, generateRandomString(64))
		size := int64(1 + (rand.Int() % 64))
		if size > remaining {
			// The last chunk is trimmed so the total matches exactly.
			size = remaining
		}
		lengths = append(lengths, int(size))
		remaining -= size
	}
	return chunks, lengths
}
// getPreservedChunks collects the chunks (and their lengths) referenced by
// entries that are unchanged (Size >= 0), without duplicating a start chunk
// that is shared with the previously preserved file.
//
// Fixes from the previous version: the inner loop tested and incremented 'i'
// instead of 'j' (corrupting the outer iteration and copying wrong chunks),
// and the never-read 'delta' variable has been removed.
func getPreservedChunks(entries []*Entry, chunks []string, lengths []int) (preservedChunks []string, preservedChunkLengths []int) {
	lastPreservedChunk := -1
	for i := range entries {
		if entries[i].Size < 0 {
			// Modified entries get fresh uploaded chunks; nothing to preserve.
			continue
		}
		if lastPreservedChunk != entries[i].StartChunk {
			lastPreservedChunk = entries[i].StartChunk
			preservedChunks = append(preservedChunks, chunks[entries[i].StartChunk])
			preservedChunkLengths = append(preservedChunkLengths, lengths[entries[i].StartChunk])
		}
		for j := entries[i].StartChunk + 1; j <= entries[i].EndChunk; j++ {
			preservedChunks = append(preservedChunks, chunks[j])
			preservedChunkLengths = append(preservedChunkLengths, lengths[j])
			lastPreservedChunk = j
		}
	}
	return
}
// testEntryList exercises EntryList end to end: it builds a list where all
// entries are new, annotates their sizes and uploaded chunks, and verifies
// ReadEntries yields every entry; it then repeats the exercise with a second
// list where entries are (supposed to be) a mix of new and preserved.
func testEntryList(t *testing.T, numberOfEntries int, maximumInMemoryEntries int) {
	entries := make([]*Entry, 0, numberOfEntries)
	entrySizes := make([]int64, 0)
	for i := 0; i < numberOfEntries; i++ {
		// Size -1 marks the entry as incomplete/modified.
		entry := CreateEntry(generateRandomString(16), -1, 0, 0700)
		entries = append(entries, entry)
		entrySizes = append(entrySizes, generateRandomFileSize())
	}
	totalFileSize := int64(0)
	for _, size := range entrySizes {
		totalFileSize += size
	}
	testDir := path.Join(os.TempDir(), "duplicacy_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)
	os.MkdirAll(testDir + "/list1", 0700)
	os.MkdirAll(testDir + "/list2", 0700)
	os.MkdirAll(testDir + "/list3", 0700)
	// NOTE(review): duplicate MkdirAll of list1 (harmless); list3 appears unused.
	os.MkdirAll(testDir + "/list1", 0700)

	// For the first entry list, all entries are new
	entryList, _ := CreateEntryList("test", testDir + "/list1", maximumInMemoryEntries)
	for _, entry := range entries {
		entryList.AddEntry(entry)
	}
	uploadedChunks, uploadedChunksLengths := generateRandomChunks(totalFileSize)
	for i, chunk := range uploadedChunks {
		entryList.AddUploadedChunk(i, chunk, uploadedChunksLengths[i])
	}
	// Back-annotate the final sizes, as the uploader would after reading each file.
	for i := range entryList.ModifiedEntries {
		entryList.ModifiedEntries[i].Size = entrySizes[i]
	}
	totalEntries := 0
	err := entryList.ReadEntries(func(entry *Entry) error {
		totalEntries++
		return nil
	})
	if err != nil {
		t.Errorf("ReadEntries returned an error: %s", err)
		return
	}
	if totalEntries != numberOfEntries {
		t.Errorf("EntryList contains %d entries instead of %d", totalEntries, numberOfEntries)
		return
	}

	// For the second entry list, half of the entries are new
	// NOTE(review): rand.Int() % 1 is always 0, so every entry gets Size = -1
	// and preservedChunks ends up empty -- '% 2' was presumably intended to
	// match the comment above. Confirm before changing, since the
	// preserved-chunk path is effectively untested as written.
	for i := range entries {
		if rand.Int() % 1 == 0 {
			entries[i].Size = -1
		} else {
			entries[i].Size = entrySizes[i]
		}
	}

	preservedChunks, preservedChunkLengths := getPreservedChunks(entries, uploadedChunks, uploadedChunksLengths)

	entryList, _ = CreateEntryList("test", testDir + "/list2", maximumInMemoryEntries)
	for _, entry := range entries {
		entryList.AddEntry(entry)
	}
	for i, chunk := range preservedChunks {
		entryList.AddPreservedChunk(chunk, preservedChunkLengths[i])
	}
	// Give each modified entry a fresh random size and upload matching chunks.
	totalFileSize = 0
	for i := range entryList.ModifiedEntries {
		fileSize := generateRandomFileSize()
		entryList.ModifiedEntries[i].Size = fileSize
		totalFileSize += fileSize
	}
	uploadedChunks, uploadedChunksLengths = generateRandomChunks(totalFileSize)
	for i, chunk := range uploadedChunks {
		entryList.AddUploadedChunk(i, chunk, uploadedChunksLengths[i])
	}
	totalEntries = 0
	err = entryList.ReadEntries(func(entry *Entry) error {
		totalEntries++
		return nil
	})
	if err != nil {
		t.Errorf("ReadEntries returned an error: %s", err)
		return
	}
	if totalEntries != numberOfEntries {
		t.Errorf("EntryList contains %d entries instead of %d", totalEntries, numberOfEntries)
		return
	}
}
// TestEntryList runs the entry-list exercise with several in-memory limits,
// including 0 (everything spills to the on-disk file immediately).
func TestEntryList(t *testing.T) {
	for _, maximumInMemoryEntries := range []int{1024, 512, 0} {
		testEntryList(t, 1024, maximumInMemoryEntries)
	}
}

View File

@@ -41,6 +41,7 @@ type GCDStorage struct {
backoffs []int // desired backoff time in seconds for each thread
attempts []int // number of failed attempts since last success for each thread
driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive
spaces string // 'appDataFolder' if scope is drive.appdata; 'drive' otherwise
createDirectoryLock sync.Mutex
isConnected bool
@@ -199,7 +200,7 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
var err error
for {
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Spaces(storage.spaces)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
@@ -231,7 +232,7 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
for {
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Spaces(storage.spaces)
if storage.driveID != GCDUserDrive {
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
}
@@ -255,6 +256,29 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
return file.Id, file.MimeType == GCDDirectoryMimeType, file.Size, nil
}
// Returns the id of the shared folder with the given name if it exists;
// returns an empty id (and nil error) when no match is found. Only the first
// match is used if several shared folders have the same name.
// NOTE(review): threadIndex is unused in this function -- confirm whether it
// should feed the per-thread retry/backoff machinery like other methods.
// NOTE(review): 'name' is interpolated into the query unescaped; a name
// containing a single quote would break the Drive query -- verify inputs.
func (storage *GCDStorage) findSharedFolder(threadIndex int, name string) (string, error) {
	query := "name = '" + name + "' and sharedWithMe and trashed = false and mimeType = 'application/vnd.google-apps.folder'"
	q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Spaces(storage.spaces)
	// Shared-drive listings need the drive id and all-drives flags.
	if storage.driveID != GCDUserDrive {
		q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
	}
	fileList, err := q.Do()
	if err != nil {
		return "", err
	}
	if len(fileList.Files) == 0 {
		return "", nil
	}
	file := fileList.Files[0]
	return file.Id, nil
}
// getIDFromPath returns the id of the given path. If 'createDirectories' is true, create the given path and all its
// parent directories if they don't exist. Note that if 'createDirectories' is false, it may return an empty 'fileID'
// if the file doesn't exist.
@@ -344,11 +368,23 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
var tokenSource oauth2.TokenSource
scope := drive.DriveScope
if isServiceAccount {
config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
if newScope, ok := object["scope"]; ok {
scope = newScope.(string)
}
config, err := google.JWTConfigFromJSON(description, scope)
if err != nil {
return nil, err
}
if subject, ok := object["subject"]; ok {
config.Subject = subject.(string)
}
tokenSource = config.TokenSource(ctx)
} else {
gcdConfig := &GCDConfig{}
@@ -398,6 +434,7 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
backoffs: make([]int, threads),
attempts: make([]int, threads),
driveID: driveID,
spaces: "drive",
}
for i := range storage.backoffs {
@@ -405,10 +442,29 @@ func CreateGCDStorage(tokenFile string, driveID string, storagePath string, thre
storage.attempts[i] = 0
}
storage.savePathID("", driveID)
storagePathID, err := storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
if scope == drive.DriveAppdataScope {
storage.spaces = "appDataFolder"
storage.savePathID("", "appDataFolder")
} else {
storage.savePathID("", driveID)
}
storagePathID := ""
// When using service acount, check if storagePath is a shared folder which takes priority over regular folders.
if isServiceAccount && !strings.Contains(storagePath, "/") {
storagePathID, err = storage.findSharedFolder(0, storagePath)
if err != nil {
LOG_WARN("GCD_STORAGE", "Failed to check if %s is a shared folder: %v", storagePath, err)
}
}
if storagePathID == "" {
storagePathID, err = storage.getIDFromPath(0, storagePath, true)
if err != nil {
return nil, err
}
}
// Reset the id cache and start with 'storagePathID' as the root

View File

@@ -14,7 +14,6 @@ import (
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"time"
)
@@ -77,19 +76,19 @@ func DeleteShadowCopy() {
err := exec.Command("/sbin/umount", "-f", snapshotPath).Run()
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while unmounting snapshot")
LOG_WARN("VSS_DELETE", "Error while unmounting snapshot: %v", err)
return
}
err = exec.Command("tmutil", "deletelocalsnapshots", snapshotDate).Run()
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while deleting local snapshot")
LOG_WARN("VSS_DELETE", "Error while deleting local snapshot: %v", err)
return
}
err = os.RemoveAll(snapshotPath)
if err != nil {
LOG_ERROR("VSS_DELETE", "Error while deleting temporary mount directory")
LOG_WARN("VSS_DELETE", "Error while deleting temporary mount directory: %v", err)
return
}
@@ -150,12 +149,13 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
return top
}
colonPos := strings.IndexByte(tmutilOutput, ':')
if colonPos < 0 {
snapshotDateRegex := regexp.MustCompile(`:\s+([0-9\-]+)`)
matched := snapshotDateRegex.FindStringSubmatch(tmutilOutput)
if matched == nil {
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
return top
}
snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
snapshotDate = matched[1]
tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
if err != nil {
@@ -164,17 +164,17 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
}
snapshotName := "com.apple.TimeMachine." + snapshotDate
r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
snapshotNames := r.FindStringSubmatch(tmutilOutput)
if len(snapshotNames) > 0 {
snapshotName = snapshotNames[0]
snapshotNameRegex := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
matched = snapshotNameRegex.FindStringSubmatch(tmutilOutput)
if len(matched) > 0 {
snapshotName = matched[0]
} else {
LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
LOG_INFO("VSS_CREATE", "Can't find the snapshot name with 'tmutil listlocalsnapshots'; fallback to %s", snapshotName)
}
// Mount snapshot as readonly and hide from GUI i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/System/Volumes/Data", snapshotPath)
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
return top

View File

@@ -8,17 +8,22 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"time"
"sort"
"bytes"
"github.com/vmihailenco/msgpack"
)
// Snapshot represents a backup of the repository.
type Snapshot struct {
Version int
ID string // the snapshot id; must be different for different repositories
Revision int // the revision number
Options string // options used to create this snapshot (some not included)
@@ -37,14 +42,11 @@ type Snapshot struct {
// A sequence of chunks whose aggregated content is the json representation of 'ChunkLengths'.
LengthSequence []string
Files []*Entry // list of files and subdirectories
ChunkHashes []string // a sequence of chunks representing the file content
ChunkLengths []int // the length of each chunk
Flag bool // used to mark certain snapshots for deletion or copy
discardAttributes bool
}
// CreateEmptySnapshot creates an empty snapshot.
@@ -56,16 +58,14 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
}
}
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string, excludeByAttribute bool) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
type DirectoryListing struct {
directory string
files *[]Entry
}
snapshot = &Snapshot{
ID: id,
Revision: 0,
StartTime: time.Now().Unix(),
}
func (snapshot *Snapshot) ListLocalFiles(top string, nobackupFile string,
filtersFile string, excludeByAttribute bool, listingChannel chan *Entry,
skippedDirectories *[]string, skippedFiles *[]string) {
var patterns []string
@@ -77,45 +77,128 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, fil
directories := make([]*Entry, 0, 256)
directories = append(directories, CreateEntry("", 0, 0, 0))
snapshot.Files = make([]*Entry, 0, 256)
attributeThreshold := 1024 * 1024
if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
}
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
snapshot.Files = append(snapshot.Files, directory)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes, excludeByAttribute)
subdirectories, skipped, err := ListEntries(top, directory.Path, patterns, nobackupFile, excludeByAttribute, listingChannel)
if err != nil {
if directory.Path == "" {
LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
return nil, nil, nil, err
return
}
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory %s: %v", directory.Path, err)
if skippedDirectories != nil {
*skippedDirectories = append(*skippedDirectories, directory.Path)
}
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
skippedDirectories = append(skippedDirectories, directory.Path)
continue
}
directories = append(directories, subdirectories...)
skippedFiles = append(skippedFiles, skipped...)
if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
snapshot.discardAttributes = true
for _, file := range snapshot.Files {
file.Attributes = nil
}
if skippedFiles != nil {
*skippedFiles = append(*skippedFiles, skipped...)
}
}
close(listingChannel)
}
func (snapshot *Snapshot)ListRemoteFiles(config *Config, chunkOperator *ChunkOperator, entryOut func(*Entry) bool) {
var chunks []string
for _, chunkHash := range snapshot.FileSequence {
chunks = append(chunks, chunkOperator.config.GetChunkIDFromHash(chunkHash))
}
// Remove the root entry
snapshot.Files = snapshot.Files[1:]
var chunk *Chunk
reader := sequenceReader{
sequence: snapshot.FileSequence,
buffer: new(bytes.Buffer),
refillFunc: func(chunkHash string) []byte {
if chunk != nil {
config.PutChunk(chunk)
}
chunk = chunkOperator.Download(chunkHash, 0, true)
return chunk.GetBytes()
},
}
if snapshot.Version == 0 {
LOG_INFO("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in an old version format", snapshot.ID, snapshot.Revision)
files := make([]*Entry, 0)
decoder := json.NewDecoder(&reader)
// read open bracket
_, err := decoder.Token()
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to open the snapshot %s at revision %d: not a list of entries",
snapshot.ID, snapshot.Revision)
return
}
for decoder.More() {
var entry Entry
err = decoder.Decode(&entry)
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load files specified in the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
files = append(files, &entry)
}
sort.Sort(ByName(files))
for _, file := range files {
if !entryOut(file) {
return
}
}
} else if snapshot.Version == 1 {
decoder := msgpack.NewDecoder(&reader)
lastEndChunk := 0
// while the array contains values
for _, err := decoder.PeekCode(); err != io.EOF; _, err = decoder.PeekCode() {
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to parse the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
var entry Entry
err = decoder.Decode(&entry)
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
if entry.IsFile() {
entry.StartChunk += lastEndChunk
entry.EndChunk += entry.StartChunk
lastEndChunk = entry.EndChunk
}
err = entry.check(snapshot.ChunkLengths)
if err != nil {
LOG_ERROR("SNAPSHOT_ENTRY", "Failed to load the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return
}
if !entryOut(&entry) {
return
}
}
} else {
LOG_ERROR("SNAPSHOT_VERSION", "snapshot %s at revision %d is encoded in unsupported version %d format",
snapshot.ID, snapshot.Revision, snapshot.Version)
return
}
return snapshot, skippedDirectories, skippedFiles, nil
}
func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
@@ -215,100 +298,6 @@ func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patt
return patterns
}
// IncompleteSnapshot is the on-disk (JSON) form used to save and load an
// incomplete (interrupted) backup so that a later run can resume from it.
type IncompleteSnapshot struct {
	Files []*Entry // entries processed so far; only fully processed files are saved
	ChunkHashes []string // chunk hashes uploaded so far, hex-encoded for JSON (see SaveIncompleteSnapshot)
	ChunkLengths []int // length of each uploaded chunk, parallel to ChunkHashes
}
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists.
// It returns nil when the file is missing, unreadable, or cannot be parsed.
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
	snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")

	content, err := ioutil.ReadFile(snapshotFile)
	if err != nil {
		LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
		return nil
	}

	var saved IncompleteSnapshot
	if err = json.Unmarshal(content, &saved); err != nil {
		LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
		return nil
	}

	// Chunk hashes are stored hex-encoded on disk; convert them back to raw bytes.
	var decodedHashes []string
	for _, encoded := range saved.ChunkHashes {
		raw, err := hex.DecodeString(encoded)
		if err != nil {
			LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
			return nil
		}
		decodedHashes = append(decodedHashes, string(raw))
	}

	snapshot = &Snapshot{
		Files:        saved.Files,
		ChunkHashes:  decodedHashes,
		ChunkLengths: saved.ChunkLengths,
	}
	LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
	return snapshot
}
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
// so an interrupted backup can be resumed later by LoadIncompleteSnapshot.
func SaveIncompleteSnapshot(snapshot *Snapshot) {
	// Keep only files that have been fully processed.  Unprocessed files carry
	// a size of -1, and everything from the first such file onward is dropped.
	var processed []*Entry
	for _, entry := range snapshot.Files {
		if entry.Size < 0 {
			break
		}
		entry.Attributes = nil
		processed = append(processed, entry)
	}

	// Store chunk hashes hex-encoded so the JSON stays printable.
	var encodedHashes []string
	for _, hash := range snapshot.ChunkHashes {
		encodedHashes = append(encodedHashes, hex.EncodeToString([]byte(hash)))
	}

	state := IncompleteSnapshot{
		Files:        processed,
		ChunkHashes:  encodedHashes,
		ChunkLengths: snapshot.ChunkLengths,
	}

	description, err := json.MarshalIndent(state, "", " ")
	if err != nil {
		LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
		return
	}

	snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
	if err = ioutil.WriteFile(snapshotFile, description, 0644); err != nil {
		LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
		return
	}
	LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}
// RemoveIncompleteSnapshot deletes the saved incomplete snapshot file, if any,
// from the preference directory.  It is a no-op when the file does not exist
// or the path is a directory.
func RemoveIncompleteSnapshot() {
	snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
	// Only attempt removal when the path exists and is a regular file.
	if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
		err = os.Remove(snapshotFile)
		if err != nil {
			// Fixed log message typo: "ncomplete" -> "incomplete".
			// NOTE(review): the log ID "INCOMPLETE_SAVE" is kept as-is for
			// compatibility, though "INCOMPLETE_REMOVE" would be more accurate.
			LOG_INFO("INCOMPLETE_SAVE", "Failed to remove incomplete snapshot: %v", err)
		} else {
			LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
		}
	}
}
// CreateSnapshotFromDescription creates a snapshot from a json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {
@@ -321,6 +310,14 @@ func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err
snapshot = &Snapshot{}
if value, ok := root["version"]; !ok {
snapshot.Version = 0
} else if version, ok := value.(float64); !ok {
return nil, fmt.Errorf("Invalid version is specified in the snapshot")
} else {
snapshot.Version = int(version)
}
if value, ok := root["id"]; !ok {
return nil, fmt.Errorf("No id is specified in the snapshot")
} else if snapshot.ID, ok = value.(string); !ok {
@@ -437,6 +434,7 @@ func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
object := make(map[string]interface{})
object["version"] = 1
object["id"] = snapshot.ID
object["revision"] = snapshot.Revision
object["options"] = snapshot.Options
@@ -458,9 +456,7 @@ func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
// MarshalSequence creates a json represetion for the specified chunk sequence.
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([]byte, error) {
if sequenceType == "files" {
return json.Marshal(snapshot.Files)
} else if sequenceType == "chunks" {
if sequenceType == "chunks" {
return json.Marshal(encodeSequence(snapshot.ChunkHashes))
} else {
return json.Marshal(snapshot.ChunkLengths)
@@ -489,3 +485,4 @@ func encodeSequence(sequence []string) []string {
return sequenceInHex
}

View File

@@ -20,6 +20,8 @@ import (
"strings"
"text/tabwriter"
"time"
"sync"
"sync/atomic"
"github.com/aryann/difflib"
)
@@ -147,7 +149,7 @@ func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignored
// collection would have finished already, while a snapshot currently being created does not affect
// this fossil collection.
if lastSnapshotTime[hostID] > 0 && lastSnapshotTime[hostID] < time.Now().Unix()-maxSnapshotRunningTime*secondsInDay {
LOG_INFO("SNAPSHOT_INACTIVE", "Ignore snapshot %s whose last revision was created %d days ago",
LOG_INFO("SNAPSHOT_INACTIVE", "Ignore snapshot %s whose last revision was created more than %d days ago",
hostID, maxSnapshotRunningTime)
continue
}
@@ -189,7 +191,6 @@ type SnapshotManager struct {
fileChunk *Chunk
snapshotCache *FileStorage
chunkDownloader *ChunkDownloader
chunkOperator *ChunkOperator
}
@@ -268,72 +269,26 @@ func (reader *sequenceReader) Read(data []byte) (n int, err error) {
return reader.buffer.Read(data)
}
func (manager *SnapshotManager) CreateChunkDownloader() {
if manager.chunkDownloader == nil {
manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, 1, false)
func (manager *SnapshotManager) CreateChunkOperator(resurrect bool, threads int, allowFailures bool) {
if manager.chunkOperator == nil {
manager.chunkOperator = CreateChunkOperator(manager.config, manager.storage, manager.snapshotCache, resurrect, threads, allowFailures)
}
}
// DownloadSequence returns the content represented by a sequence of chunks.
func (manager *SnapshotManager) DownloadSequence(sequence []string) (content []byte) {
manager.CreateChunkDownloader()
manager.CreateChunkOperator(false, 1, false)
for _, chunkHash := range sequence {
i := manager.chunkDownloader.AddChunk(chunkHash)
chunk := manager.chunkDownloader.WaitForChunk(i)
chunk := manager.chunkOperator.Download(chunkHash, 0, true)
content = append(content, chunk.GetBytes()...)
manager.config.PutChunk(chunk)
}
return content
}
func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot, patterns []string, attributesNeeded bool) bool {
manager.CreateChunkDownloader()
reader := sequenceReader{
sequence: snapshot.FileSequence,
buffer: new(bytes.Buffer),
refillFunc: func(chunkHash string) []byte {
i := manager.chunkDownloader.AddChunk(chunkHash)
chunk := manager.chunkDownloader.WaitForChunk(i)
return chunk.GetBytes()
},
}
files := make([]*Entry, 0)
decoder := json.NewDecoder(&reader)
// read open bracket
_, err := decoder.Token()
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load files specified in the snapshot %s at revision %d: not a list of entries",
snapshot.ID, snapshot.Revision)
return false
}
// while the array contains values
for decoder.More() {
var entry Entry
err = decoder.Decode(&entry)
if err != nil {
LOG_ERROR("SNAPSHOT_PARSE", "Failed to load files specified in the snapshot %s at revision %d: %v",
snapshot.ID, snapshot.Revision, err)
return false
}
// If we don't need the attributes or the file isn't included we clear the attributes to save memory
if !attributesNeeded || (len(patterns) != 0 && !MatchPath(entry.Path, patterns)) {
entry.Attributes = nil
}
files = append(files, &entry)
}
snapshot.Files = files
return true
}
// DownloadSnapshotSequence downloads the content represented by a sequence of chunks, and then unmarshal the content
// using the specified 'loadFunction'. It purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
// using the specified 'loadFunction'. Its purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
// in a snapshot.
func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, sequenceType string) bool {
@@ -362,30 +317,21 @@ func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, seq
return true
}
// DownloadSnapshotContents loads all chunk sequences in a snapshot. A snapshot, when just created, only contains
// some metadata and theree sequence representing files, chunk hashes, and chunk lengths. This function must be called
// for the actual content of the snapshot to be usable.
func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, patterns []string, attributesNeeded bool) bool {
// DownloadSnapshotSequences loads all chunk sequences in a snapshot. A snapshot, when just created, only contains
// some metadata and three sequence representing files, chunk hashes, and chunk lengths. This function must be called
// for the chunk hash sequence and chunk length sequence to be usable.
func (manager *SnapshotManager) DownloadSnapshotSequences(snapshot *Snapshot) bool {
manager.DownloadSnapshotFileSequence(snapshot, patterns, attributesNeeded)
manager.DownloadSnapshotSequence(snapshot, "chunks")
manager.DownloadSnapshotSequence(snapshot, "lengths")
err := manager.CheckSnapshot(snapshot)
if err != nil {
LOG_ERROR("SNAPSHOT_CHECK", "The snapshot %s at revision %d contains an error: %v",
snapshot.ID, snapshot.Revision, err)
return false
}
return true
}
// ClearSnapshotContents removes contents loaded by DownloadSnapshotContents
func (manager *SnapshotManager) ClearSnapshotContents(snapshot *Snapshot) {
// ClearSnapshotContents removes sequences loaded by DownloadSnapshotSequences
func (manager *SnapshotManager) ClearSnapshotSequences(snapshot *Snapshot) {
snapshot.ChunkHashes = nil
snapshot.ChunkLengths = nil
snapshot.Files = nil
}
// CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
@@ -577,10 +523,6 @@ func (manager *SnapshotManager) downloadLatestSnapshot(snapshotID string) (remot
remote = manager.DownloadSnapshot(snapshotID, latest)
}
if remote != nil {
manager.DownloadSnapshotContents(remote, nil, false)
}
return remote
}
@@ -712,6 +654,12 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showFiles: %t, showChunks: %t",
snapshotID, revisionsToList, tag, showFiles, showChunks)
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
}()
var snapshotIDs []string
var err error
@@ -749,14 +697,16 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
if len(snapshot.Tag) > 0 {
tagWithSpace = snapshot.Tag + " "
}
LOG_INFO("SNAPSHOT_INFO", "Snapshot %s revision %d created at %s %s%s",
snapshotID, revision, creationTime, tagWithSpace, snapshot.Options)
if showFiles {
manager.DownloadSnapshotFileSequence(snapshot, nil, false)
options := snapshot.Options
if snapshot.Version == 0 {
options += " (0)"
}
LOG_INFO("SNAPSHOT_INFO", "Snapshot %s revision %d created at %s %s%s",
snapshotID, revision, creationTime, tagWithSpace, options)
if showFiles {
// We need to fill in ChunkHashes and ChunkLengths to verify that each entry is valid
manager.DownloadSnapshotSequences(snapshot)
if snapshot.NumberOfFiles > 0 {
LOG_INFO("SNAPSHOT_STATS", "Files: %d", snapshot.NumberOfFiles)
@@ -768,7 +718,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
totalFileSize := int64(0)
lastChunk := 0
for _, file := range snapshot.Files {
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool {
if file.IsFile() {
totalFiles++
totalFileSize += file.Size
@@ -780,17 +730,18 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
lastChunk = file.EndChunk
}
}
}
return true
})
for _, file := range snapshot.Files {
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(file *Entry)bool {
if file.IsFile() {
LOG_INFO("SNAPSHOT_FILE", "%s", file.String(maxSizeDigits))
}
}
return true
})
metaChunks := len(snapshot.FileSequence) + len(snapshot.ChunkSequence) + len(snapshot.LengthSequence)
LOG_INFO("SNAPSHOT_STATS", "Files: %d, total size: %d, file chunks: %d, metadata chunks: %d",
totalFiles, totalFileSize, lastChunk+1, metaChunks)
LOG_INFO("SNAPSHOT_STATS", "Total size: %d, file chunks: %d, metadata chunks: %d", totalFileSize, lastChunk+1, metaChunks)
}
if showChunks {
@@ -807,11 +758,15 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
}
// ListSnapshots shows the information about a snapshot.
// CheckSnapshots checks if there is any problem with a snapshot.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int, allowFailures bool) bool {
manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads, allowFailures)
manager.CreateChunkOperator(resurrect, threads, allowFailures)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
}()
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
@@ -911,9 +866,9 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
for _, snapshot := range snapshotMap[snapshotID] {
if checkFiles {
manager.DownloadSnapshotContents(snapshot, nil, false)
manager.DownloadSnapshotSequences(snapshot)
manager.VerifySnapshot(snapshot)
manager.ClearSnapshotContents(snapshot)
manager.ClearSnapshotSequences(snapshot)
continue
}
@@ -1017,49 +972,127 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
}
if checkChunks && !checkFiles {
manager.chunkDownloader.snapshotCache = nil
LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
// Don't verify chunks with -files
if !checkChunks || checkFiles {
return true
}
startTime := time.Now()
var chunkHashes []string
// This contains chunks that have been verifed in previous checks and is loaded from
// .duplicacy/cache/storage/verified_chunks. Note that it contains the chunk ids not chunk
// hashes.
verifiedChunks := make(map[string]int64)
var verifiedChunksLock sync.Mutex
verifiedChunksFile := "verified_chunks"
// The index of the first chunk to add to the downloader, which may have already downloaded
// some metadata chunks so the index doesn't start with 0.
chunkIndex := -1
manager.fileChunk.Reset(false)
err = manager.snapshotCache.DownloadFile(0, verifiedChunksFile, manager.fileChunk)
if err != nil {
if !os.IsNotExist(err) {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to load the file containing verified chunks: %v", err)
}
} else {
err = json.Unmarshal(manager.fileChunk.GetBytes(), &verifiedChunks)
if err != nil {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to parse the file containing verified chunks: %v", err)
}
}
numberOfVerifiedChunks := len(verifiedChunks)
for chunkHash := range *allChunkHashes {
chunkHashes = append(chunkHashes, chunkHash)
if chunkIndex == -1 {
chunkIndex = manager.chunkDownloader.AddChunk(chunkHash)
saveVerifiedChunks := func() {
if len(verifiedChunks) > numberOfVerifiedChunks {
var description []byte
description, err = json.Marshal(verifiedChunks)
if err != nil {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to create a json file for the set of verified chunks: %v", err)
} else {
manager.chunkDownloader.AddChunk(chunkHash)
err = manager.snapshotCache.UploadFile(0, verifiedChunksFile, description)
if err != nil {
LOG_WARN("SNAPSHOT_VERIFY", "Failed to save the verified chunks file: %v", err)
} else {
LOG_INFO("SNAPSHOT_VERIFY", "Added %d chunks to the list of verified chunks", len(verifiedChunks) - numberOfVerifiedChunks)
}
}
}
}
defer saveVerifiedChunks()
RunAtError = saveVerifiedChunks
var downloadedChunkSize int64
totalChunks := len(*allChunkHashes)
for i := 0; i < totalChunks; i++ {
chunk := manager.chunkDownloader.WaitForChunk(i + chunkIndex)
if chunk.isBroken {
LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
startTime := time.Now()
var chunkHashes []string
skippedChunks := 0
for chunkHash := range *allChunkHashes {
if len(verifiedChunks) > 0 {
chunkID := manager.config.GetChunkIDFromHash(chunkHash)
if _, found := verifiedChunks[chunkID]; found {
skippedChunks++
continue
}
downloadedChunkSize += int64(chunk.GetLength())
elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(downloadedChunkSize) / elapsedTime)
remainingTime := int64(float64(totalChunks - i - 1) / float64(i + 1) * elapsedTime)
percentage := float64(i + 1) / float64(totalChunks) * 100.0
LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
manager.config.GetChunkIDFromHash(chunkHashes[i]), i + 1, totalChunks,
PrettySize(speed), PrettyTime(remainingTime), percentage)
}
chunkHashes = append(chunkHashes, chunkHash)
}
if manager.chunkDownloader.NumberOfFailedChunks > 0 {
LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkDownloader.NumberOfFailedChunks, totalChunks)
} else {
LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", totalChunks)
}
if skippedChunks > 0 {
LOG_INFO("SNAPSHOT_VERIFY", "Skipped %d chunks that have already been verified before", skippedChunks)
}
var totalDownloadedChunkSize int64
var totalDownloadedChunks int64
totalChunks := int64(len(chunkHashes))
chunkChannel := make(chan int, threads)
var wg sync.WaitGroup
wg.Add(threads)
for i := 0; i < threads; i++ {
go func() {
defer CatchLogException()
for {
chunkIndex, ok := <- chunkChannel
if !ok {
wg.Done()
return
}
chunk := manager.chunkOperator.Download(chunkHashes[chunkIndex], chunkIndex, false)
if chunk == nil {
continue
}
chunkID := manager.config.GetChunkIDFromHash(chunkHashes[chunkIndex])
verifiedChunksLock.Lock()
verifiedChunks[chunkID] = startTime.Unix()
verifiedChunksLock.Unlock()
downloadedChunkSize := atomic.AddInt64(&totalDownloadedChunkSize, int64(chunk.GetLength()))
downloadedChunks := atomic.AddInt64(&totalDownloadedChunks, 1)
elapsedTime := time.Now().Sub(startTime).Seconds()
speed := int64(float64(downloadedChunkSize) / elapsedTime)
remainingTime := int64(float64(totalChunks - downloadedChunks) / float64(downloadedChunks) * elapsedTime)
percentage := float64(downloadedChunks) / float64(totalChunks) * 100.0
LOG_INFO("VERIFY_PROGRESS", "Verified chunk %s (%d/%d), %sB/s %s %.1f%%",
chunkID, downloadedChunks, totalChunks, PrettySize(speed), PrettyTime(remainingTime), percentage)
manager.config.PutChunk(chunk)
}
} ()
}
for chunkIndex := range chunkHashes {
chunkChannel <- chunkIndex
}
close(chunkChannel)
wg.Wait()
manager.chunkOperator.WaitForCompletion()
if manager.chunkOperator.NumberOfFailedChunks > 0 {
LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkOperator.NumberOfFailedChunks, len(*allChunkHashes))
} else {
LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
}
return true
}
@@ -1225,14 +1258,6 @@ func (manager *SnapshotManager) PrintSnapshot(snapshot *Snapshot) bool {
object["chunks"] = manager.ConvertSequence(snapshot.ChunkHashes)
object["lengths"] = snapshot.ChunkLengths
// By default the json serialization of a file entry contains the path in base64 format. This is
// to convert every file entry into an object which include the path in a more readable format.
var files []map[string]interface{}
for _, file := range snapshot.Files {
files = append(files, file.convertToObject(false))
}
object["files"] = files
description, err := json.MarshalIndent(object, "", " ")
if err != nil {
@@ -1241,8 +1266,24 @@ func (manager *SnapshotManager) PrintSnapshot(snapshot *Snapshot) bool {
return false
}
fmt.Printf("%s\n", string(description))
// Don't print the ending bracket
fmt.Printf("%s", string(description[:len(description) - 2]))
fmt.Printf(",\n \"files\": [\n")
isFirstFile := true
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool {
fileDescription, _ := json.MarshalIndent(file.convertToObject(false), "", " ")
if isFirstFile {
fmt.Printf("%s", fileDescription)
isFirstFile = false
} else {
fmt.Printf(",\n%s", fileDescription)
}
return true
})
fmt.Printf(" ]\n}\n")
return true
}
@@ -1258,17 +1299,20 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
return false
}
files := make([]*Entry, 0, len(snapshot.Files)/2)
for _, file := range snapshot.Files {
files := make([]*Entry, 0)
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (file *Entry) bool {
if file.IsFile() && file.Size != 0 {
file.Attributes = nil
files = append(files, file)
}
}
return true
})
sort.Sort(ByChunk(files))
corruptedFiles := 0
var lastChunk *Chunk
for _, file := range files {
if !manager.RetrieveFile(snapshot, file, func([]byte) {}) {
if !manager.RetrieveFile(snapshot, file, &lastChunk, func([]byte) {}) {
corruptedFiles++
}
LOG_TRACE("SNAPSHOT_VERIFY", "%s", file.Path)
@@ -1286,21 +1330,13 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
}
// RetrieveFile retrieves the file in the specified snapshot.
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, lastChunk **Chunk, output func([]byte)) bool {
if file.Size == 0 {
return true
}
manager.CreateChunkDownloader()
// Temporarily disable the snapshot cache of the download so that downloaded file chunks won't be saved
// to the cache.
snapshotCache := manager.chunkDownloader.snapshotCache
manager.chunkDownloader.snapshotCache = nil
defer func() {
manager.chunkDownloader.snapshotCache = snapshotCache
}()
manager.CreateChunkOperator(false, 1, false)
fileHasher := manager.config.NewFileHasher()
alternateHash := false
@@ -1321,12 +1357,19 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
}
hash := snapshot.ChunkHashes[i]
lastChunk, lastChunkHash := manager.chunkDownloader.GetLastDownloadedChunk()
if lastChunkHash != hash {
i := manager.chunkDownloader.AddChunk(hash)
chunk = manager.chunkDownloader.WaitForChunk(i)
if lastChunk == nil {
chunk = manager.chunkOperator.Download(hash, 0, false)
} else if *lastChunk == nil {
chunk = manager.chunkOperator.Download(hash, 0, false)
*lastChunk = chunk
} else {
chunk = lastChunk
if (*lastChunk).GetHash() == hash {
chunk = *lastChunk
} else {
manager.config.PutChunk(*lastChunk)
chunk = manager.chunkOperator.Download(hash, 0, false)
*lastChunk = chunk
}
}
output(chunk.GetBytes()[start:end])
@@ -1350,10 +1393,18 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
// FindFile returns the file entry that has the given file name.
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry {
for _, entry := range snapshot.Files {
var found *Entry
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool {
if entry.Path == filePath {
return entry
found = entry
return false
}
return true
})
if found != nil {
return found
}
if !suppressError {
@@ -1385,13 +1436,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
return false
}
patterns := []string{}
if path != "" {
patterns = []string{path}
}
// If no path is specified, we're printing the snapshot so we need all attributes
if !manager.DownloadSnapshotContents(snapshot, patterns, path == "") {
// If no path is specified, we're printing the snapshot
if !manager.DownloadSnapshotSequences(snapshot) {
return false
}
@@ -1401,7 +1447,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
}
file := manager.FindFile(snapshot, path, false)
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
if !manager.RetrieveFile(snapshot, file, nil, func(chunk []byte) {
fmt.Printf("%s", chunk)
}) {
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
@@ -1419,22 +1465,38 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
top, snapshotID, revisions, filePath, compareByHash)
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
} ()
var leftSnapshot *Snapshot
var rightSnapshot *Snapshot
var err error
leftSnapshotFiles := make([]*Entry, 0, 1024)
rightSnapshotFiles := make([]*Entry, 0, 1024)
// If no or only one revision is specified, use the on-disk version for the right-hand side.
if len(revisions) <= 1 {
// Only scan the repository if filePath is not provided
if len(filePath) == 0 {
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile, excludeByAttribute)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
rightSnapshot = CreateEmptySnapshot(snapshotID)
localListingChannel := make(chan *Entry)
go func() {
defer CatchLogException()
rightSnapshot.ListLocalFiles(top, nobackupFile, filtersFile, excludeByAttribute, localListingChannel, nil, nil)
} ()
for entry := range localListingChannel {
entry.Attributes = nil // attributes are not compared
rightSnapshotFiles = append(rightSnapshotFiles, entry)
}
}
} else {
rightSnapshot = manager.DownloadSnapshot(snapshotID, revisions[1])
manager.DownloadSnapshotSequences(rightSnapshot)
}
// If no revision is specified, use the latest revision as the left-hand side.
@@ -1448,15 +1510,11 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
leftSnapshot = manager.DownloadSnapshot(snapshotID, revisions[0])
}
manager.DownloadSnapshotSequences(leftSnapshot)
if len(filePath) > 0 {
manager.DownloadSnapshotContents(leftSnapshot, nil, false)
if rightSnapshot != nil && rightSnapshot.Revision != 0 {
manager.DownloadSnapshotContents(rightSnapshot, nil, false)
}
var leftFile []byte
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), func(content []byte) {
if !manager.RetrieveFile(leftSnapshot, manager.FindFile(leftSnapshot, filePath, false), nil, func(content []byte) {
leftFile = append(leftFile, content...)
}) {
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
@@ -1466,7 +1524,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
var rightFile []byte
if rightSnapshot != nil {
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), func(content []byte) {
if !manager.RetrieveFile(rightSnapshot, manager.FindFile(rightSnapshot, filePath, false), nil, func(content []byte) {
rightFile = append(rightFile, content...)
}) {
LOG_ERROR("SNAPSHOT_DIFF", "File %s is corrupted in snapshot %s at revision %d",
@@ -1527,24 +1585,32 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
return true
}
// We only need to decode the 'files' sequence, not 'chunkhashes' or 'chunklengthes'
manager.DownloadSnapshotFileSequence(leftSnapshot, nil, false)
if rightSnapshot != nil && rightSnapshot.Revision != 0 {
manager.DownloadSnapshotFileSequence(rightSnapshot, nil, false)
leftSnapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool {
entry.Attributes = nil
leftSnapshotFiles = append(leftSnapshotFiles, entry)
return true
})
if rightSnapshot.Revision != 0 {
rightSnapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func(entry *Entry) bool {
entry.Attributes = nil
rightSnapshotFiles = append(rightSnapshotFiles, entry)
return true
})
}
maxSize := int64(9)
maxSizeDigits := 1
// Find the max Size value in order for pretty alignment.
for _, file := range leftSnapshot.Files {
for _, file := range leftSnapshotFiles {
for !file.IsDir() && file.Size > maxSize {
maxSize = maxSize*10 + 9
maxSizeDigits += 1
}
}
for _, file := range rightSnapshot.Files {
for _, file := range rightSnapshotFiles {
for !file.IsDir() && file.Size > maxSize {
maxSize = maxSize*10 + 9
maxSizeDigits += 1
@@ -1554,22 +1620,22 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
buffer := make([]byte, 32*1024)
var i, j int
for i < len(leftSnapshot.Files) || j < len(rightSnapshot.Files) {
for i < len(leftSnapshotFiles) || j < len(rightSnapshotFiles) {
if i >= len(leftSnapshot.Files) {
if rightSnapshot.Files[j].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "+ %s", rightSnapshot.Files[j].String(maxSizeDigits))
if i >= len(leftSnapshotFiles) {
if rightSnapshotFiles[j].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "+ %s", rightSnapshotFiles[j].String(maxSizeDigits))
}
j++
} else if j >= len(rightSnapshot.Files) {
if leftSnapshot.Files[i].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "- %s", leftSnapshot.Files[i].String(maxSizeDigits))
} else if j >= len(rightSnapshotFiles) {
if leftSnapshotFiles[i].IsFile() {
LOG_INFO("SNAPSHOT_DIFF", "- %s", leftSnapshotFiles[i].String(maxSizeDigits))
}
i++
} else {
left := leftSnapshot.Files[i]
right := rightSnapshot.Files[j]
left := leftSnapshotFiles[i]
right := rightSnapshotFiles[j]
if !left.IsFile() {
i++
@@ -1624,6 +1690,12 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
LOG_DEBUG("HISTORY_PARAMETERS", "top: %s, id: %s, revisions: %v, path: %s, showLocalHash: %t",
top, snapshotID, revisions, filePath, showLocalHash)
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
} ()
var err error
if len(revisions) == 0 {
@@ -1638,7 +1710,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
sort.Ints(revisions)
for _, revision := range revisions {
snapshot := manager.DownloadSnapshot(snapshotID, revision)
manager.DownloadSnapshotFileSequence(snapshot, nil, false)
manager.DownloadSnapshotSequences(snapshot)
file := manager.FindFile(snapshot, filePath, true)
if file != nil {
@@ -1746,8 +1818,11 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
}
manager.chunkOperator = CreateChunkOperator(manager.storage, threads)
defer manager.chunkOperator.Stop()
manager.CreateChunkOperator(false, threads, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
} ()
prefPath := GetDuplicacyPreferencePath()
logDir := path.Join(prefPath, "logs")
@@ -2129,7 +2204,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
return false
}
manager.chunkOperator.Stop()
manager.chunkOperator.WaitForCompletion()
for _, fossil := range manager.chunkOperator.fossils {
collection.AddFossil(fossil)
}
@@ -2210,6 +2285,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
} else {
manager.CleanSnapshotCache(nil, allSnapshots)
}
manager.chunkOperator.WaitForCompletion()
return true
}
@@ -2422,8 +2498,6 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
// CheckSnapshot performs sanity checks on the given snapshot.
func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
lastChunk := 0
lastOffset := 0
var lastEntry *Entry
numberOfChunks := len(snapshot.ChunkHashes)
@@ -2433,57 +2507,39 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
numberOfChunks, len(snapshot.ChunkLengths))
}
entries := make([]*Entry, len(snapshot.Files))
copy(entries, snapshot.Files)
sort.Sort(ByChunk(entries))
snapshot.ListRemoteFiles(manager.config, manager.chunkOperator, func (entry *Entry) bool {
for _, entry := range snapshot.Files {
if lastEntry != nil && lastEntry.Compare(entry) >= 0 && !strings.Contains(lastEntry.Path, "\ufffd") {
return fmt.Errorf("The entry %s appears before the entry %s", lastEntry.Path, entry.Path)
err = fmt.Errorf("The entry %s appears before the entry %s", lastEntry.Path, entry.Path)
return false
}
lastEntry = entry
}
for _, entry := range entries {
if !entry.IsFile() || entry.Size == 0 {
continue
return true
}
if entry.StartChunk < 0 {
return fmt.Errorf("The file %s starts at chunk %d", entry.Path, entry.StartChunk)
err = fmt.Errorf("The file %s starts at chunk %d", entry.Path, entry.StartChunk)
return false
}
if entry.EndChunk >= numberOfChunks {
return fmt.Errorf("The file %s ends at chunk %d while the number of chunks is %d",
err = fmt.Errorf("The file %s ends at chunk %d while the number of chunks is %d",
entry.Path, entry.EndChunk, numberOfChunks)
return false
}
if entry.EndChunk < entry.StartChunk {
return fmt.Errorf("The file %s starts at chunk %d and ends at chunk %d",
fmt.Errorf("The file %s starts at chunk %d and ends at chunk %d",
entry.Path, entry.StartChunk, entry.EndChunk)
return false
}
if entry.StartOffset > 0 {
if entry.StartChunk < lastChunk {
return fmt.Errorf("The file %s starts at chunk %d while the last chunk is %d",
entry.Path, entry.StartChunk, lastChunk)
}
if entry.StartChunk > lastChunk+1 {
return fmt.Errorf("The file %s starts at chunk %d while the last chunk is %d",
entry.Path, entry.StartChunk, lastChunk)
}
if entry.StartChunk == lastChunk && entry.StartOffset < lastOffset {
return fmt.Errorf("The file %s starts at offset %d of chunk %d while the last file ends at offset %d",
entry.Path, entry.StartOffset, entry.StartChunk, lastOffset)
}
if entry.StartChunk == entry.EndChunk && entry.StartOffset > entry.EndOffset {
return fmt.Errorf("The file %s starts at offset %d and ends at offset %d of the same chunk %d",
entry.Path, entry.StartOffset, entry.EndOffset, entry.StartChunk)
}
if entry.StartChunk == entry.EndChunk && entry.StartOffset > entry.EndOffset {
err = fmt.Errorf("The file %s starts at offset %d and ends at offset %d of the same chunk %d",
entry.Path, entry.StartOffset, entry.EndOffset, entry.StartChunk)
return false
}
fileSize := int64(0)
@@ -2503,22 +2559,13 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
}
if entry.Size != fileSize {
return fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
err = fmt.Errorf("The file %s has a size of %d but the total size of chunks is %d",
entry.Path, entry.Size, fileSize)
return false
}
lastChunk = entry.EndChunk
lastOffset = entry.EndOffset
}
if len(entries) > 0 && entries[0].StartChunk != 0 {
return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk)
}
// There may be a last chunk whose size is 0 so we allow this to happen
if lastChunk < numberOfChunks-2 {
return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
}
return true
})
return nil
}

View File

@@ -116,19 +116,18 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
func uploadTestChunk(manager *SnapshotManager, content []byte) string {
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
chunkOperator := CreateChunkOperator(manager.config, manager.storage, nil, false, *testThreads, false)
chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize)
}
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
chunk := CreateChunk(manager.config, true)
chunk.Reset(true)
chunk.Write(content)
chunkUploader.StartChunk(chunk, 0)
chunkUploader.Stop()
chunkOperator.Upload(chunk, 0, false)
chunkOperator.WaitForCompletion()
chunkOperator.Stop()
return chunk.GetHash()
}
@@ -180,6 +179,12 @@ func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision in
func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {
manager.CreateChunkOperator(false, 1, false)
defer func() {
manager.chunkOperator.Stop()
manager.chunkOperator = nil
}()
var snapshotIDs []string
var err error

View File

@@ -540,7 +540,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
} else if matched[1] == "dropbox" {
storageDir := matched[3] + matched[5]
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
token := GetPassword(preference, "dropbox_token", "Enter Dropbox refresh token:", true, resetPassword)
dropboxStorage, err := CreateDropboxStorage(token, storageDir, 1, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
@@ -714,6 +714,23 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "fabric_token", token)
return smeStorage
} else if matched[1] == "storj" {
satellite := matched[2] + matched[3]
bucket := matched[5]
storageDir := ""
index := strings.Index(bucket, "/")
if index >= 0 {
storageDir = bucket[index + 1:]
bucket = bucket[:index]
}
apiKey := GetPassword(preference, "storj_key", "Enter the API access key:", true, resetPassword)
passphrase := GetPassword(preference, "storj_passphrase", "Enter the passphrase:", true, resetPassword)
storjStorage, err := CreateStorjStorage(satellite, apiKey, passphrase, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Storj storage at %s: %v", storageURL, err)
return nil
}
return storjStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil

View File

@@ -22,28 +22,17 @@ import (
"math/rand"
)
var testStorageName string
var testRateLimit int
var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
var testErasureCoding bool
func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec")
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding")
flag.Parse()
}
var testStorageName = flag.String("storage", "", "the test storage to use")
var testRateLimit = flag.Int("limit-rate", 0, "maximum transfer speed in kbytes/sec")
var testQuickMode = flag.Bool("quick", false, "quick test")
var testThreads = flag.Int("threads", 1, "number of downloading/uploading threads")
var testFixedChunkSize = flag.Bool("fixed-chunk-size", false, "fixed chunk size")
var testRSAEncryption = flag.Bool("rsa", false, "enable RSA encryption")
var testErasureCoding = flag.Bool("erasure-coding", false, "enable Erasure Coding")
func loadStorage(localStoragePath string, threads int) (Storage, error) {
if testStorageName == "" || testStorageName == "file" {
if *testStorageName == "" || *testStorageName == "file" {
storage, err := CreateFileStorage(localStoragePath, false, threads)
if storage != nil {
// Use a read level of at least 2 because this will catch more errors than a read level of 1.
@@ -64,113 +53,132 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
return nil, err
}
config, found := configs[testStorageName]
config, found := configs[*testStorageName]
if !found {
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
return nil, fmt.Errorf("No storage named '%s' found", *testStorageName)
}
if testStorageName == "flat" {
if *testStorageName == "flat" {
storage, err := CreateFileStorage(localStoragePath, false, threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "samba" {
} else if *testStorageName == "samba" {
storage, err := CreateFileStorage(localStoragePath, true, threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "sftp" {
} else if *testStorageName == "sftp" {
port, _ := strconv.Atoi(config["port"])
storage, err := CreateSFTPStorageWithPassword(config["server"], port, config["username"], config["directory"], 2, config["password"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "s3" {
} else if *testStorageName == "s3" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "wasabi" {
} else if *testStorageName == "wasabi" {
storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "s3c" {
} else if *testStorageName == "s3c" {
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "digitalocean" {
} else if *testStorageName == "digitalocean" {
storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "minio" {
} else if *testStorageName == "minio" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, false, true)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "minios" {
} else if *testStorageName == "minios" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, true)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "dropbox" {
} else if *testStorageName == "dropbox" {
storage, err := CreateDropboxStorage(config["token"], config["directory"], 1, threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "b2" {
} else if *testStorageName == "b2" {
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-s3" {
} else if *testStorageName == "gcs-s3" {
storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs" {
} else if *testStorageName == "gcs" {
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-sa" {
} else if *testStorageName == "gcs-sa" {
storage, err := CreateGCSStorage(config["token_file"], config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "azure" {
} else if *testStorageName == "azure" {
storage, err := CreateAzureStorage(config["account"], config["key"], config["container"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "acd" {
} else if *testStorageName == "acd" {
storage, err := CreateACDStorage(config["token_file"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd" {
} else if *testStorageName == "gcd" {
storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcd-shared" {
} else if *testStorageName == "gcd-shared" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
} else if *testStorageName == "gcd-impersonate" {
storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "odb" {
} else if *testStorageName == "odb" {
storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "one" {
} else if *testStorageName == "one" {
storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "hubic" {
} else if *testStorageName == "hubic" {
storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "memset" {
} else if *testStorageName == "memset" {
storage, err := CreateSwiftStorage(config["storage_url"], config["key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "pcloud" || testStorageName == "box" {
} else if *testStorageName == "pcloud" || *testStorageName == "box" {
storage, err := CreateWebDAVStorage(config["host"], 0, config["username"], config["password"], config["storage_path"], false, threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "fabric" {
storage, err := CreateFileFabricStorage(config["endpoint"], config["token"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if *testStorageName == "storj" {
storage, err := CreateStorjStorage(config["satellite"], config["key"], config["passphrase"], config["bucket"], config["storage_path"], threads)
if err != nil {
return nil, err
}
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
}
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
return nil, fmt.Errorf("Invalid storage named: %s", *testStorageName)
}
func cleanStorage(storage Storage) {
@@ -310,7 +318,7 @@ func TestStorage(t *testing.T) {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
LOG_INFO("STORAGE_TEST", "storage: %s", *testStorageName)
threads := 8
storage, err := loadStorage(testDir, threads)
@@ -319,7 +327,7 @@ func TestStorage(t *testing.T) {
return
}
storage.EnableTestMode()
storage.SetRateLimits(testRateLimit, testRateLimit)
storage.SetRateLimits(*testRateLimit, *testRateLimit)
delay := 0
if _, ok := storage.(*ACDStorage); ok {
@@ -441,7 +449,7 @@ func TestStorage(t *testing.T) {
numberOfFiles := 10
maxFileSize := 64 * 1024
if testQuickMode {
if *testQuickMode {
numberOfFiles = 2
}
@@ -576,7 +584,7 @@ func TestCleanStorage(t *testing.T) {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)
LOG_INFO("STORAGE_TEST", "storage: %s", *testStorageName)
storage, err := loadStorage(testDir, 1)
if err != nil {

View File

@@ -0,0 +1,184 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com
package duplicacy
import (
	"context"
	"errors"
	"fmt"
	"io"

	"storj.io/uplink"
)
// StorjStorage is a storage backend for Storj.
type StorjStorage struct {
StorageBase
project *uplink.Project // handle to the opened Storj project; shared by all threads
bucket string // name of the bucket that holds all objects
storageDir string // object key prefix inside the bucket; either "" or ends with '/'
numberOfThreads int // number of transfer threads; used to split the rate limits per thread
}
// CreateStorjStorage creates a Storj storage backend.
//
// It requests an access grant from the given satellite using the API key and
// encryption passphrase, opens the project, and verifies that 'bucket' is
// accessible. 'storageDir' is an optional key prefix inside the bucket; a
// trailing '/' is appended if missing so that object keys can be built by
// simple concatenation. 'threads' is recorded to split rate limits per thread.
func CreateStorjStorage(satellite string, apiKey string, passphrase string,
	bucket string, storageDir string, threads int) (storage *StorjStorage, err error) {

	ctx := context.Background()

	access, err := uplink.RequestAccessWithPassphrase(ctx, satellite, apiKey, passphrase)
	if err != nil {
		return nil, fmt.Errorf("cannot request the access grant: %v", err)
	}

	project, err := uplink.OpenProject(ctx, access)
	if err != nil {
		return nil, fmt.Errorf("cannot open the project: %v", err)
	}

	// Fail early if the bucket does not exist or is not accessible.
	_, err = project.StatBucket(ctx, bucket)
	if err != nil {
		return nil, fmt.Errorf("cannot find the bucket: %v", err)
	}

	// Normalize the prefix so that storageDir + path always forms a valid key.
	if storageDir != "" && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &StorjStorage{
		project:         project,
		bucket:          bucket,
		storageDir:      storageDir,
		numberOfThreads: threads,
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{2, 3}, 2)
	return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir'
// (non-recursively). Subdirectories are reported with a trailing '/' and a
// size of 0. Names are returned relative to 'dir'.
func (storage *StorjStorage) ListFiles(threadIndex int, dir string) (
	files []string, sizes []int64, err error) {

	fullPath := storage.storageDir + dir
	if fullPath != "" && fullPath[len(fullPath)-1] != '/' {
		fullPath += "/"
	}

	options := uplink.ListObjectsOptions{
		Prefix: fullPath,
		System: true, // request SystemMetadata, which includes ContentLength
	}

	objects := storage.project.ListObjects(context.Background(), storage.bucket, &options)
	for objects.Next() {
		item := objects.Item()
		name := item.Key[len(fullPath):]
		size := item.System.ContentLength
		if item.IsPrefix {
			// A prefix is a directory: it carries no content, and callers
			// expect directory names to end with '/'.
			if name != "" && name[len(name)-1] != '/' {
				name += "/"
			}
			size = 0
		}
		files = append(files, name)
		sizes = append(sizes, size)
	}
	// Next() returns false on both exhaustion and failure; the error must be
	// checked after the loop or listing failures are silently dropped.
	if err := objects.Err(); err != nil {
		return nil, nil, err
	}

	return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *StorjStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	objectKey := storage.storageDir + filePath
	_, err = storage.project.DeleteObject(context.Background(), storage.bucket, objectKey)
	return
}
// MoveFile renames the file.
func (storage *StorjStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	source := storage.storageDir + from
	destination := storage.storageDir + to
	// The move stays within the same bucket; both bucket arguments are identical.
	return storage.project.MoveObject(context.Background(), storage.bucket, source,
		storage.bucket, destination, nil)
}
// CreateDirectory creates a new directory.
// Storj is an object store with no real directory objects, so this is a no-op
// that always succeeds; "directories" exist implicitly as key prefixes.
func (storage *StorjStorage) CreateDirectory(threadIndex int, dir string) (err error) {
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
// A missing object is reported as exist == false with a nil error; any other
// failure (network, authentication, ...) is returned to the caller instead of
// being misreported as "does not exist".
func (storage *StorjStorage) GetFileInfo(threadIndex int, filePath string) (
	exist bool, isDir bool, size int64, err error) {

	info, err := storage.project.StatObject(context.Background(), storage.bucket,
		storage.storageDir+filePath)
	if err != nil {
		if errors.Is(err, uplink.ErrObjectNotFound) {
			// Not an error: the object simply does not exist.
			return false, false, 0, nil
		}
		return false, false, 0, err
	}
	return true, info.IsPrefix, info.System.ContentLength, nil
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *StorjStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	reader, err := storage.project.DownloadObject(context.Background(), storage.bucket,
		storage.storageDir+filePath, nil)
	if err != nil {
		return err
	}
	defer reader.Close()

	// Throttle the copy so the per-thread share of the download limit is respected.
	_, err = RateLimitedCopy(chunk, reader, storage.DownloadRateLimit/storage.numberOfThreads)
	return err
}
// UploadFile writes 'content' to the file at 'filePath'.
// The upload is committed only after all bytes are written; on a copy failure
// the pending upload is aborted so it does not linger server-side.
func (storage *StorjStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	upload, err := storage.project.UploadObject(context.Background(), storage.bucket,
		storage.storageDir+filePath, nil)
	if err != nil {
		return err
	}

	// Throttle the write so the per-thread share of the upload limit is respected.
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
	if _, err = io.Copy(upload, reader); err != nil {
		// Best-effort cleanup; the copy error is the one worth reporting.
		_ = upload.Abort()
		return err
	}

	return upload.Commit()
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *StorjStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *StorjStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *StorjStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of files names.
func (storage *StorjStorage) IsFastListing() bool { return true }
// Enable the test mode.  This is a no-op for the Storj backend.
func (storage *StorjStorage) EnableTestMode() {}

View File

@@ -5,16 +5,18 @@
package duplicacy
import (
"context"
"strconv"
"strings"
"time"
"github.com/ncw/swift"
"github.com/ncw/swift/v2"
)
type SwiftStorage struct {
StorageBase
ctx context.Context
connection *swift.Connection
container string
storageDir string
@@ -106,6 +108,8 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
arguments["protocol"] = "https"
}
ctx, _ := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
// Please refer to https://godoc.org/github.com/ncw/swift#Connection
connection := swift.Connection{
Domain: arguments["domain"],
@@ -129,17 +133,18 @@ func CreateSwiftStorage(storageURL string, key string, threads int) (storage *Sw
TrustId: arguments["trust_id"],
}
err = connection.Authenticate()
err = connection.Authenticate(ctx)
if err != nil {
return nil, err
}
_, _, err = connection.Container(container)
_, _, err = connection.Container(ctx, container)
if err != nil {
return nil, err
}
storage = &SwiftStorage{
ctx: ctx,
connection: &connection,
container: container,
storageDir: storageDir,
@@ -168,7 +173,7 @@ func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []str
options.Delimiter = '/'
}
objects, err := storage.connection.ObjectsAll(storage.container, &options)
objects, err := storage.connection.ObjectsAll(storage.ctx, storage.container, &options)
if err != nil {
return nil, nil, err
}
@@ -190,12 +195,12 @@ func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []str
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SwiftStorage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.connection.ObjectDelete(storage.container, storage.storageDir+filePath)
return storage.connection.ObjectDelete(storage.ctx, storage.container, storage.storageDir+filePath)
}
// MoveFile renames the file.
func (storage *SwiftStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.connection.ObjectMove(storage.container, storage.storageDir+from,
return storage.connection.ObjectMove(storage.ctx, storage.container, storage.storageDir+from,
storage.container, storage.storageDir+to)
}
@@ -207,7 +212,7 @@ func (storage *SwiftStorage) CreateDirectory(threadIndex int, dir string) (err e
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
object, _, err := storage.connection.Object(storage.container, storage.storageDir+filePath)
object, _, err := storage.connection.Object(storage.ctx, storage.container, storage.storageDir+filePath)
if err != nil {
if err == swift.ObjectNotFound {
@@ -223,7 +228,7 @@ func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exis
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
file, _, err := storage.connection.ObjectOpen(storage.container, storage.storageDir+filePath, false, nil)
file, _, err := storage.connection.ObjectOpen(storage.ctx, storage.container, storage.storageDir+filePath, false, nil)
if err != nil {
return err
}
@@ -234,7 +239,7 @@ func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chun
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads)
_, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
_, err = storage.connection.ObjectPut(storage.ctx, storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
return err
}

View File

@@ -14,6 +14,7 @@ import (
"strconv"
"strings"
"time"
"runtime"
"github.com/gilbertchen/gopass"
"golang.org/x/crypto/pbkdf2"
@@ -460,3 +461,16 @@ func AtoSize(sizeString string) int {
return size
}
func PrintMemoryUsage() {
for {
var m runtime.MemStats
runtime.ReadMemStats(&m)
LOG_INFO("MEMORY_STATS", "Currently allocated: %s, total allocated: %s, system memory: %s, number of GCs: %d",
PrettySize(int64(m.Alloc)), PrettySize(int64(m.TotalAlloc)), PrettySize(int64(m.Sys)), m.NumGC)
time.Sleep(time.Second)
}
}

View File

@@ -13,7 +13,7 @@ import (
"path/filepath"
"syscall"
"github.com/gilbertchen/xattr"
"github.com/pkg/xattr"
)
func Readlink(path string) (isRegular bool, s string, err error) {
@@ -50,37 +50,38 @@ func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
func (entry *Entry) ReadAttributes(top string) {
fullPath := filepath.Join(top, entry.Path)
attributes, _ := xattr.Listxattr(fullPath)
attributes, _ := xattr.List(fullPath)
if len(attributes) > 0 {
entry.Attributes = make(map[string][]byte)
entry.Attributes = &map[string][]byte{}
for _, name := range attributes {
attribute, err := xattr.Getxattr(fullPath, name)
attribute, err := xattr.Get(fullPath, name)
if err == nil {
entry.Attributes[name] = attribute
(*entry.Attributes)[name] = attribute
}
}
}
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
names, _ := xattr.Listxattr(fullPath)
names, _ := xattr.List(fullPath)
for _, name := range names {
newAttribute, found := entry.Attributes[name]
newAttribute, found := (*entry.Attributes)[name]
if found {
oldAttribute, _ := xattr.Getxattr(fullPath, name)
oldAttribute, _ := xattr.Get(fullPath, name)
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Setxattr(fullPath, name, newAttribute)
xattr.Set(fullPath, name, newAttribute)
}
delete(entry.Attributes, name)
delete(*entry.Attributes, name)
} else {
xattr.Removexattr(fullPath, name)
xattr.Remove(fullPath, name)
}
}
for name, attribute := range entry.Attributes {
xattr.Setxattr(fullPath, name, attribute)
for name, attribute := range *entry.Attributes {
xattr.Set(fullPath, name, attribute)
}
}