mirror of https://github.com/gilbertchen/duplicacy
synced 2025-12-06 00:03:38 +00:00
Compare commits
45 Commits
670cbcd776
fd469bae9e
acef01770a
1eb1fb14a8
8b489f04eb
089e19f8e6
1da7e2b536
ed8b4393be
5e28dc4911
f2f07a120d
153f6a2d20
5d45999077
1adcf56890
09e3cdfebf
fe854d469d
76f1274e13
9c3122b814
6ca8b8dff0
51cbf73caa
835af11334
4c3557eb80
eebcece9e0
8c80470c29
bcb889272d
79d8654a12
6bf0d2265c
749db78a1f
0a51bd8d1a
7208adbce2
e827662869
57dd5ba927
01a37b7828
57cd20bb84
0e970da222
e880636502
810303ce25
ffac83dd80
05674871fe
22d6f3abfc
d26ffe2cff
a35f6c27be
aa07feeac0
7719bb9f29
426110e961
a55ac1b7ad
114  Gopkg.lock  generated
@@ -7,17 +7,11 @@
   revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
   version = "v0.16.0"
 
-[[projects]]
-  name = "github.com/Azure/azure-sdk-for-go"
-  packages = ["version"]
-  revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
-  version = "v14.2.0"
-
 [[projects]]
   name = "github.com/Azure/go-autorest"
-  packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
-  revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
-  version = "v10.2.0"
+  packages = ["autorest","autorest/adal","autorest/azure","autorest/date","logger","version"]
+  revision = "9bc4033dd347c7f416fca46b2f42a043dc1fbdf6"
+  version = "v10.15.5"
 
 [[projects]]
   branch = "master"
@@ -27,9 +21,9 @@
 
 [[projects]]
   name = "github.com/aws/aws-sdk-go"
-  packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
-  revision = "a32b1dcd091264b5dee7b386149b6cc3823395c9"
-  version = "v1.12.31"
+  packages = ["aws","aws/arn","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/processcreds","aws/credentials/stscreds","aws/csm","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/context","internal/ini","internal/s3err","internal/sdkio","internal/sdkmath","internal/sdkrand","internal/sdkuri","internal/shareddefaults","internal/strings","internal/sync/singleflight","private/protocol","private/protocol/eventstream","private/protocol/eventstream/eventstreamapi","private/protocol/json/jsonutil","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/s3/internal/arn","service/sts","service/sts/stsiface"]
+  revision = "851d5ffb66720c2540cc68020d4d8708950686c8"
+  version = "v1.30.7"
 
 [[projects]]
   name = "github.com/bkaradzic/go-lz4"
@@ -40,14 +34,14 @@
 [[projects]]
   name = "github.com/dgrijalva/jwt-go"
   packages = ["."]
-  revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
-  version = "v3.1.0"
+  revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
+  version = "v3.2.0"
 
 [[projects]]
   branch = "master"
   name = "github.com/gilbertchen/azure-sdk-for-go"
-  packages = ["storage"]
-  revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"
+  packages = ["storage","version"]
+  revision = "8fd4663cab7c7c1c46d00449291c92ad23b0d0d9"
 
 [[projects]]
   branch = "master"
@@ -56,10 +50,9 @@
   revision = "1de0a1836ce9c3ae1bf737a0869c4f04f28a7f98"
 
 [[projects]]
-  branch = "master"
   name = "github.com/gilbertchen/go-dropbox"
   packages = ["."]
-  revision = "90711b603312b1f973f3a5da3793ac4f1e5c2f2a"
+  revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
 
 [[projects]]
   name = "github.com/gilbertchen/go-ole"
@@ -98,33 +91,33 @@
   revision = "68e7a6806b0137a396d7d05601d7403ae1abac58"
 
 [[projects]]
-  name = "github.com/go-ini/ini"
-  packages = ["."]
-  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
-  version = "v1.32.0"
+  branch = "master"
+  name = "github.com/golang/groupcache"
+  packages = ["lru"]
+  revision = "8c9f03a8e57eb486e42badaed3fb287da51807ba"
 
 [[projects]]
-  branch = "master"
   name = "github.com/golang/protobuf"
   packages = ["proto","protoc-gen-go/descriptor","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
-  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
+  revision = "84668698ea25b64748563aa20726db66a6b8d299"
+  version = "v1.3.5"
 
 [[projects]]
   name = "github.com/googleapis/gax-go"
-  packages = ["."]
-  revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f"
-  version = "v2.0.0"
+  packages = [".","v2"]
+  revision = "c8a15bac9b9fe955bd9f900272f9a306465d28cf"
+  version = "v2.0.3"
 
 [[projects]]
   name = "github.com/jmespath/go-jmespath"
   packages = ["."]
-  revision = "0b12d6b5"
+  revision = "c2b33e84"
 
 [[projects]]
-  branch = "master"
   name = "github.com/kr/fs"
   packages = ["."]
-  revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"
+  revision = "1455def202f6e05b95cc7bfc7e8ae67ae5141eba"
+  version = "v0.1.0"
 
 [[projects]]
   name = "github.com/marstr/guid"
@@ -139,22 +132,22 @@
   revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"
 
 [[projects]]
-  branch = "master"
   name = "github.com/ncw/swift"
   packages = ["."]
-  revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"
+  revision = "3e1a09f21340e4828e7265aa89f4dc1495fa7ccc"
+  version = "v1.0.50"
 
 [[projects]]
   name = "github.com/pkg/errors"
   packages = ["."]
-  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
-  version = "v0.8.0"
+  revision = "614d223910a179a466c1767a985424175c39b465"
+  version = "v0.9.1"
 
 [[projects]]
   name = "github.com/pkg/sftp"
   packages = ["."]
-  revision = "3edd153f213d8d4191a0ee4577c61cca19436632"
-  version = "v1.10.1"
+  revision = "5616182052227b951e76d9c9b79a616c608bd91b"
+  version = "v1.11.0"
 
 [[projects]]
   name = "github.com/satori/go.uuid"
@@ -168,63 +161,68 @@
   packages = ["."]
   revision = "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
 
+[[projects]]
+  name = "go.opencensus.io"
+  packages = [".","internal","internal/tagencoding","metric/metricdata","metric/metricproducer","plugin/ochttp","plugin/ochttp/propagation/b3","resource","stats","stats/internal","stats/view","tag","trace","trace/internal","trace/propagation","trace/tracestate"]
+  revision = "d835ff86be02193d324330acdb7d65546b05f814"
+  version = "v0.22.3"
+
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
-  packages = ["curve25519","ed25519","ed25519/internal/edwards25519","pbkdf2","ssh","ssh/agent","ssh/terminal"]
-  revision = "9f005a07e0d31d45e6656d241bb5c0f2efd4bc94"
+  packages = ["blowfish","chacha20","curve25519","ed25519","ed25519/internal/edwards25519","internal/subtle","pbkdf2","poly1305","ssh","ssh/agent","ssh/internal/bcrypt_pbkdf","ssh/terminal"]
+  revision = "056763e48d71961566155f089ac0f02f1dda9b5a"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/net"
-  packages = ["context","context/ctxhttp","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"]
-  revision = "9dfe39835686865bff950a07b394c12a98ddc811"
+  packages = ["context","context/ctxhttp","http/httpguts","http2","http2/hpack","idna","internal/timeseries","trace"]
+  revision = "d3edc9973b7eb1fb302b0ff2c62357091cea9a30"
 
 [[projects]]
-  branch = "master"
   name = "golang.org/x/oauth2"
   packages = [".","google","internal","jws","jwt"]
-  revision = "f95fa95eaa936d9d87489b15d1d18b97c1ba9c28"
+  revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
 
 [[projects]]
   branch = "master"
   name = "golang.org/x/sys"
-  packages = ["unix","windows"]
-  revision = "82aafbf43bf885069dc71b7e7c2f9d7a614d47da"
+  packages = ["cpu","unix","windows"]
+  revision = "59c9f1ba88faf592b225274f69c5ef1e4ebacf82"
 
 [[projects]]
-  branch = "master"
   name = "golang.org/x/text"
-  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
-  revision = "88f656faf3f37f690df1a32515b479415e1a6769"
+  packages = ["collate","collate/build","internal/colltab","internal/gen","internal/language","internal/language/compact","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
+  revision = "342b2e1fbaa52c93f31447ad2c6abc048c63e475"
+  version = "v0.3.2"
 
 [[projects]]
-  branch = "master"
   name = "google.golang.org/api"
-  packages = ["drive/v3","gensupport","googleapi","googleapi/internal/uritemplates","googleapi/transport","internal","iterator","option","storage/v1","transport/http"]
-  revision = "17b5f22a248d6d3913171c1a557552ace0d9c806"
+  packages = ["drive/v3","googleapi","googleapi/transport","internal","internal/gensupport","internal/third_party/uritemplates","iterator","option","option/internaloption","storage/v1","transport/cert","transport/http","transport/http/internal/propagation"]
+  revision = "52f0532eadbcc6f6b82d6f5edf66e610d10bfde6"
+  version = "v0.21.0"
 
 [[projects]]
   name = "google.golang.org/appengine"
   packages = [".","internal","internal/app_identity","internal/base","internal/datastore","internal/log","internal/modules","internal/remote_api","internal/urlfetch","urlfetch"]
-  revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a"
-  version = "v1.0.0"
+  revision = "971852bfffca25b069c31162ae8f247a3dba083b"
+  version = "v1.6.5"
 
 [[projects]]
   branch = "master"
   name = "google.golang.org/genproto"
-  packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status"]
-  revision = "891aceb7c239e72692819142dfca057bdcbfcb96"
+  packages = ["googleapis/api/annotations","googleapis/iam/v1","googleapis/rpc/status","googleapis/type/expr"]
+  revision = "baae70f3302d3efdff74db41e48a5d476d036906"
 
 [[projects]]
   name = "google.golang.org/grpc"
-  packages = [".","balancer","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
-  revision = "5a9f7b402fe85096d2e1d0383435ee1876e863d0"
-  version = "v1.8.0"
+  packages = [".","attributes","backoff","balancer","balancer/base","balancer/roundrobin","binarylog/grpc_binarylog_v1","codes","connectivity","credentials","credentials/internal","encoding","encoding/proto","grpclog","internal","internal/backoff","internal/balancerload","internal/binarylog","internal/buffer","internal/channelz","internal/envconfig","internal/grpclog","internal/grpcrand","internal/grpcsync","internal/grpcutil","internal/resolver/dns","internal/resolver/passthrough","internal/syscall","internal/transport","keepalive","metadata","naming","peer","resolver","serviceconfig","stats","status","tap"]
+  revision = "ac54eec90516cee50fc6b9b113b34628a85f976f"
+  version = "v1.28.1"
 
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "8636a9db1eb54be5374f9914687693122efdde511f11c47d10c22f9e245e7f70"
+  inputs-digest = "e462352e0b0c726247078462e30a79330ac7a8b9dc62e9ed9d1e097b684224e9"
   solver-name = "gps-cdcl"
   solver-version = 1
12  Gopkg.toml
@@ -31,7 +31,7 @@
 
 [[constraint]]
   name = "github.com/aws/aws-sdk-go"
-  version = "1.12.31"
+  version = "1.30.7"
 
 [[constraint]]
   name = "github.com/bkaradzic/go-lz4"
@@ -46,8 +46,8 @@
   name = "github.com/gilbertchen/cli"
 
 [[constraint]]
-  branch = "master"
   name = "github.com/gilbertchen/go-dropbox"
+  revision = "0baa9015ac2547d8b69b2e88c709aa90cfb8fbc1"
 
 [[constraint]]
   name = "github.com/gilbertchen/go-ole"
@@ -86,9 +86,13 @@
   name = "golang.org/x/net"
 
 [[constraint]]
-  branch = "master"
   name = "golang.org/x/oauth2"
+  revision = "bf48bf16ab8d622ce64ec6ce98d2c98f916b6303"
 
 [[constraint]]
-  branch = "master"
   name = "google.golang.org/api"
+  version = "0.21.0"
+
+[[constraint]]
+  name = "google.golang.org/grpc"
+  version = "1.28.0"
duplicacy/duplicacy_main.go

@@ -159,6 +159,10 @@ func setGlobalOptions(context *cli.Context) {
 		}()
 	}
 
+	for _, logID := range context.GlobalStringSlice("suppress") {
+		duplicacy.SuppressLog(logID)
+	}
+
 	duplicacy.RunInBackground = context.GlobalBool("background")
 }
 
@@ -208,15 +212,20 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
 	return true
 }
 
-func loadRSAPrivateKey(keyFile string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
+func loadRSAPrivateKey(keyFile string, passphrase string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
 	if keyFile == "" {
 		return
 	}
 
 	prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
-	passphrase := duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
-	backupManager.LoadRSAPrivateKey(keyFile, passphrase)
-	duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
+	if passphrase == "" {
+		passphrase = duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
+		backupManager.LoadRSAPrivateKey(keyFile, passphrase)
+		duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
+	} else {
+		backupManager.LoadRSAPrivateKey(keyFile, passphrase)
+	}
+
 }
 
 func initRepository(context *cli.Context) {
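The reworked loadRSAPrivateKey only prompts for (and saves) the passphrase when none was supplied; a passphrase passed in explicitly is used directly and never written back to the saved passwords. A minimal sketch of the two call paths ("private.pem" and "secret" are hypothetical values, not taken from this changeset):

	loadRSAPrivateKey("private.pem", "", preference, backupManager, false)       // prompts, or reuses the saved rsa_passphrase
	loadRSAPrivateKey("private.pem", "secret", preference, backupManager, false) // e.g. from -key-passphrase; used as-is, not saved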
@@ -817,7 +826,7 @@ func restoreRepository(context *cli.Context) {
 	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
 	duplicacy.SavePassword(*preference, "password", password)
 
-	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
 
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
@@ -870,7 +879,7 @@ func listSnapshots(context *cli.Context) {
 	showChunks := context.Bool("chunks")
 
 	// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
-	loadRSAPrivateKey(context.String("key"), preference, backupManager, resetPassword)
+	loadRSAPrivateKey(context.String("key"), "", preference, backupManager, resetPassword)
 
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
@@ -894,7 +903,12 @@ func checkSnapshots(context *cli.Context) {
 
 	runScript(context, preference.Name, "pre")
 
-	storage := duplicacy.CreateStorage(*preference, false, 1)
+	threads := context.Int("threads")
+	if threads < 1 {
+		threads = 1
+	}
+
+	storage := duplicacy.CreateStorage(*preference, false, threads)
 	if storage == nil {
 		return
 	}
@@ -910,7 +924,7 @@ func checkSnapshots(context *cli.Context) {
 	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
-	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
 
 	id := preference.SnapshotID
 	if context.Bool("all") {
@@ -922,11 +936,12 @@ func checkSnapshots(context *cli.Context) {
 	showStatistics := context.Bool("stats")
 	showTabular := context.Bool("tabular")
 	checkFiles := context.Bool("files")
+	checkChunks := context.Bool("chunks")
 	searchFossils := context.Bool("fossils")
 	resurrect := context.Bool("resurrect")
 
 	backupManager.SetupSnapshotCache(preference.Name)
-	backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
+	backupManager.SnapshotManager.CheckSnapshots(id, revisions, tag, showStatistics, showTabular, checkFiles, checkChunks, searchFossils, resurrect, threads)
 
 	runScript(context, preference.Name, "post")
 }
@@ -967,7 +982,7 @@ func printFile(context *cli.Context) {
 	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
-	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
 
 	backupManager.SetupSnapshotCache(preference.Name)
 
@@ -1025,7 +1040,7 @@ func diff(context *cli.Context) {
 	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
-	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
 
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
@@ -1174,7 +1189,7 @@ func copySnapshots(context *cli.Context) {
 	sourceManager.SetupSnapshotCache(source.Name)
 	duplicacy.SavePassword(*source, "password", sourcePassword)
 
-	loadRSAPrivateKey(context.String("key"), source, sourceManager, false)
+	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), source, sourceManager, false)
 
 	_, destination := getRepositoryPreference(context, context.String("to"))
 
@@ -1500,6 +1515,11 @@ func main() {
 			Usage:    "the RSA private key to decrypt file chunks",
 			Argument: "<private key>",
 		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Restore the repository to a previously saved snapshot",
 	ArgsUsage: "[--] [pattern] ...",
@@ -1589,6 +1609,10 @@ func main() {
 			Name:  "files",
 			Usage: "verify the integrity of every file",
 		},
+		cli.BoolFlag{
+			Name:  "chunks",
+			Usage: "verify the integrity of every chunk",
+		},
 		cli.BoolFlag{
 			Name:  "stats",
 			Usage: "show deduplication statistics (imply -all and all revisions)",
@@ -1607,6 +1631,17 @@ func main() {
 			Usage:    "the RSA private key to decrypt file chunks",
 			Argument: "<private key>",
 		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
+		cli.IntFlag{
+			Name:     "threads",
+			Value:    1,
+			Usage:    "number of threads used to verify chunks",
+			Argument: "<n>",
+		},
 	},
 	Usage:     "Check the integrity of snapshots",
 	ArgsUsage: " ",
@@ -1635,6 +1670,11 @@ func main() {
 			Usage:    "the RSA private key to decrypt file chunks",
 			Argument: "<private key>",
 		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Print to stdout the specified file, or the snapshot content if no file is specified",
 	ArgsUsage: "[<file>]",
@@ -1668,6 +1708,11 @@ func main() {
 			Usage:    "the RSA private key to decrypt file chunks",
 			Argument: "<private key>",
 		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage:     "Compare two snapshots or two revisions of a file",
 	ArgsUsage: "[<file>]",
@@ -1943,7 +1988,12 @@ func main() {
 		cli.StringFlag{
 			Name:     "key",
 			Usage:    "the RSA private key to decrypt file chunks from the source storage",
-			Argument: "<public key>",
+			Argument: "<private key>",
 		},
+		cli.StringFlag{
+			Name:     "key-passphrase",
+			Usage:    "the passphrase to decrypt the RSA private key",
+			Argument: "<private key passphrase>",
+		},
 	},
 	Usage: "Copy snapshots between compatible storages",
@@ -2053,13 +2103,18 @@ func main() {
 			Name:  "comment",
 			Usage: "add a comment to identify the process",
 		},
+		cli.StringSliceFlag{
+			Name:     "suppress, s",
+			Usage:    "suppress logs with the specified id",
+			Argument: "<id>",
+		},
 	}
 
 	app.HideVersion = true
 	app.Name = "duplicacy"
 	app.HelpName = "duplicacy"
 	app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-	app.Version = "2.4.1" + " (" + GitCommit + ")"
+	app.Version = "2.6.2" + " (" + GitCommit + ")"
 
 	// If the program is interrupted, call the RunAtError function.
 	c := make(chan os.Signal, 1)
src/duplicacy_backupmanager.go

@@ -211,6 +211,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		return true
 	}
 
+	if len(localSnapshot.Files) == 0 {
+		LOG_ERROR("SNAPSHOT_EMPTY", "No files under the repository to be backed up")
+		return false
+	}
+
 	// This cache contains all chunks referenced by last snasphot. Any other chunks will lead to a call to
 	// UploadChunk.
 	chunkCache := make(map[string]bool)
@@ -515,6 +520,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		chunkID := chunk.GetID()
 		chunkSize := chunk.GetLength()
 
+		if chunkSize == 0 {
+			LOG_DEBUG("CHUNK_EMPTY", "Ignored chunk %s of size 0", chunkID)
+			return
+		}
+
 		chunkIndex++
 
 		_, found := chunkCache[chunkID]
src/duplicacy_backupmanager_test.go

@@ -341,7 +341,7 @@ func TestBackupManager(t *testing.T) {
 		t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
 	}
 	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "",
-		/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
+		/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
 	backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil /*retentions*/, nil,
 		/*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
 	numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false)
@@ -349,7 +349,7 @@ func TestBackupManager(t *testing.T) {
 		t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots)
 	}
 	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3} /*tag*/, "",
-		/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
+		/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
 	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false)
 	backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil,
 		/*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1)
@@ -358,7 +358,7 @@ func TestBackupManager(t *testing.T) {
 		t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots)
 	}
 	backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{2, 3, 4} /*tag*/, "",
-		/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false)
+		/*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*checkChunks*/, false /*searchFossils*/, false /*resurrect*/, false, 1)
 
 	/*buf := make([]byte, 1<<16)
 	runtime.Stack(buf, true)
src/duplicacy_chunkdownloader.go

@@ -126,6 +126,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry)
 
 // AddChunk adds a single chunk the download list.
 func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
+
 	task := ChunkDownloadTask{
 		chunkIndex: len(downloader.taskList),
 		chunkHash:  chunkHash,
@@ -253,6 +254,47 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
 	return downloader.taskList[chunkIndex].chunk
 }
 
+// WaitForCompletion waits until all chunks have been downloaded
+func (downloader *ChunkDownloader) WaitForCompletion() {
+
+	// Tasks in completedTasks have not been counted by numberOfActiveChunks
+	downloader.numberOfActiveChunks -= len(downloader.completedTasks)
+
+	// find the completed task with the largest index; we'll start from the next index
+	for index := range downloader.completedTasks {
+		if downloader.lastChunkIndex < index {
+			downloader.lastChunkIndex = index
+		}
+	}
+
+	// Looping until there isn't a download task in progress
+	for downloader.numberOfActiveChunks > 0 || downloader.lastChunkIndex+1 < len(downloader.taskList) {
+
+		// Wait for a completion event first
+		if downloader.numberOfActiveChunks > 0 {
+			completion := <-downloader.completionChannel
+			downloader.config.PutChunk(completion.chunk)
+			downloader.numberOfActiveChunks--
+			downloader.numberOfDownloadedChunks++
+			downloader.numberOfDownloadingChunks--
+		}
+
+		// Pass the tasks one by one to the download queue
+		if downloader.lastChunkIndex+1 < len(downloader.taskList) {
+			task := &downloader.taskList[downloader.lastChunkIndex+1]
+			if task.isDownloading {
+				downloader.lastChunkIndex++
+				continue
+			}
+			downloader.taskQueue <- *task
+			task.isDownloading = true
+			downloader.numberOfDownloadingChunks++
+			downloader.numberOfActiveChunks++
+			downloader.lastChunkIndex++
+		}
+	}
+}
+
 // Stop terminates all downloading goroutines
 func (downloader *ChunkDownloader) Stop() {
 	for downloader.numberOfDownloadingChunks > 0 {
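A hedged sketch of how the new WaitForCompletion can be driven (chunkHashes is a hypothetical input slice; AddChunk and WaitForCompletion are the methods shown above). WaitForCompletion itself feeds not-yet-queued tasks to the download goroutines one by one, so a caller only needs to enqueue and wait:

	for _, chunkHash := range chunkHashes {
		downloader.AddChunk(chunkHash)
	}
	downloader.WaitForCompletion() // returns once every queued chunk has been downloaded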
src/duplicacy_config.go

@@ -172,11 +172,11 @@ func (config *Config) Print() {
 	LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
 	LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)
 
-	if len(config.ChunkKey) >= 0 {
+	if len(config.ChunkKey) > 0 {
 		LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
 	}
 
-	if len(config.FileKey) >= 0 {
+	if len(config.FileKey) > 0 {
 		LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
 	}
 
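The one-character fix above matters because len() in Go is never negative, even for a nil slice, so the old >= 0 comparison was always true and both trace lines were printed even for unencrypted storages. A self-contained illustration:

	var chunkKey []byte             // nil for an unencrypted storage
	fmt.Println(len(chunkKey) >= 0) // true — the old check was a tautology
	fmt.Println(len(chunkKey) > 0)  // false — the corrected check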
src/duplicacy_dropboxstorage.go

@@ -6,6 +6,7 @@ package duplicacy
 
 import (
 	"fmt"
+	"io/ioutil"
 	"strings"
 
 	"github.com/gilbertchen/go-dropbox"
@@ -199,6 +200,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
 	}
 
 	defer output.Body.Close()
+	defer ioutil.ReadAll(output.Body)
 
 	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
 	return err
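Draining the response body before closing it is the standard net/http idiom for letting the underlying connection return to the keep-alive pool; since deferred calls run last-in-first-out, the ReadAll above executes before the Close. The same idiom in isolation (the URL is a hypothetical placeholder):

	resp, err := http.Get("https://content.example.com/chunk")
	if err != nil {
		return err
	}
	defer resp.Body.Close()         // registered first, runs last
	defer ioutil.ReadAll(resp.Body) // runs first, consuming any unread bytes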
618  src/duplicacy_filefabricstorage.go  Normal file
@@ -0,0 +1,618 @@
// Copyright (c) Storage Made Easy. All rights reserved.
//
// This storage backend is contributed by Storage Made Easy (https://storagemadeeasy.com/) to be used in
// Duplicacy and its derivative works.
//

package duplicacy

import (
	"io"
	"fmt"
	"time"
	"sync"
	"bytes"
	"errors"
	"strings"
	"net/url"
	"net/http"
	"math/rand"
	"io/ioutil"
	"encoding/xml"
	"path/filepath"
	"mime/multipart"
)

// The XML element representing a file returned by the File Fabric server
type FileFabricFile struct {
	XMLName xml.Name
	ID      string `xml:"fi_id"`
	Path    string `xml:"path"`
	Size    int64  `xml:"fi_size"`
	Type    int    `xml:"fi_type"`
}

// The XML element representing a file list returned by the server
type FileFabricFileList struct {
	XMLName xml.Name         `xml:"files"`
	Files   []FileFabricFile `xml:",any"`
}

type FileFabricStorage struct {
	StorageBase

	endpoint     string // the server
	authToken    string // the authentication token
	accessToken  string // the access token (as returned by getTokenByAuthToken)
	storageDir   string // the path of the storage directory
	storageDirID string // the id of 'storageDir'

	client             *http.Client      // the default http client
	threads            int               // number of threads
	maxRetries         int               // maximum number of tries
	directoryCache     map[string]string // stores ids for directories known to this backend
	directoryCacheLock sync.Mutex        // lock for accessing directoryCache

	isAuthorized bool
	testMode     bool
}

var (
	errFileFabricAuthorizationFailure = errors.New("Authentication failure")
	errFileFabricDirectoryExists      = errors.New("Directory exists")
)

// The general server response
type FileFabricResponse struct {
	Status  string `xml:"status"`
	Message string `xml:"statusmessage"`
}

// Check the server response and return an error representing the error message it contains
func checkFileFabricResponse(response FileFabricResponse, actionFormat string, actionArguments ...interface{}) error {

	action := fmt.Sprintf(actionFormat, actionArguments...)
	if response.Status == "ok" && response.Message == "Success" {
		return nil
	} else if response.Status == "error_data" {
		if response.Message == "Folder with same name already exists." {
			return errFileFabricDirectoryExists
		}
	}

	return fmt.Errorf("Failed to %s (status: %s, message: %s)", action, response.Status, response.Message)
}

// Create a File Fabric storage backend
func CreateFileFabricStorage(endpoint string, token string, storageDir string, threads int) (storage *FileFabricStorage, err error) {

	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &FileFabricStorage{
		endpoint:       endpoint,
		authToken:      token,
		client:         http.DefaultClient,
		threads:        threads,
		directoryCache: make(map[string]string),
		maxRetries:     12,
	}

	err = storage.getAccessToken()
	if err != nil {
		return nil, err
	}

	storageDirID, isDir, _, err := storage.getFileInfo(0, storageDir)
	if err != nil {
		return nil, err
	}
	if storageDirID == "" {
		return nil, fmt.Errorf("Storage path %s does not exist", storageDir)
	}
	if !isDir {
		return nil, fmt.Errorf("Storage path %s is not a directory", storageDir)
	}
	storage.storageDir = storageDir
	storage.storageDirID = storageDirID

	for _, dir := range []string{"snapshots", "chunks"} {
		storage.CreateDirectory(0, dir)
	}

	storage.DerivedStorage = storage
	storage.SetDefaultNestingLevels([]int{0}, 0)
	return storage, nil
}

// Retrieve the access token using an auth token
func (storage *FileFabricStorage) getAccessToken() error {

	formData := url.Values{"authtoken": {storage.authToken}}
	readCloser, _, _, err := storage.sendRequest(0, http.MethodPost, storage.getAPIURL("getTokenByAuthToken"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		Token string `xml:"token"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "request the access token")
	if err != nil {
		return err
	}

	storage.accessToken = output.Token
	return nil
}

// Determine if we should retry based on the number of retries given by 'retry' and if so calculate the delay with exponential backoff
func (storage *FileFabricStorage) shouldRetry(retry int, messageFormat string, messageArguments ...interface{}) bool {
	message := fmt.Sprintf(messageFormat, messageArguments...)

	if retry >= storage.maxRetries {
		LOG_WARN("FILEFABRIC_REQUEST", "%s", message)
		return false
	}
	backoff := 1 << uint(retry)
	if backoff > 60 {
		backoff = 60
	}
	delay := rand.Intn(backoff*500) + backoff*500
	LOG_INFO("FILEFABRIC_RETRY", "%s; retrying after %.1f seconds", message, float32(delay)/1000.0)
	time.Sleep(time.Duration(delay) * time.Millisecond)
	return true
}
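// A worked example of the backoff arithmetic above (illustrative note, not part
// of the contributed file): the delay is drawn uniformly from
// [backoff*500, backoff*1000) milliseconds, where backoff = min(2^retry, 60).
// For retry = 3:
//   backoff := 1 << 3                 // 8
//   delay := rand.Intn(8*500) + 8*500 // 4000..7999 ms, i.e. 4.0-8.0 seconds
// From retry = 6 onwards the backoff is capped at 60, giving 30-60 seconds per attempt.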

// Send a request to the server
func (storage *FileFabricStorage) sendRequest(threadIndex int, method string, requestURL string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {

	var response *http.Response

	for retries := 0; ; retries++ {
		var inputReader io.Reader

		switch input.(type) {
		case url.Values:
			values := input.(url.Values)
			inputReader = strings.NewReader(values.Encode())
			if requestHeaders == nil {
				requestHeaders = make(map[string]string)
			}
			requestHeaders["Content-Type"] = "application/x-www-form-urlencoded"
		case *RateLimitedReader:
			rateLimitedReader := input.(*RateLimitedReader)
			rateLimitedReader.Reset()
			inputReader = rateLimitedReader
		default:
			LOG_FATAL("FILEFABRIC_REQUEST", "Input type is not supported")
			return nil, nil, 0, fmt.Errorf("Input type is not supported")
		}

		request, err := http.NewRequest(method, requestURL, inputReader)
		if err != nil {
			return nil, nil, 0, err
		}

		if requestHeaders != nil {
			for key, value := range requestHeaders {
				request.Header.Set(key, value)
			}
		}

		if _, ok := input.(*RateLimitedReader); ok {
			request.ContentLength = input.(*RateLimitedReader).Length()
		}

		response, err = storage.client.Do(request)
		if err != nil {
			if !storage.shouldRetry(retries, "[%d] %s %s returned an error: %v", threadIndex, method, requestURL, err) {
				return nil, nil, 0, err
			}
			continue
		}

		if response.StatusCode < 300 {
			return response.Body, response.Header, response.ContentLength, nil
		}

		defer response.Body.Close()
		defer io.Copy(ioutil.Discard, response.Body)

		var output struct {
			Status  string `xml:"status"`
			Message string `xml:"statusmessage"`
		}

		err = xml.NewDecoder(response.Body).Decode(&output)
		if err != nil {
			if !storage.shouldRetry(retries, "[%d] %s %s returned an invalid response: %v", threadIndex, method, requestURL, err) {
				return nil, nil, 0, err
			}
			continue
		}

		if !storage.shouldRetry(retries, "[%d] %s %s returned status: %s, message: %s", threadIndex, method, requestURL, output.Status, output.Message) {
			return nil, nil, 0, err
		}
	}

}

func (storage *FileFabricStorage) getAPIURL(function string) string {
	if storage.accessToken == "" {
		return "https://" + storage.endpoint + "/api/*/" + function + "/"
	} else {
		return "https://" + storage.endpoint + "/api/" + storage.accessToken + "/" + function + "/"
	}
}

// ListFiles return the list of files and subdirectories under 'dir'. A subdirectories returned must have a trailing '/', with
// a size of 0. If 'dir' is 'snapshots', only subdirectories will be returned. If 'dir' is 'snapshots/repository_id', then only
// files will be returned. If 'dir' is 'chunks', the implementation can return the list either recusively or non-recusively.
func (storage *FileFabricStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if dir != "" && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	dirID, _, _, err := storage.getFileInfo(threadIndex, dir)
	if err != nil {
		return nil, nil, err
	}

	if dirID == "" {
		return nil, nil, nil
	}

	lastID := ""

	for {
		formData := url.Values{"marker": {lastID}, "limit": {"1000"}, "includefolders": {"n"}, "fi_pid": {dirID}}
		if dir == "snapshots/" {
			formData["includefolders"] = []string{"y"}
		}
		if storage.testMode {
			formData["limit"] = []string{"5"}
		}

		readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getListOfFiles"), nil, formData)
		if err != nil {
			return nil, nil, err
		}

		defer readCloser.Close()
		defer io.Copy(ioutil.Discard, readCloser)

		var output struct {
			FileFabricResponse
			FileList  FileFabricFileList `xml:"files"`
			Truncated int                `xml:"truncated"`
		}

		err = xml.NewDecoder(readCloser).Decode(&output)
		if err != nil {
			return nil, nil, err
		}

		err = checkFileFabricResponse(output.FileFabricResponse, "list the storage directory '%s'", dir)
		if err != nil {
			return nil, nil, err
		}

		if dir == "snapshots/" {
			for _, file := range output.FileList.Files {
				if file.Type == 1 {
					files = append(files, file.Path+"/")
				}
				lastID = file.ID
			}
		} else {
			for _, file := range output.FileList.Files {
				if file.Type == 0 {
					files = append(files, file.Path)
					sizes = append(sizes, file.Size)
				}
				lastID = file.ID
			}
		}

		if output.Truncated != 1 {
			break
		}
	}
	return files, sizes, nil
}

// getFileInfo returns the information about the file or directory at 'filePath'.
func (storage *FileFabricStorage) getFileInfo(threadIndex int, filePath string) (fileID string, isDir bool, size int64, err error) {

	formData := url.Values{"path": {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("checkPathExists"), nil, formData)
	if err != nil {
		return "", false, 0, err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File   FileFabricFile `xml:"file"`
		Exists string         `xml:"exists"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", false, 0, err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "get the info on '%s'", filePath)
	if err != nil {
		return "", false, 0, err
	}

	if output.Exists != "y" {
		return "", false, 0, nil
	} else {
		if output.File.Type == 1 {
			for filePath != "" && filePath[len(filePath)-1] == '/' {
				filePath = filePath[:len(filePath)-1]
			}

			storage.directoryCacheLock.Lock()
			storage.directoryCache[filePath] = output.File.ID
			storage.directoryCacheLock.Unlock()
		}
		return output.File.ID, output.File.Type == 1, output.File.Size, nil
	}
}

// GetFileInfo returns the information about the file or directory at 'filePath'. This is a function required by the Storage interface.
func (storage *FileFabricStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	fileID := ""
	fileID, isDir, size, err = storage.getFileInfo(threadIndex, filePath)
	return fileID != "", isDir, size, err
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *FileFabricStorage) DeleteFile(threadIndex int, filePath string) (err error) {

	fileID, _, _, _ := storage.getFileInfo(threadIndex, filePath)
	if fileID == "" {
		return nil
	}

	formData := url.Values{"fi_id": {fileID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doDeleteFile"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "delete file '%s'", filePath)
	if err != nil {
		return err
	}

	return nil
}

// MoveFile renames the file.
func (storage *FileFabricStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	fileID, _, _, _ := storage.getFileInfo(threadIndex, from)
	if fileID == "" {
		return nil
	}

	formData := url.Values{"fi_id": {fileID}, "fi_name": {filepath.Base(to)}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doRenameFile"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "rename file '%s' to '%s'", from, to)
	if err != nil {
		return err
	}

	return nil
}

// createParentDirectory creates the parent directory if it doesn't exist in the cache.
func (storage *FileFabricStorage) createParentDirectory(threadIndex int, dir string) (parentID string, err error) {

	found := strings.LastIndex(dir, "/")
	if found == -1 {
		return storage.storageDirID, nil
	}
	parent := dir[:found]

	storage.directoryCacheLock.Lock()
	parentID = storage.directoryCache[parent]
	storage.directoryCacheLock.Unlock()

	if parentID != "" {
		return parentID, nil
	}

	parentID, err = storage.createDirectory(threadIndex, parent)
	if err != nil {
		if err == errFileFabricDirectoryExists {
			var isDir bool
			parentID, isDir, _, err = storage.getFileInfo(threadIndex, parent)
			if err != nil {
				return "", err
			}
			if isDir == false {
				return "", fmt.Errorf("'%s' in the storage is a file", parent)
			}
			storage.directoryCacheLock.Lock()
			storage.directoryCache[parent] = parentID
			storage.directoryCacheLock.Unlock()
			return parentID, nil
		} else {
			return "", err
		}
	}
	return parentID, nil
}

// createDirectory creates a new directory.
func (storage *FileFabricStorage) createDirectory(threadIndex int, dir string) (dirID string, err error) {
	for dir != "" && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	parentID, err := storage.createParentDirectory(threadIndex, dir)
	if err != nil {
		return "", err
	}

	formData := url.Values{"fi_name": {filepath.Base(dir)}, "fi_pid": {parentID}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doCreateNewFolder"), nil, formData)
	if err != nil {
		return "", err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output struct {
		FileFabricResponse
		File FileFabricFile `xml:"file"`
	}

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return "", err
	}

	err = checkFileFabricResponse(output.FileFabricResponse, "create directory '%s'", dir)
	if err != nil {
		return "", err
	}

	storage.directoryCacheLock.Lock()
	storage.directoryCache[dir] = output.File.ID
	storage.directoryCacheLock.Unlock()

	return output.File.ID, nil
}

func (storage *FileFabricStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	_, err = storage.createDirectory(threadIndex, dir)
	if err == errFileFabricDirectoryExists {
		return nil
	}
	return err
}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *FileFabricStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	formData := url.Values{"fi_id": {storage.storageDir + filePath}}

	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("getFile"), nil, formData)
	if err != nil {
		return err
	}

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)
	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.threads)
	return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *FileFabricStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	parentID, err := storage.createParentDirectory(threadIndex, filePath)
	if err != nil {
		return err
	}

	fileName := filepath.Base(filePath)
	requestBody := &bytes.Buffer{}
	writer := multipart.NewWriter(requestBody)
	part, _ := writer.CreateFormFile("file_1", fileName)
	part.Write(content)

	writer.WriteField("file_name1", fileName)
	writer.WriteField("fi_pid", parentID)
	writer.WriteField("fi_structtype", "g")
	writer.Close()

	headers := make(map[string]string)
	headers["Content-Type"] = writer.FormDataContentType()

	rateLimitedReader := CreateRateLimitedReader(requestBody.Bytes(), storage.UploadRateLimit/storage.threads)
	readCloser, _, _, err := storage.sendRequest(threadIndex, http.MethodPost, storage.getAPIURL("doUploadFiles"), headers, rateLimitedReader)

	defer readCloser.Close()
	defer io.Copy(ioutil.Discard, readCloser)

	var output FileFabricResponse

	err = xml.NewDecoder(readCloser).Decode(&output)
	if err != nil {
		return err
	}

	err = checkFileFabricResponse(output, "upload file '%s'", filePath)
	if err != nil {
		return err
	}

	return nil
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileFabricStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *FileFabricStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *FileFabricStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of files names.
func (storage *FileFabricStorage) IsFastListing() bool { return false }

// Enable the test mode.
func (storage *FileFabricStorage) EnableTestMode() { storage.testMode = true }
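A hedged usage sketch of the new backend: the endpoint, token, and storage path below are hypothetical placeholders, and in practice they come from the configured storage URL and saved credentials.

	storage, err := CreateFileFabricStorage("myorg.filefabric.example", "auth-token-123", "backups/duplicacy", 4)
	if err != nil {
		LOG_ERROR("STORAGE_CREATE", "Failed to create the File Fabric storage: %v", err)
		return
	}
	files, _, err := storage.ListFiles(0, "snapshots") // per the contract above, returns only subdirectories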
src/duplicacy_filestorage.go

@@ -12,6 +12,7 @@ import (
 	"os"
 	"path"
 	"strings"
+	"syscall"
 	"time"
 )
 
@@ -190,10 +191,13 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
 		return err
 	}
 
-	err = file.Sync()
-	if err != nil {
-		file.Close()
-		return err
+	if err = file.Sync(); err != nil {
+		pathErr, ok := err.(*os.PathError)
+		isNotSupported := ok && pathErr.Op == "sync" && pathErr.Err == syscall.ENOTSUP
+		if !isNotSupported {
+			_ = file.Close()
+			return err
+		}
 	}
 
 	err = file.Close()
@@ -20,13 +20,16 @@ import (
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/drive/v3"
|
||||
"google.golang.org/api/googleapi"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
var (
|
||||
GCDFileMimeType = "application/octet-stream"
|
||||
GCDDirectoryMimeType = "application/vnd.google-apps.folder"
|
||||
GCDUserDrive = "root"
|
||||
)
|
||||
|
||||
type GCDStorage struct {
|
||||
@@ -37,6 +40,7 @@ type GCDStorage struct {
|
||||
idCacheLock sync.Mutex
|
||||
backoffs []int // desired backoff time in seconds for each thread
|
||||
attempts []int // number of failed attempts since last success for each thread
|
||||
driveID string // the ID of the shared drive or 'root' (GCDUserDrive) if the user's drive
|
||||
|
||||
createDirectoryLock sync.Mutex
|
||||
isConnected bool
|
||||
@@ -82,6 +86,10 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
|
||||
// Request timeout
|
||||
message = e.Message
|
||||
retry = true
|
||||
} else if e.Code == 400 && strings.Contains(e.Message, "failedPrecondition") {
|
||||
// Daily quota exceeded
|
||||
message = e.Message
|
||||
retry = true
|
||||
} else if e.Code == 401 {
|
||||
// Only retry on authorization error when storage has been connected before
|
||||
if storage.isConnected {
|
||||
@@ -191,7 +199,11 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
|
||||
var err error
|
||||
|
||||
for {
|
||||
fileList, err = storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount).Do()
|
||||
q := storage.service.Files.List().Q(query).Fields("nextPageToken", "files(name, mimeType, id, size)").PageToken(startToken).PageSize(maxCount)
|
||||
if storage.driveID != GCDUserDrive {
|
||||
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
|
||||
}
|
||||
fileList, err = q.Do()
|
||||
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||
break
|
||||
} else if retry {
|
||||
@@ -219,7 +231,11 @@ func (storage *GCDStorage) listByName(threadIndex int, parentID string, name str
|
||||
|
||||
for {
|
||||
query := "name = '" + name + "' and '" + parentID + "' in parents and trashed = false "
|
||||
fileList, err = storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)").Do()
|
||||
q := storage.service.Files.List().Q(query).Fields("files(name, mimeType, id, size)")
|
||||
if storage.driveID != GCDUserDrive {
|
||||
q = q.DriveId(storage.driveID).IncludeItemsFromAllDrives(true).Corpora("drive").SupportsAllDrives(true)
|
||||
}
|
||||
fileList, err = q.Do()
|
||||
|
||||
if retry, e := storage.shouldRetry(threadIndex, err); e == nil && !retry {
|
||||
break
|
||||
@@ -248,7 +264,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
|
||||
return fileID, nil
|
||||
}
|
||||
|
||||
fileID := "root"
|
||||
fileID := storage.driveID
|
||||
|
||||
if rootID, ok := storage.findPathID(""); ok {
|
||||
fileID = rootID
|
||||
@@ -303,37 +319,85 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
}

// CreateGCDStorage creates a GCD storage object.
func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storage *GCDStorage, err error) {
func CreateGCDStorage(tokenFile string, driveID string, storagePath string, threads int) (storage *GCDStorage, err error) {

    ctx := context.Background()

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    gcdConfig := &GCDConfig{}
    if err := json.Unmarshal(description, gcdConfig); err != nil {
        return nil, err
    }
    var object map[string]interface{}

    oauth2Config := oauth2.Config{
        ClientID:     gcdConfig.ClientID,
        ClientSecret: gcdConfig.ClientSecret,
        Endpoint:     gcdConfig.Endpoint,
    }

    authClient := oauth2Config.Client(context.Background(), &gcdConfig.Token)

    service, err := drive.New(authClient)
    err = json.Unmarshal(description, &object)
    if err != nil {
        return nil, err
    }

    isServiceAccount := false
    if value, ok := object["type"]; ok {
        if authType, ok := value.(string); ok && authType == "service_account" {
            isServiceAccount = true
        }
    }

    var tokenSource oauth2.TokenSource

    if isServiceAccount {
        config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
        if err != nil {
            return nil, err
        }
        tokenSource = config.TokenSource(ctx)
    } else {
        gcdConfig := &GCDConfig{}
        if err := json.Unmarshal(description, gcdConfig); err != nil {
            return nil, err
        }

        config := oauth2.Config{
            ClientID:     gcdConfig.ClientID,
            ClientSecret: gcdConfig.ClientSecret,
            Endpoint:     gcdConfig.Endpoint,
        }
        tokenSource = config.TokenSource(ctx, &gcdConfig.Token)
    }

    service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
    if err != nil {
        return nil, err
    }

    if len(driveID) == 0 {
        driveID = GCDUserDrive
    } else {
        driveList, err := drive.NewTeamdrivesService(service).List().Do()
        if err != nil {
            return nil, fmt.Errorf("Failed to look up the drive id: %v", err)
        }

        found := false
        for _, teamDrive := range driveList.TeamDrives {
            if teamDrive.Id == driveID || teamDrive.Name == driveID {
                driveID = teamDrive.Id
                found = true
                break
            }
        }

        if !found {
            return nil, fmt.Errorf("%s is not the id or name of a shared drive", driveID)
        }
    }

    storage = &GCDStorage{
        service:         service,
        numberOfThreads: threads,
        idCache:         make(map[string]string),
        backoffs:        make([]int, threads),
        attempts:        make([]int, threads),
        driveID:         driveID,
    }

    for i := range storage.backoffs {

@@ -341,6 +405,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
        storage.attempts[i] = 0
    }

    storage.savePathID("", driveID)
    storagePathID, err := storage.getIDFromPath(0, storagePath, true)
    if err != nil {
        return nil, err
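As a side note, the two authentication paths introduced above can be sketched in isolation: the presence of `"type": "service_account"` in the token file decides between a JWT token source and an OAuth token source. This is a minimal, illustrative program; `credentials.json` is a placeholder path, and the non-service-account branch is simplified to a static token rather than the full client-id/secret/endpoint layout the real token file carries:

```go
package main

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"log"

	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	"google.golang.org/api/drive/v3"
	"google.golang.org/api/option"
)

func main() {
	ctx := context.Background()
	description, err := ioutil.ReadFile("credentials.json") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// A service account file carries "type": "service_account"; an OAuth
	// token file does not, so that key decides which token source to build.
	var object map[string]interface{}
	if err := json.Unmarshal(description, &object); err != nil {
		log.Fatal(err)
	}

	var tokenSource oauth2.TokenSource
	if t, _ := object["type"].(string); t == "service_account" {
		config, err := google.JWTConfigFromJSON(description, drive.DriveScope)
		if err != nil {
			log.Fatal(err)
		}
		tokenSource = config.TokenSource(ctx)
	} else {
		// Simplified: assume the file is a plain oauth2 token.
		var token oauth2.Token
		if err := json.Unmarshal(description, &token); err != nil {
			log.Fatal(err)
		}
		tokenSource = oauth2.StaticTokenSource(&token)
	}

	service, err := drive.NewService(ctx, option.WithTokenSource(tokenSource))
	if err != nil {
		log.Fatal(err)
	}
	_ = service
}
```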
@@ -415,39 +480,76 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
        }
        return files, nil, nil
    } else {
        files := []string{}
        sizes := []int64{}
        lock := sync.Mutex{}
        allFiles := []string{}
        allSizes := []int64{}

        errorChannel := make(chan error)
        directoryChannel := make(chan string)
        activeWorkers := 0

        parents := []string{"chunks", "fossils"}
        for i := 0; i < len(parents); i++ {
            parent := parents[i]
            pathID, ok := storage.findPathID(parent)
            if !ok {
                continue
            }
            entries, err := storage.listFiles(threadIndex, pathID, true, true)
            if err != nil {
                return nil, nil, err
            }
            for _, entry := range entries {
                if entry.MimeType != GCDDirectoryMimeType {
                    name := entry.Name
                    if strings.HasPrefix(parent, "fossils") {
                        name = parent + "/" + name + ".fsl"
                        name = name[len("fossils/"):]
                    } else {
                        name = parent + "/" + name
                        name = name[len("chunks/"):]
        for len(parents) > 0 || activeWorkers > 0 {

            if len(parents) > 0 && activeWorkers < storage.numberOfThreads {
                parent := parents[0]
                parents = parents[1:]
                activeWorkers++
                go func(parent string) {
                    pathID, ok := storage.findPathID(parent)
                    if !ok {
                        return
                    }
                    entries, err := storage.listFiles(threadIndex, pathID, true, true)
                    if err != nil {
                        errorChannel <- err
                        return
                    }

                    LOG_DEBUG("GCD_STORAGE", "Listing %s; %d items returned", parent, len(entries))

                    files := []string{}
                    sizes := []int64{}
                    for _, entry := range entries {
                        if entry.MimeType != GCDDirectoryMimeType {
                            name := entry.Name
                            if strings.HasPrefix(parent, "fossils") {
                                name = parent + "/" + name + ".fsl"
                                name = name[len("fossils/"):]
                            } else {
                                name = parent + "/" + name
                                name = name[len("chunks/"):]
                            }
                            files = append(files, name)
                            sizes = append(sizes, entry.Size)
                        } else {
                            directoryChannel <- parent + "/" + entry.Name
                            storage.savePathID(parent+"/"+entry.Name, entry.Id)
                        }
                    }
                    lock.Lock()
                    allFiles = append(allFiles, files...)
                    allSizes = append(allSizes, sizes...)
                    lock.Unlock()
                    directoryChannel <- ""
                }(parent)
            }

            if activeWorkers > 0 {
                select {
                case err := <-errorChannel:
                    return nil, nil, err
                case directory := <-directoryChannel:
                    if directory == "" {
                        activeWorkers--
                    } else {
                        parents = append(parents, directory)
                    }
                    files = append(files, name)
                    sizes = append(sizes, entry.Size)
                } else {
                    parents = append(parents, parent+"/"+entry.Name)
                    storage.savePathID(parent+"/"+entry.Name, entry.Id)
                }
            }
        }
        return files, sizes, nil

        return allFiles, allSizes, nil
    }

}
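The pattern in this hunk — a breadth-first directory walk where a queue feeds up to numberOfThreads goroutines, and an empty string on the channel signals a finished worker — can be sketched independently of the Drive client. listDir and the seed names below are illustrative stand-ins:

```go
package main

import (
	"fmt"
	"sync"
)

// listDir is a stand-in for a real storage listing call; it returns the
// subdirectories of dir from a hypothetical fixed tree.
func listDir(dir string) []string {
	tree := map[string][]string{"chunks": {"chunks/00", "chunks/01"}, "fossils": {}}
	return tree[dir]
}

func walk(seeds []string, maxWorkers int) []string {
	var (
		lock    sync.Mutex
		visited []string
	)
	directoryChannel := make(chan string)
	parents := append([]string{}, seeds...)
	activeWorkers := 0

	for len(parents) > 0 || activeWorkers > 0 {
		if len(parents) > 0 && activeWorkers < maxWorkers {
			parent := parents[0]
			parents = parents[1:]
			activeWorkers++
			go func(parent string) {
				for _, sub := range listDir(parent) {
					directoryChannel <- sub // enqueue a subdirectory for another worker
				}
				lock.Lock()
				visited = append(visited, parent)
				lock.Unlock()
				directoryChannel <- "" // empty string marks this worker as done
			}(parent)
		}
		if activeWorkers > 0 {
			if directory := <-directoryChannel; directory == "" {
				activeWorkers--
			} else {
				parents = append(parents, directory)
			}
		}
	}
	return visited
}

func main() {
	fmt.Println(walk([]string{"chunks", "fossils"}, 4))
}
```

The loop drains exactly one channel message per iteration, so workers block on their unbuffered sends until the scheduler catches up, which bounds memory regardless of directory fan-out.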
@@ -462,7 +564,7 @@ func (storage *GCDStorage) DeleteFile(threadIndex int, filePath string) (err err
    }

    for {
        err = storage.service.Files.Delete(fileID).Fields("id").Do()
        err = storage.service.Files.Delete(fileID).SupportsAllDrives(true).Fields("id").Do()
        if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
            storage.deletePathID(filePath)
            return nil
@@ -508,7 +610,7 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
    }

    for {
        _, err = storage.service.Files.Update(fileID, nil).AddParents(toParentID).RemoveParents(fromParentID).Do()
        _, err = storage.service.Files.Update(fileID, nil).SupportsAllDrives(true).AddParents(toParentID).RemoveParents(fromParentID).Do()
        if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
            break
        } else if retry {
@@ -559,7 +661,7 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
        Parents: []string{parentID},
    }

    file, err = storage.service.Files.Create(file).Fields("id").Do()
    file, err = storage.service.Files.Create(file).SupportsAllDrives(true).Fields("id").Do()
    if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
        break
    } else {
@@ -630,7 +732,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk
    for {
        // AcknowledgeAbuse(true) lets the download proceed even if GCD thinks that it contains malware.
        // TODO: Should this prompt the user or log a warning?
        req := storage.service.Files.Get(fileID)
        req := storage.service.Files.Get(fileID).SupportsAllDrives(true)
        if e, ok := err.(*googleapi.Error); ok {
            if strings.Contains(err.Error(), "cannotDownloadAbusiveFile") || len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
                LOG_WARN("GCD_STORAGE", "%s is marked as abusive, will download anyway.", filePath)
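For reference, a minimal sketch of the abusive-file handling with the drive/v3 client: inspect the googleapi.Error reason and retry the download with AcknowledgeAbuse(true). The fileID and the service wiring are placeholders:

```go
package main

import (
	"io"
	"os"

	"google.golang.org/api/drive/v3"
	"google.golang.org/api/googleapi"
)

// download fetches fileID, acknowledging the abuse flag only after the
// server has refused the plain download once.
func download(service *drive.Service, fileID string, w io.Writer) error {
	acknowledge := false
	for {
		call := service.Files.Get(fileID).SupportsAllDrives(true)
		if acknowledge {
			call = call.AcknowledgeAbuse(true)
		}
		response, err := call.Download()
		if err != nil {
			if e, ok := err.(*googleapi.Error); ok && !acknowledge &&
				len(e.Errors) > 0 && e.Errors[0].Reason == "cannotDownloadAbusiveFile" {
				acknowledge = true // retry once with the flag set
				continue
			}
			return err
		}
		defer response.Body.Close()
		_, err = io.Copy(w, response.Body)
		return err
	}
}

func main() {
	// Construction of the authenticated service is omitted; see the
	// CreateGCDStorage hunk above.
	var service *drive.Service
	if service == nil {
		return // illustration only
	}
	_ = download(service, "fileID", os.Stdout)
}
```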
@@ -676,7 +778,7 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content

    for {
        reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
        _, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
        _, err = storage.service.Files.Create(file).SupportsAllDrives(true).Media(reader).Fields("id").Do()
        if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
            break
        } else if retry {

@@ -7,10 +7,12 @@ package duplicacy
import (
    "fmt"
    "os"
    "log"
    "runtime/debug"
    "sync"
    "testing"
    "time"
    "regexp"
)

const (
@@ -43,6 +45,13 @@ func setTestingT(t *testing.T) {
    testingT = t
}

// Contains the ids of logs that won't be displayed
var suppressedLogs map[string]bool = map[string]bool{}

func SuppressLog(id string) {
    suppressedLogs[id] = true
}

func getLevelName(level int) string {
    switch level {
    case DEBUG:
@@ -143,6 +152,12 @@ func logf(level int, logID string, format string, v ...interface{}) {
    defer logMutex.Unlock()

    if level >= loggingLevel {
        if level <= ERROR && len(suppressedLogs) > 0 {
            if _, found := suppressedLogs[logID]; found {
                return
            }
        }

        if printLogHeader {
            fmt.Printf("%s %s %s %s\n",
                now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
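A hypothetical test using the new hook might look like this; it only compiles inside the duplicacy package, and the log id is illustrative:

```go
package duplicacy

import "testing"

// Illustrative only: suppress an expected ERROR log inside a test so a
// deliberately provoked failure doesn't abort the test run.
func TestExpectedFailure(t *testing.T) {
	setTestingT(t)
	SuppressLog("SNAPSHOT_CHECK") // id of the log this test expects to trigger
	// ... exercise code that calls LOG_ERROR("SNAPSHOT_CHECK", ...) ...
}
```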
@@ -161,6 +176,32 @@ func logf(level int, logID string, format string, v ...interface{}) {
    }
}

// Set up logging for libraries that Duplicacy depends on. They can call 'log.Printf("[ID] message")'
// to produce logs in Duplicacy's format
type Logger struct {
    formatRegex *regexp.Regexp
}

func (logger *Logger) Write(line []byte) (n int, err error) {
    n = len(line)
    for len(line) > 0 && line[len(line)-1] == '\n' {
        line = line[:len(line)-1]
    }
    matched := logger.formatRegex.FindStringSubmatch(string(line))
    if matched != nil {
        LOG_INFO(matched[1], "%s", matched[2])
    } else {
        LOG_INFO("LOG_DEFAULT", "%s", line)
    }

    return
}

func init() {
    log.SetFlags(0)
    log.SetOutput(&Logger{formatRegex: regexp.MustCompile(`^\[(.+)\]\s*(.+)`)})
}

const (
    duplicacyExitCode = 100
    otherExitCode     = 101

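The same io.Writer-adapter trick works anywhere the standard log package needs to be redirected into a custom sink; a self-contained sketch whose `[ID]` convention mirrors the hunk above:

```go
package main

import (
	"fmt"
	"log"
	"regexp"
)

// tagWriter routes standard-library log output: lines shaped like
// "[ID] message" keep their id, everything else gets a default tag.
type tagWriter struct {
	format *regexp.Regexp
}

func (w *tagWriter) Write(line []byte) (int, error) {
	n := len(line)
	for len(line) > 0 && line[len(line)-1] == '\n' {
		line = line[:len(line)-1] // strip the trailing newline log.Printf appends
	}
	if m := w.format.FindStringSubmatch(string(line)); m != nil {
		fmt.Printf("%s %s\n", m[1], m[2])
	} else {
		fmt.Printf("DEFAULT %s\n", line)
	}
	return n, nil
}

func main() {
	log.SetFlags(0) // drop the default timestamp; the sink formats lines itself
	log.SetOutput(&tagWriter{format: regexp.MustCompile(`^\[(.+)\]\s*(.+)`)})

	log.Printf("[SFTP_LOG] connection established")
	log.Printf("plain message")
}
```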
@@ -15,6 +15,7 @@ import (
    "strings"
    "sync"
    "time"
    "path/filepath"

    "golang.org/x/oauth2"
)
@@ -32,9 +33,6 @@ type OneDriveErrorResponse struct {
    Error OneDriveError `json:"error"`
}

var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
var OneDriveAPIURL = "https://api.onedrive.com/v1.0"

type OneDriveClient struct {
    HTTPClient *http.Client

@@ -44,9 +42,13 @@ type OneDriveClient struct {

    IsConnected bool
    TestMode    bool

    IsBusiness      bool
    RefreshTokenURL string
    APIURL          string
}

func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
func NewOneDriveClient(tokenFile string, isBusiness bool) (*OneDriveClient, error) {

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {

@@ -63,6 +65,15 @@ func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
        TokenFile:  tokenFile,
        Token:      token,
        TokenLock:  &sync.Mutex{},
        IsBusiness: isBusiness,
    }

    if isBusiness {
        client.RefreshTokenURL = "https://duplicacy.com/odb_refresh"
        client.APIURL = "https://graph.microsoft.com/v1.0/me"
    } else {
        client.RefreshTokenURL = "https://duplicacy.com/one_refresh"
        client.APIURL = "https://api.onedrive.com/v1.0"
    }

    client.RefreshToken(false)
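A minimal sketch of the per-account endpoint split in isolation — the URLs are the ones the hunk assigns, everything else is stripped away:

```go
package main

import "fmt"

type endpoints struct {
	refreshTokenURL string
	apiURL          string
}

// pick returns the token-refresh and API base URLs for a OneDrive
// Personal or a OneDrive Business account.
func pick(isBusiness bool) endpoints {
	if isBusiness {
		return endpoints{
			refreshTokenURL: "https://duplicacy.com/odb_refresh",
			apiURL:          "https://graph.microsoft.com/v1.0/me",
		}
	}
	return endpoints{
		refreshTokenURL: "https://duplicacy.com/one_refresh",
		apiURL:          "https://api.onedrive.com/v1.0",
	}
}

func main() {
	fmt.Println(pick(true).apiURL)  // Microsoft Graph for Business accounts
	fmt.Println(pick(false).apiURL) // the legacy OneDrive API for Personal
}
```

Keeping both URLs as per-client fields rather than package globals is what lets one process talk to both account types at once.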
@@ -106,9 +117,10 @@ func (client *OneDriveClient) call(url string, method string, input interface{},

    if reader, ok := inputReader.(*RateLimitedReader); ok {
        request.ContentLength = reader.Length()
        request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", reader.Length()-1, reader.Length()))
    }

    if url != OneDriveRefreshTokenURL {
    if url != client.RefreshTokenURL {
        client.TokenLock.Lock()
        request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
        client.TokenLock.Unlock()
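The Content-Range header added here is what a Graph-style upload-session endpoint expects when the whole payload is sent in a single PUT; a standalone sketch of the header math (the URL is a placeholder):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// newRangedPut builds a PUT that carries the full payload and declares
// it as bytes 0..n-1 of a total of n, as upload sessions require.
func newRangedPut(url string, payload []byte) (*http.Request, error) {
	request, err := http.NewRequest("PUT", url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	n := len(payload)
	request.ContentLength = int64(n)
	request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", n-1, n))
	return request, nil
}

func main() {
	request, _ := newRangedPut("https://example.com/uploadSession", make([]byte, 1024))
	fmt.Println(request.Header.Get("Content-Range")) // bytes 0-1023/1024
}
```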
@@ -152,7 +164,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},

    if response.StatusCode == 401 {

        if url == OneDriveRefreshTokenURL {
        if url == client.RefreshTokenURL {
            return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
        }

@@ -161,6 +173,8 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
            return nil, 0, err
        }
        continue
    } else if response.StatusCode == 409 {
        return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Conflict"}
    } else if response.StatusCode > 401 && response.StatusCode != 404 {
        retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
        LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
@@ -188,7 +202,7 @@ func (client *OneDriveClient) RefreshToken(force bool) (err error) {
        return nil
    }

    readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
    readCloser, _, err := client.call(client.RefreshTokenURL, "POST", client.Token, "")
    if err != nil {
        return fmt.Errorf("failed to refresh the access token: %v", err)
    }
@@ -228,9 +242,9 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)

    entries := []OneDriveEntry{}

    url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
    url := client.APIURL + "/drive/root:/" + path + ":/children"
    if path == "" {
        url = OneDriveAPIURL + "/drive/root/children"
        url = client.APIURL + "/drive/root/children"
    }
    if client.TestMode {
        url += "?top=8"
@@ -266,7 +280,7 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)

func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {

    url := OneDriveAPIURL + "/drive/root:/" + path
    url := client.APIURL + "/drive/root:/" + path
    url += "?select=id,name,size,folder"

    readCloser, _, err := client.call(url, "GET", 0, "")
@@ -291,28 +305,95 @@ func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, err

func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {

    url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"
    url := client.APIURL + "/drive/items/root:/" + path + ":/content"

    return client.call(url, "GET", 0, "")
}

func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {

    url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"
    // Upload file using the simple method; this is only possible for OneDrive Personal or if the file
    // is smaller than 4MB for OneDrive Business
    if !client.IsBusiness || len(content) < 4*1024*1024 || (client.TestMode && rand.Int()%2 == 0) {
        url := client.APIURL + "/drive/root:/" + path + ":/content"

        readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
        if err != nil {
            return err
        }

        readCloser.Close()
        return nil
    }

    // For large files, create an upload session first
    uploadURL, err := client.CreateUploadSession(path)
    if err != nil {
        return err
    }

    return client.UploadFileSession(uploadURL, content, rateLimit)
}

func (client *OneDriveClient) CreateUploadSession(path string) (uploadURL string, err error) {

    type CreateUploadSessionItem struct {
        ConflictBehavior string `json:"@microsoft.graph.conflictBehavior"`
        Name             string `json:"name"`
    }

    input := map[string]interface{}{
        "item": CreateUploadSessionItem{
            ConflictBehavior: "replace",
            Name:             filepath.Base(path),
        },
    }

    readCloser, _, err := client.call(client.APIURL+"/drive/root:/"+path+":/createUploadSession", "POST", input, "application/json")
    if err != nil {
        return "", err
    }

    type CreateUploadSessionOutput struct {
        UploadURL string `json:"uploadUrl"`
    }

    output := &CreateUploadSessionOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return "", err
    }

    readCloser.Close()
    return output.UploadURL, nil
}

func (client *OneDriveClient) UploadFileSession(uploadURL string, content []byte, rateLimit int) (err error) {

    readCloser, _, err := client.call(uploadURL, "PUT", CreateRateLimitedReader(content, rateLimit), "")
    if err != nil {
        return err
    }
    type UploadFileSessionOutput struct {
        Size int `json:"size"`
    }
    output := &UploadFileSessionOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return fmt.Errorf("Failed to complete the file upload session: %v", err)
    }

    if output.Size != len(content) {
        return fmt.Errorf("Uploaded %d bytes out of %d bytes", output.Size, len(content))
    }

    readCloser.Close()
    return nil
}

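For orientation, the two-step large-file flow against a Graph-style endpoint can be sketched with plain net/http; the host, path, and 4MB threshold follow the hunk, while token handling and chunked Content-Range uploads are deliberately elided:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// uploadLarge creates an upload session for path and PUTs the whole
// payload to the returned uploadUrl in a single ranged request.
func uploadLarge(base string, path string, content []byte) error {
	body := bytes.NewBufferString(`{"item":{"@microsoft.graph.conflictBehavior":"replace"}}`)
	response, err := http.Post(base+"/drive/root:/"+path+":/createUploadSession", "application/json", body)
	if err != nil {
		return err
	}
	defer response.Body.Close()

	var session struct {
		UploadURL string `json:"uploadUrl"`
	}
	if err := json.NewDecoder(response.Body).Decode(&session); err != nil {
		return err
	}

	request, err := http.NewRequest("PUT", session.UploadURL, bytes.NewReader(content))
	if err != nil {
		return err
	}
	n := len(content)
	request.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/%d", n-1, n))
	result, err := http.DefaultClient.Do(request)
	if err != nil {
		return err
	}
	return result.Body.Close()
}

func main() {
	content := make([]byte, 8*1024*1024) // above the 4MB simple-upload limit
	_ = uploadLarge("https://graph.microsoft.com/v1.0/me", "backups/chunk", content)
}
```

Comparing the returned size against len(content), as the hunk does, is the cheap end-to-end check that the session actually committed the full payload.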
func (client *OneDriveClient) DeleteFile(path string) error {

    url := OneDriveAPIURL + "/drive/root:/" + path
    url := client.APIURL + "/drive/root:/" + path

    readCloser, _, err := client.call(url, "DELETE", 0, "")
    if err != nil {
@@ -325,7 +406,7 @@ func (client *OneDriveClient) DeleteFile(path string) error {

func (client *OneDriveClient) MoveFile(path string, parent string) error {

    url := OneDriveAPIURL + "/drive/root:/" + path
    url := client.APIURL + "/drive/root:/" + path

    parentReference := make(map[string]string)
    parentReference["path"] = "/drive/root:/" + parent

@@ -335,6 +416,20 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {

    readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
    if err != nil {
        if e, ok := err.(OneDriveError); ok && e.Status == 400 {
            // The destination directory doesn't exist; trying to create it...
            dir := filepath.Dir(parent)
            if dir == "." {
                dir = ""
            }
            client.CreateDirectory(dir, filepath.Base(parent))
            readCloser, _, err = client.call(url, "PATCH", parameters, "application/json")
            if err != nil {
                return nil
            }

        }

        return err
    }

@@ -344,24 +439,29 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {

func (client *OneDriveClient) CreateDirectory(path string, name string) error {

    url := OneDriveAPIURL + "/root/children"
    url := client.APIURL + "/root/children"

    if path != "" {

        parentID, isDir, _, err := client.GetFileInfo(path)
        pathID, isDir, _, err := client.GetFileInfo(path)
        if err != nil {
            return err
        }

        if parentID == "" {
            return fmt.Errorf("The path '%s' does not exist", path)
        if pathID == "" {
            dir := filepath.Dir(path)
            if dir != "." {
                // The parent directory doesn't exist; trying to create it...
                client.CreateDirectory(dir, filepath.Base(path))
                isDir = true
            }
        }

        if !isDir {
            return fmt.Errorf("The path '%s' is not a directory", path)
        }

        url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
        url = client.APIURL + "/drive/root:/" + path + ":/children"
    }

    parameters := make(map[string]interface{})

@@ -370,6 +470,11 @@ func (client *OneDriveClient) CreateDirectory(path string, name string) error {

    readCloser, _, err := client.call(url, "POST", parameters, "application/json")
    if err != nil {
        if e, ok := err.(OneDriveError); ok && e.Status == 409 {
            // This error usually means the directory already exists
            LOG_TRACE("ONEDRIVE_MKDIR", "The directory '%s/%s' already exists", path, name)
            return nil
        }
        return err
    }

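The recursion added here — creating missing parents on demand and treating an HTTP 409 as "already exists" — is a generic pattern; a self-contained sketch against an in-memory store, where mkdirOne and the store are illustrative stand-ins for the real API calls:

```go
package main

import (
	"fmt"
	"path"
)

// store records which directories exist; mkdirOne fails with errExists
// when a directory is already present, mirroring an HTTP 409.
var store = map[string]bool{"": true}

var errExists = fmt.Errorf("conflict: already exists")

func mkdirOne(dir string) error {
	if store[dir] {
		return errExists
	}
	store[dir] = true
	return nil
}

// mkdirAll creates parents first, then the directory itself, treating
// "already exists" as success, like the CreateDirectory hunk above.
func mkdirAll(dir string) error {
	if dir == "" || dir == "." {
		return nil
	}
	if err := mkdirAll(path.Dir(dir)); err != nil {
		return err
	}
	if err := mkdirOne(dir); err != nil && err != errExists {
		return err
	}
	return nil
}

func main() {
	fmt.Println(mkdirAll("chunks/00/aa"), store)
}
```

Idempotence is the point: several backup threads can race to create the same chunk directory and all of them succeed.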
@@ -17,7 +17,7 @@ import (

func TestOneDriveClient(t *testing.T) {

    oneDriveClient, err := NewOneDriveClient("one-token.json")
    oneDriveClient, err := NewOneDriveClient("one-token.json", false)
    if err != nil {
        t.Errorf("Failed to create the OneDrive client: %v", err)
        return
@@ -19,13 +19,13 @@ type OneDriveStorage struct {
}

// CreateOneDriveStorage creates a OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
func CreateOneDriveStorage(tokenFile string, isBusiness bool, storagePath string, threads int) (storage *OneDriveStorage, err error) {

    for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
        storagePath = storagePath[:len(storagePath)-1]
    }

    client, err := NewOneDriveClient(tokenFile)
    client, err := NewOneDriveClient(tokenFile, isBusiness)
    if err != nil {
        return nil, err
    }

@@ -80,6 +80,7 @@ func (storage *OneDriveStorage) convertFilePath(filePath string) string {

// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {

    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }

@@ -43,10 +43,10 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
        return nil
    }

    return CreateSFTPStorage(server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
    return CreateSFTPStorage(false, server, port, username, storageDir, minimumNesting, authMethods, hostKeyCallback, threads)
}

func CreateSFTPStorage(server string, port int, username string, storageDir string, minimumNesting int,
func CreateSFTPStorage(compatibilityMode bool, server string, port int, username string, storageDir string, minimumNesting int,
    authMethods []ssh.AuthMethod,
    hostKeyCallback func(hostname string, remote net.Addr,
        key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {

@@ -57,8 +57,21 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
        HostKeyCallback: hostKeyCallback,
    }

    if server == "sftp.hidrive.strato.com" {
        sftpConfig.Ciphers = []string{"aes128-ctr", "aes256-ctr"}
    if compatibilityMode {
        sftpConfig.Ciphers = []string{
            "aes128-ctr", "aes192-ctr", "aes256-ctr",
            "aes128-gcm@openssh.com",
            "chacha20-poly1305@openssh.com",
            "arcfour256", "arcfour128", "arcfour",
            "aes128-cbc",
            "3des-cbc",
        }
        sftpConfig.KeyExchanges = []string{
            "curve25519-sha256@libssh.org",
            "ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
            "diffie-hellman-group1-sha1", "diffie-hellman-group14-sha1",
            "diffie-hellman-group-exchange-sha1", "diffie-hellman-group-exchange-sha256",
        }
    }

    serverAddress := fmt.Sprintf("%s:%d", server, port)
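A standalone sketch of widening the negotiated algorithms with golang.org/x/crypto/ssh; the host and credentials are placeholders, and enabling legacy CBC/arcfour ciphers should be a deliberate opt-in, exactly as the new sftpc:// scheme makes it:

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	config := &ssh.ClientConfig{
		User:            "backup",                                 // placeholder
		Auth:            []ssh.AuthMethod{ssh.Password("secret")}, // placeholder
		HostKeyCallback: ssh.InsecureIgnoreHostKey(),              // never do this in production
	}
	// The embedded ssh.Config carries the algorithm lists; fields set
	// here replace the library defaults entirely.
	config.Ciphers = []string{
		"aes128-ctr", "aes192-ctr", "aes256-ctr",
		"aes128-gcm@openssh.com",
		"chacha20-poly1305@openssh.com",
	}
	config.KeyExchanges = []string{
		"curve25519-sha256@libssh.org",
		"ecdh-sha2-nistp256", "ecdh-sha2-nistp384", "ecdh-sha2-nistp521",
	}

	client, err := ssh.Dial("tcp", "sftp.example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```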
@@ -305,7 +318,11 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
        file.Close()
        return err
    }
    file.Close()

    err = file.Close()
    if err != nil {
        return err
    }

    err = storage.getSFTPClient().Rename(temporaryFile, fullPath)
    if err != nil {

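Checking the error from Close matters on SFTP because buffered writes are only flushed — and quota or connection failures only surface — at close time, before the temporary file is renamed into place. A sketch with github.com/pkg/sftp; the paths are placeholders:

```go
package main

import (
	"log"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

// uploadAtomically writes to a temporary name, closes, and only then
// renames, so a half-written file never appears under the final path.
func uploadAtomically(client *sftp.Client, tmpPath, finalPath string, content []byte) error {
	file, err := client.Create(tmpPath)
	if err != nil {
		return err
	}
	if _, err := file.Write(content); err != nil {
		file.Close()
		return err
	}
	// A failure here means the data may not have reached the server.
	if err := file.Close(); err != nil {
		return err
	}
	return client.Rename(tmpPath, finalPath)
}

func main() {
	var sshClient *ssh.Client // assume an established connection
	if sshClient == nil {
		return // illustration only
	}
	client, err := sftp.NewClient(sshClient)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	_ = uploadAtomically(client, "chunks/ab.tmp", "chunks/ab", []byte("data"))
}
```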
@@ -91,6 +91,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, fil
    snapshot.Files = append(snapshot.Files, directory)
    subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, nobackupFile, snapshot.discardAttributes)
    if err != nil {
        if directory.Path == "" {
            LOG_ERROR("LIST_FAILURE", "Failed to list the repository root: %v", err)
            return nil, nil, nil, err
        }
        LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
        skippedDirectories = append(skippedDirectories, directory.Path)
        continue

@@ -381,6 +381,13 @@ func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, pat
    return true
}

// ClearSnapshotContents removes contents loaded by DownloadSnapshotContents
func (manager *SnapshotManager) ClearSnapshotContents(snapshot *Snapshot) {
    snapshot.ChunkHashes = nil
    snapshot.ChunkLengths = nil
    snapshot.Files = nil
}

// CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string][]*Snapshot) bool {

@@ -653,6 +660,51 @@ func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot, keepChunkH
    return chunks
}

// GetSnapshotChunkHashes has an option to retrieve chunk hashes in addition to chunk ids.
func (manager *SnapshotManager) GetSnapshotChunkHashes(snapshot *Snapshot, chunkHashes *map[string]bool, chunkIDs map[string]bool) {

    for _, chunkHash := range snapshot.FileSequence {
        if chunkHashes != nil {
            (*chunkHashes)[chunkHash] = true
        }
        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
    }

    for _, chunkHash := range snapshot.ChunkSequence {
        if chunkHashes != nil {
            (*chunkHashes)[chunkHash] = true
        }
        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
    }

    for _, chunkHash := range snapshot.LengthSequence {
        if chunkHashes != nil {
            (*chunkHashes)[chunkHash] = true
        }
        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
    }

    if len(snapshot.ChunkHashes) == 0 {

        description := manager.DownloadSequence(snapshot.ChunkSequence)
        err := snapshot.LoadChunks(description)
        if err != nil {
            LOG_ERROR("SNAPSHOT_CHUNK", "Failed to load chunks for snapshot %s at revision %d: %v",
                snapshot.ID, snapshot.Revision, err)
            return
        }
    }

    for _, chunkHash := range snapshot.ChunkHashes {
        if chunkHashes != nil {
            (*chunkHashes)[chunkHash] = true
        }
        chunkIDs[manager.config.GetChunkIDFromHash(chunkHash)] = true
    }

    snapshot.ClearChunks()
}

// ListSnapshots shows the information about a snapshot.
func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList []int, tag string,
    showFiles bool, showChunks bool) int {
@@ -757,7 +809,9 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList

// CheckSnapshots checks the specified snapshots for missing or corrupt chunks.
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
    checkFiles bool, searchFossils bool, resurrect bool) bool {
    checkFiles bool, checkChunks, searchFossils bool, resurrect bool, threads int) bool {

    manager.chunkDownloader = CreateChunkDownloader(manager.config, manager.storage, manager.snapshotCache, false, threads)

    LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
        snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)

@@ -774,6 +828,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
    // Store the index of the snapshot that references each chunk; if the chunk is shared by multiple snapshots, the index is -1
    chunkSnapshotMap := make(map[string]int)

    emptyChunks := 0

    LOG_INFO("SNAPSHOT_CHECK", "Listing all chunks")
    allChunks, allSizes := manager.ListAllFiles(manager.storage, chunkDir)

@@ -788,6 +844,11 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

    chunk = strings.Replace(chunk, "/", "", -1)
    chunkSizeMap[chunk] = allSizes[i]

    if allSizes[i] == 0 && !strings.HasSuffix(chunk, ".tmp") {
        LOG_WARN("SNAPSHOT_CHECK", "Chunk %s has a size of 0", chunk)
        emptyChunks++
    }
}

if snapshotID == "" || showStatistics || showTabular {
@@ -839,6 +900,12 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
    }
    LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))

    var allChunkHashes *map[string]bool
    if checkChunks && !checkFiles {
        m := make(map[string]bool)
        allChunkHashes = &m
    }

    for snapshotID = range snapshotMap {

        for _, snapshot := range snapshotMap[snapshotID] {
@@ -846,13 +913,12 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
    if checkFiles {
        manager.DownloadSnapshotContents(snapshot, nil, false)
        manager.VerifySnapshot(snapshot)
        manager.ClearSnapshotContents(snapshot)
        continue
    }

    chunks := make(map[string]bool)
    for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
        chunks[chunkID] = true
    }
    manager.GetSnapshotChunkHashes(snapshot, allChunkHashes, chunks)

    missingChunks := 0
    for chunkID := range chunks {
@@ -940,12 +1006,26 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
        return false
    }

    if emptyChunks > 0 {
        LOG_ERROR("SNAPSHOT_CHECK", "%d chunks have a size of 0", emptyChunks)
        return false
    }

    if showTabular {
        manager.ShowStatisticsTabular(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
    } else if showStatistics {
        manager.ShowStatistics(snapshotMap, chunkSizeMap, chunkUniqueMap, chunkSnapshotMap)
    }

    if checkChunks && !checkFiles {
        manager.chunkDownloader.snapshotCache = nil
        LOG_INFO("SNAPSHOT_VERIFY", "Verifying %d chunks", len(*allChunkHashes))
        for chunkHash := range *allChunkHashes {
            manager.chunkDownloader.AddChunk(chunkHash)
        }
        manager.chunkDownloader.WaitForCompletion()
        LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
    }
    return true
}

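The chunk-verification path above downloads every referenced chunk exactly once: hashes are collected into a set while snapshots are walked, then handed to the downloader in one batch. A generic sketch of that collect-then-verify shape, where verifyChunk is a stand-in for a real download-and-decrypt:

```go
package main

import "fmt"

// verifyChunk is a stand-in: a real implementation would download the
// chunk, decrypt it, and compare its hash against the expected id.
func verifyChunk(hash string) error {
	fmt.Println("verified", hash)
	return nil
}

func main() {
	snapshots := [][]string{
		{"aa", "bb", "cc"},
		{"bb", "cc", "dd"}, // shared chunks appear in several snapshots
	}

	// Collect every referenced chunk hash once, deduplicating across snapshots.
	allChunkHashes := make(map[string]bool)
	for _, chunkHashes := range snapshots {
		for _, hash := range chunkHashes {
			allChunkHashes[hash] = true
		}
	}

	fmt.Printf("Verifying %d chunks\n", len(allChunkHashes))
	for hash := range allChunkHashes {
		if err := verifyChunk(hash); err != nil {
			fmt.Println("failed:", hash, err)
		}
	}
}
```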
@@ -620,7 +620,7 @@ func TestPruneNewSnapshots(t *testing.T) {
    // Now chunkHash1 will be resurrected
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
    checkTestSnapshots(snapshotManager, 4, 0)
    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false)
    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false, false, 1)
}

// A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists

@@ -669,7 +669,7 @@ func TestPruneGhostSnapshots(t *testing.T) {
    // Run the prune again but the fossil collection should be ignored, since revision 1 still exists
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
    checkTestSnapshots(snapshotManager, 3, 2)
    snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false)
    snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, false, true /*searchFossils*/, false, 1)

    // Prune snapshot 1 again
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)

@@ -683,5 +683,5 @@ func TestPruneGhostSnapshots(t *testing.T) {
    // Run the prune again and this time the fossil collection will be processed and the fossils removed
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
    checkTestSnapshots(snapshotManager, 3, 0)
    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false)
    snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false, false, 1)
}

@@ -268,7 +268,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
    if matched == nil {
        LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
        return nil
    } else if matched[1] == "sftp" {
    } else if matched[1] == "sftp" || matched[1] == "sftpc" {
        server := matched[3]
        username := matched[2]
        storageDir := matched[5]
@@ -336,7 +336,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
    keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
        true, resetPassword)

    var key ssh.Signer
    var keySigner ssh.Signer
    var err error

    if keyFile == "" {

@@ -347,15 +347,15 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
    if err != nil {
        LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
    } else {
        key, err = ssh.ParsePrivateKey(content)
        keySigner, err = ssh.ParsePrivateKey(content)
        if err != nil {
            if strings.Contains(err.Error(), "cannot decode encrypted private keys") {
            if _, ok := err.(*ssh.PassphraseMissingError); ok {
                LOG_TRACE("SSH_PUBLICKEY", "The private key file is encrypted")
                passphrase = GetPassword(preference, "ssh_passphrase", "Enter the passphrase to decrypt the private key file:", false, resetPassword)
                if len(passphrase) == 0 {
                    LOG_INFO("SSH_PUBLICKEY", "No passphrase to decrypt the private key file %s", keyFile)
                } else {
                    key, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
                    keySigner, err = ssh.ParsePrivateKeyWithPassphrase(content, []byte(passphrase))
                    if err != nil {
                        LOG_INFO("SSH_PUBLICKEY", "Failed to parse the encrypted private key file %s: %v", keyFile, err)
                    }
@@ -364,11 +364,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
                LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
            }
        }

        if keySigner != nil {
            certFile := keyFile + "-cert.pub"
            if stat, err := os.Stat(certFile); err == nil && !stat.IsDir() {
                LOG_DEBUG("SSH_CERTIFICATE", "Attempting to use ssh certificate from file %s", certFile)
                var content []byte
                content, err = ioutil.ReadFile(certFile)
                if err != nil {
                    LOG_INFO("SSH_CERTIFICATE", "Failed to read ssh certificate file %s: %v", certFile, err)
                } else {
                    pubKey, _, _, _, err := ssh.ParseAuthorizedKey(content)
                    if err != nil {
                        LOG_INFO("SSH_CERTIFICATE", "Failed to parse ssh certificate file %s: %v", certFile, err)
                    } else {
                        certSigner, err := ssh.NewCertSigner(pubKey.(*ssh.Certificate), keySigner)
                        if err != nil {
                            LOG_INFO("SSH_CERTIFICATE", "Failed to create certificate signer: %v", err)
                        } else {
                            keySigner = certSigner
                        }
                    }
                }
            }
        }
    }
}

    if key != nil {
        signers = append(signers, key)
    if keySigner != nil {
        signers = append(signers, keySigner)
    }

    if len(signers) > 0 {
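The certificate support boils down to wrapping the private-key signer with ssh.NewCertSigner when a `<keyfile>-cert.pub` exists; a standalone sketch with golang.org/x/crypto/ssh, where the file paths are placeholders:

```go
package main

import (
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

// loadSigner parses a private key and, when certPath exists and parses
// as a certificate, returns a signer that presents the certificate.
func loadSigner(keyPath, certPath string) (ssh.Signer, error) {
	keyBytes, err := ioutil.ReadFile(keyPath)
	if err != nil {
		return nil, err
	}
	keySigner, err := ssh.ParsePrivateKey(keyBytes)
	if err != nil {
		return nil, err
	}

	certBytes, err := ioutil.ReadFile(certPath)
	if err != nil {
		return keySigner, nil // no certificate file; use the plain key
	}
	pubKey, _, _, _, err := ssh.ParseAuthorizedKey(certBytes)
	if err != nil {
		return nil, err
	}
	cert, ok := pubKey.(*ssh.Certificate)
	if !ok {
		return keySigner, nil // the file wasn't a certificate after all
	}
	return ssh.NewCertSigner(cert, keySigner)
}

func main() {
	signer, err := loadSigner("id_ed25519", "id_ed25519-cert.pub")
	if err != nil {
		log.Fatal(err)
	}
	_ = ssh.PublicKeys(signer) // plugs into ssh.ClientConfig.Auth
}
```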
@@ -416,7 +440,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
        return checkHostKey(hostname, remote, key)
    }

    sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
    sftpStorage, err := CreateSFTPStorage(matched[1] == "sftpc", server, port, username, storageDir, 2, authMethods, hostKeyChecker, threads)
    if err != nil {
        LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
        return nil
@@ -600,26 +624,35 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
        SavePassword(preference, "gcs_token", tokenFile)
        return gcsStorage
    } else if matched[1] == "gcd" {
        // Handle writing directly to the root of the drive
        // For gcd://driveid@/, driveid@ is match[3] not match[2]
        if matched[2] == "" && strings.HasSuffix(matched[3], "@") {
            matched[2], matched[3] = matched[3], matched[2]
        }
        driveID := matched[2]
        if driveID != "" {
            driveID = driveID[:len(driveID)-1]
        }
        storagePath := matched[3] + matched[4]
        prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
        tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
        gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
        gcdStorage, err := CreateGCDStorage(tokenFile, driveID, storagePath, threads)
        if err != nil {
            LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
            return nil
        }
        SavePassword(preference, "gcd_token", tokenFile)
        return gcdStorage
    } else if matched[1] == "one" {
    } else if matched[1] == "one" || matched[1] == "odb" {
        storagePath := matched[3] + matched[4]
        prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
        tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
        oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
        tokenFile := GetPassword(preference, matched[1]+"_token", prompt, true, resetPassword)
        oneDriveStorage, err := CreateOneDriveStorage(tokenFile, matched[1] == "odb", storagePath, threads)
        if err != nil {
            LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
            return nil
        }
        SavePassword(preference, "one_token", tokenFile)
        SavePassword(preference, matched[1]+"_token", tokenFile)
        return oneDriveStorage
    } else if matched[1] == "hubic" {
        storagePath := matched[3] + matched[4]
@@ -645,6 +678,10 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
    } else if matched[1] == "webdav" || matched[1] == "webdav-http" {
        server := matched[3]
        username := matched[2]
        if username == "" {
            LOG_ERROR("STORAGE_CREATE", "No username is provided to access the WebDAV storage")
            return nil
        }
        username = username[:len(username)-1]
        storageDir := matched[5]
        port := 0
@@ -665,6 +702,18 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
        }
        SavePassword(preference, "webdav_password", password)
        return webDAVStorage
    } else if matched[1] == "fabric" {
        endpoint := matched[3]
        storageDir := matched[5]
        prompt := fmt.Sprintf("Enter the token for accessing the Storage Made Easy File Fabric storage:")
        token := GetPassword(preference, "fabric_token", prompt, true, resetPassword)
        smeStorage, err := CreateFileFabricStorage(endpoint, token, storageDir, threads)
        if err != nil {
            LOG_ERROR("STORAGE_CREATE", "Failed to load the File Fabric storage at %s: %v", storageURL, err)
            return nil
        }
        SavePassword(preference, "fabric_token", token)
        return smeStorage
    } else {
        LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
        return nil

@@ -133,11 +133,23 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
    storage.SetDefaultNestingLevels([]int{2, 3}, 2)
    return storage, err
} else if testStorageName == "gcd" {
    storage, err := CreateGCDStorage(config["token_file"], config["storage_path"], threads)
    storage, err := CreateGCDStorage(config["token_file"], "", config["storage_path"], threads)
    storage.SetDefaultNestingLevels([]int{2, 3}, 2)
    return storage, err
} else if testStorageName == "gcd-shared" {
    storage, err := CreateGCDStorage(config["token_file"], config["drive"], config["storage_path"], threads)
    storage.SetDefaultNestingLevels([]int{2, 3}, 2)
    return storage, err
} else if testStorageName == "one" {
    storage, err := CreateOneDriveStorage(config["token_file"], config["storage_path"], threads)
    storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
    storage.SetDefaultNestingLevels([]int{2, 3}, 2)
    return storage, err
} else if testStorageName == "odb" {
    storage, err := CreateOneDriveStorage(config["token_file"], true, config["storage_path"], threads)
    storage.SetDefaultNestingLevels([]int{2, 3}, 2)
    return storage, err
} else if testStorageName == "one" {
    storage, err := CreateOneDriveStorage(config["token_file"], false, config["storage_path"], threads)
    storage.SetDefaultNestingLevels([]int{2, 3}, 2)
    return storage, err
} else if testStorageName == "hubic" {

@@ -14,7 +14,6 @@ import (
    "errors"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    //"net/http/httputil"
@@ -214,53 +213,56 @@ type WebDAVMultiStatus struct {

func (storage *WebDAVStorage) getProperties(uri string, depth int, properties ...string) (map[string]WebDAVProperties, error) {

    propfind := "<prop>"
    for _, p := range properties {
        propfind += fmt.Sprintf("<%s/>", p)
    }
    propfind += "</prop>"
    maxTries := 3
    for tries := 0; ; tries++ {
        propfind := "<prop>"
        for _, p := range properties {
            propfind += fmt.Sprintf("<%s/>", p)
        }
        propfind += "</prop>"

        body := fmt.Sprintf(`<?xml version="1.0" encoding="utf-8" ?><propfind xmlns="DAV:">%s</propfind>`, propfind)

        readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
        if err != nil {
            return nil, err
        }
        defer readCloser.Close()
        content, err := ioutil.ReadAll(readCloser)
        if err != nil {
            return nil, err
        }
        readCloser, _, err := storage.sendRequest("PROPFIND", uri, depth, []byte(body))
        if err != nil {
            return nil, err
        }
        defer readCloser.Close()

        object := WebDAVMultiStatus{}
        err = xml.Unmarshal(content, &object)
        if err != nil {
            return nil, err
        }

        if object.Responses == nil || len(object.Responses) == 0 {
            return nil, errors.New("no WebDAV responses")
        }

        responses := make(map[string]WebDAVProperties)

        for _, responseTag := range object.Responses {

            if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
                return nil, errors.New("no WebDAV properties")
        object := WebDAVMultiStatus{}
        err = xml.NewDecoder(readCloser).Decode(&object)
        if err != nil {
            if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
                LOG_WARN("WEBDAV_RETRY", "Retrying on %v", err)
                continue
            }
            return nil, err
        }

        properties := make(WebDAVProperties)
        for _, prop := range responseTag.PropStat.Prop.PropList {
            properties[prop.XMLName.Local] = prop.Value
        if object.Responses == nil || len(object.Responses) == 0 {
            return nil, errors.New("no WebDAV responses")
        }

        responseKey := responseTag.Href
        responses[responseKey] = properties
        responses := make(map[string]WebDAVProperties)

        for _, responseTag := range object.Responses {

            if responseTag.PropStat == nil || responseTag.PropStat.Prop == nil || responseTag.PropStat.Prop.PropList == nil {
                return nil, errors.New("no WebDAV properties")
            }

            properties := make(WebDAVProperties)
            for _, prop := range responseTag.PropStat.Prop.PropList {
                properties[prop.XMLName.Local] = prop.Value
            }

            responseKey := responseTag.Href
            responses[responseKey] = properties

        }

        return responses, nil
    }

    return responses, nil
}

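A reduced sketch of the retry-on-truncation idea: decode straight from the stream and retry when the decoder reports an unexpected EOF, which is how a dropped connection mid-PROPFIND typically surfaces. fetch is a stand-in for the real request and deliberately truncates its first response:

```go
package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

type multiStatus struct {
	Responses []struct {
		Href string `xml:"href"`
	} `xml:"response"`
}

// fetch is a stand-in for sendRequest; the first call returns a
// truncated body to force one retry.
var calls int

func fetch() io.Reader {
	calls++
	if calls == 1 {
		return strings.NewReader(`<multistatus><response><href>/a`)
	}
	return strings.NewReader(`<multistatus><response><href>/a</href></response></multistatus>`)
}

func getHrefs(maxTries int) ([]string, error) {
	for tries := 0; ; tries++ {
		object := multiStatus{}
		if err := xml.NewDecoder(fetch()).Decode(&object); err != nil {
			if strings.Contains(err.Error(), "unexpected EOF") && tries < maxTries {
				fmt.Println("retrying on", err)
				continue
			}
			return nil, err
		}
		hrefs := []string{}
		for _, response := range object.Responses {
			hrefs = append(hrefs, response.Href)
		}
		return hrefs, nil
	}
}

func main() {
	fmt.Println(getHrefs(3))
}
```

Decoding from the stream instead of buffering the whole body with ioutil.ReadAll also means a truncation error is attributable to the decoder, which is exactly what the retry condition keys on.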
// ListFiles returns the list of files and subdirectories under 'dir'. Any subdirectory returned must have a trailing '/', with