Mirror of https://github.com/gilbertchen/duplicacy (synced 2025-12-06 00:03:38 +00:00)

Compare commits: download_p...v2.4.1 (71 commits)
| SHA1 |
|---|
| 808ae4eb75 |
| 6699e2f440 |
| 733b68be2c |
| b61906c99e |
| a0a07d18cc |
| a6ce64e715 |
| 499b612a0d |
| 46ce0ba1fb |
| cc88abd547 |
| e888b6d7e5 |
| d43fe1a282 |
| 504d07bd51 |
| 0abb4099f6 |
| 694494ea54 |
| 165152493c |
| e02041f4ed |
| a99f059b52 |
| f022a6f684 |
| 791c61eecb |
| 6ad27adaea |
| 9abfbe1ee0 |
| b32c3b2cd5 |
| 9baafdafa2 |
| ca7d927840 |
| 0ca9cd476e |
| abf9a94fc9 |
| 9a0d60ca84 |
| 90833f9d86 |
| 58387c0951 |
| 81bb188211 |
| 5821cad8c5 |
| 662805fbbd |
| fc35ddf7d1 |
| 6efcd37c5c |
| 58558b8a2f |
| 045be3905b |
| 4da7f7b6f9 |
| 41668d4bbd |
| 9d4ac34f4b |
| eba5aa6eea |
| 47c4c25d8b |
| 37781f9540 |
| 282fe4edd2 |
| 33c71ca5f8 |
| 6e7d45caac |
| 8e9caea201 |
| 18ba415f56 |
| 458687d543 |
| 57a408a577 |
| a73ed462b6 |
| e56efc1d3a |
| bb58f42a37 |
| 22e8d9e60a |
| 4eb174cec5 |
| 6fd3fbd568 |
| a6fe3d785e |
| 1da151f9d9 |
| 4b69c1162e |
| abcb4d75c1 |
| 10d2058738 |
| 43a5ffe011 |
| d16273fe2b |
| 2b56d576c7 |
| 82c6c15f1c |
| df7487cc0b |
| 5e8baab4ec |
| e1fa39008d |
| aaebf4510c |
| 96dd28995b |
| 166f6e6266 |
| 86c89f43a0 |
.github/ISSUE_TEMPLATE.md (vendored): 18 changed lines
```diff
@@ -1,5 +1,17 @@
-Please submit an issue for bug reports or feature requests. If you have any questions please post them on https://forum.duplicacy.com.
+---
+name: Please use the official forum
+about: Please use the official forum instead of Github
+title: 'Please use the official forum'
+labels: ''
+assignees: ''
 
-When you're reporting a bug, please specify the OS, version, command line arguments, or any info that you think is helpful for the diagnosis. If Duplicacy reports an error, please post the program output here.
+---
 
-Note that this repository hosts the CLI version of Duplicacy only. If you're reporting anything related to the GUI version, please visit https://forum.duplicacy.com.
+Please **use the [Duplicacy Forum](https://forum.duplicacy.com/)** when reporting bugs, making feature requests, asking for help or simply praising Duplicacy for its ease of use.
+
+We strongly encourage you to create an account on the forum and use that platform for discussion as there is a higher chance that someone there will talk to you.
+
+There is a handful of people watching the Github Issues and we are in the process of moving **all** of them to the forum as well. Most likely you will not receive an answer here or it will be very slow and you will be pointed to the forum.
+
+We have already created a comprehensive [Guide](https://forum.duplicacy.com/t/duplicacy-user-guide/1197), and a [How-To](https://forum.duplicacy.com/c/how-to) category which stores more wisdom than these issues on Github.
```
```diff
@@ -14,3 +14,4 @@ Duplicacy is based on the following open source projects:
 |https://github.com/pcwizz/xattr | BSD-2-Clause |
 |https://github.com/minio/blake2b-simd | Apache-2.0 |
 |https://github.com/go-ole/go-ole | MIT |
+|https://github.com/ncw/swift | MIT |
```
Gopkg.lock (generated): 8 changed lines
```diff
@@ -71,7 +71,7 @@
   branch = "master"
   name = "github.com/gilbertchen/go.dbus"
   packages = ["."]
-  revision = "9e442e6378618c083fd3b85b703ffd202721fb17"
+  revision = "8591994fa32f1dbe3fa9486bc6f4d4361ac16649"
 
 [[projects]]
   branch = "master"
@@ -153,8 +153,8 @@
 [[projects]]
   name = "github.com/pkg/sftp"
   packages = ["."]
-  revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
-  version = "1.0.0"
+  revision = "3edd153f213d8d4191a0ee4577c61cca19436632"
+  version = "v1.10.1"
 
 [[projects]]
   name = "github.com/satori/go.uuid"
@@ -225,6 +225,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
+  inputs-digest = "8636a9db1eb54be5374f9914687693122efdde511f11c47d10c22f9e245e7f70"
   solver-name = "gps-cdcl"
   solver-version = 1
```
||||
@@ -75,7 +75,7 @@
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/pkg/sftp"
|
||||
version = "1.0.0"
|
||||
version = "1.10.1"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
|
||||
LICENSE.md

```diff
@@ -1,8 +1,7 @@
 Copyright © 2017 Acrosync LLC
 
 * Free for personal use or commercial trial
-* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
-* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
+* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
+* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
 * CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
 * Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
```
README.md

```diff
@@ -90,8 +90,7 @@ The following table compares the feature lists of all these backup tools:
 ## License
 
 * Free for personal use or commercial trial
-* Non-trial commercial use requires per-user CLI licenses available from [duplicacy.com](https://duplicacy.com/buy) at a cost of $20 per year
-* A user is defined as the computer account that creates or edits the files to be backed up; if a backup contains files created or edited by multiple users for commercial purposes, one CLI license is required for each user
+* Non-trial commercial use requires per-computer CLI licenses available from [duplicacy.com](https://duplicacy.com/buy.html) at a cost of $50 per year
+* The computer with a valid commercial license for the GUI version may run the CLI version without a CLI license
 * CLI licenses are not required to restore or manage backups; only the backup command requires valid CLI licenses
 * Modification and redistribution are permitted, but commercial use of derivative works is subject to the same requirements of this license
```
duplicacy_main.go

```diff
@@ -201,13 +201,24 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
 	}
 
 	if err != nil {
-		duplicacy.LOG_WARN("SCRIPT_ERROR", "Failed to run script: %v", err)
+		duplicacy.LOG_ERROR("SCRIPT_ERROR", "Failed to run %s script: %v", script, err)
 		return false
 	}
 
 	return true
 }
 
+func loadRSAPrivateKey(keyFile string, preference *duplicacy.Preference, backupManager *duplicacy.BackupManager, resetPasswords bool) {
+	if keyFile == "" {
+		return
+	}
+
+	prompt := fmt.Sprintf("Enter the passphrase for %s:", keyFile)
+	passphrase := duplicacy.GetPassword(*preference, "rsa_passphrase", prompt, false, resetPasswords)
+	backupManager.LoadRSAPrivateKey(keyFile, passphrase)
+	duplicacy.SavePassword(*preference, "rsa_passphrase", passphrase)
+}
+
 func initRepository(context *cli.Context) {
 	configRepository(context, true)
 }
@@ -319,6 +330,11 @@ func configRepository(context *cli.Context, init bool) {
 	if preference.Encrypted {
 		prompt := fmt.Sprintf("Enter storage password for %s:", preference.StorageURL)
 		storagePassword = duplicacy.GetPassword(preference, "password", prompt, false, true)
+	} else {
+		if context.String("key") != "" {
+			duplicacy.LOG_ERROR("STORAGE_CONFIG", "RSA encryption can't be enabled with an unencrypted storage")
+			return
+		}
 	}
 
 	existingConfig, _, err := duplicacy.DownloadConfig(storage, storagePassword)
@@ -434,7 +450,7 @@ func configRepository(context *cli.Context, init bool) {
 			iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
 		}
 		duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
-			minimumChunkSize, storagePassword, otherConfig, bitCopy)
+			minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"))
 	}
 
 	duplicacy.Preferences = append(duplicacy.Preferences, preference)
@@ -532,7 +548,13 @@ func setPreference(context *cli.Context) {
 		newPreference.DoNotSavePassword = triBool.IsTrue()
 	}
 
-	newPreference.NobackupFile = context.String("nobackup-file")
+	if context.String("nobackup-file") != "" {
+		newPreference.NobackupFile = context.String("nobackup-file")
+	}
+
+	if context.String("filters") != "" {
+		newPreference.FiltersFile = context.String("filters")
+	}
 
 	key := context.String("key")
 	value := context.String("value")
@@ -715,7 +737,7 @@ func backupRepository(context *cli.Context) {
 	uploadRateLimit := context.Int("limit-rate")
 	enumOnly := context.Bool("enum-only")
 	storage.SetRateLimits(0, uploadRateLimit)
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
 	duplicacy.SavePassword(*preference, "password", password)
 
 	backupManager.SetupSnapshotCache(preference.Name)
@@ -782,31 +804,21 @@ func restoreRepository(context *cli.Context) {
 			pattern = pattern[1:]
 		}
 
-		if duplicacy.IsUnspecifiedFilter(pattern) {
-			pattern = "+" + pattern
-		}
-
-		if duplicacy.IsEmptyFilter(pattern) {
-			continue
-		}
-
-		if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
-			valid, err := duplicacy.IsValidRegex(pattern[2:])
-			if !valid || err != nil {
-				duplicacy.LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
-			}
-		}
-
 		patterns = append(patterns, pattern)
 	}
 
+	patterns = duplicacy.ProcessFilterLines(patterns, make([]string, 0))
+
+	duplicacy.LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(duplicacy.RegexMap))
+
 	duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
 
 	storage.SetRateLimits(context.Int("limit-rate"), 0)
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
 	duplicacy.SavePassword(*preference, "password", password)
 
+	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns)
@@ -844,7 +856,7 @@ func listSnapshots(context *cli.Context) {
 	tag := context.String("t")
 	revisions := getRevisions(context)
 
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
 	id := preference.SnapshotID
@@ -857,6 +869,9 @@ func listSnapshots(context *cli.Context) {
 	showFiles := context.Bool("files")
 	showChunks := context.Bool("chunks")
 
+	// list doesn't need to decrypt file chunks; but we need -key here so we can reset the passphrase for the private key
+	loadRSAPrivateKey(context.String("key"), preference, backupManager, resetPassword)
+
 	backupManager.SetupSnapshotCache(preference.Name)
 	backupManager.SnapshotManager.ListSnapshots(id, revisions, tag, showFiles, showChunks)
@@ -892,9 +907,11 @@ func checkSnapshots(context *cli.Context) {
 	tag := context.String("t")
 	revisions := getRevisions(context)
 
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
+	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+
 	id := preference.SnapshotID
 	if context.Bool("all") {
 		id = ""
@@ -947,9 +964,11 @@ func printFile(context *cli.Context) {
 		snapshotID = context.String("id")
 	}
 
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
+	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+
 	backupManager.SetupSnapshotCache(preference.Name)
 
 	file := ""
@@ -1003,11 +1022,13 @@ func diff(context *cli.Context) {
 	}
 
 	compareByHash := context.Bool("hash")
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
+	loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
+
 	backupManager.SetupSnapshotCache(preference.Name)
-	backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile)
+	backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
 
 	runScript(context, preference.Name, "post")
 }
@@ -1046,7 +1067,7 @@ func showHistory(context *cli.Context) {
 
 	revisions := getRevisions(context)
 	showLocalHash := context.Bool("hash")
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
 	backupManager.SetupSnapshotCache(preference.Name)
@@ -1109,7 +1130,7 @@ func pruneSnapshots(context *cli.Context) {
 		os.Exit(ArgumentExitCode)
 	}
 
-	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
+	backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
 	duplicacy.SavePassword(*preference, "password", password)
 
 	backupManager.SetupSnapshotCache(preference.Name)
@@ -1149,10 +1170,12 @@ func copySnapshots(context *cli.Context) {
 		sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
 	}
 
-	sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile)
+	sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "")
 	sourceManager.SetupSnapshotCache(source.Name)
 	duplicacy.SavePassword(*source, "password", sourcePassword)
 
+	loadRSAPrivateKey(context.String("key"), source, sourceManager, false)
+
 	_, destination := getRepositoryPreference(context, context.String("to"))
 
 	if destination.Name == source.Name {
@@ -1182,7 +1205,7 @@ func copySnapshots(context *cli.Context) {
 	destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
 
 	destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
-		destinationPassword, destination.NobackupFile)
+		destinationPassword, "", "")
 	duplicacy.SavePassword(*destination, "password", destinationPassword)
 	destinationManager.SetupSnapshotCache(destination.Name)
@@ -1360,6 +1383,11 @@ func main() {
 				Usage:    "initialize a new repository at the specified path rather than the current working directory",
 				Argument: "<path>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA public key to encrypt file chunks",
+				Argument: "<public key>",
+			},
 		},
 		Usage:     "Initialize the storage if necessary and the current directory as the repository",
 		ArgsUsage: "<snapshot id> <storage url>",
@@ -1467,6 +1495,11 @@ func main() {
 				Usage:    "restore from the specified storage instead of the default one",
 				Argument: "<storage name>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA private key to decrypt file chunks",
+				Argument: "<private key>",
+			},
 		},
 		Usage:     "Restore the repository to a previously saved snapshot",
 		ArgsUsage: "[--] [pattern] ...",
@@ -1512,6 +1545,11 @@ func main() {
 				Usage:    "retrieve snapshots from the specified storage",
 				Argument: "<storage name>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA private key to decrypt file chunks",
+				Argument: "<private key>",
+			},
 		},
 		Usage:     "List snapshots",
 		ArgsUsage: " ",
@@ -1564,6 +1602,11 @@ func main() {
 				Usage:    "retrieve snapshots from the specified storage",
 				Argument: "<storage name>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA private key to decrypt file chunks",
+				Argument: "<private key>",
+			},
 		},
 		Usage:     "Check the integrity of snapshots",
 		ArgsUsage: " ",
@@ -1587,6 +1630,11 @@ func main() {
 				Usage:    "retrieve the file from the specified storage",
 				Argument: "<storage name>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA private key to decrypt file chunks",
+				Argument: "<private key>",
+			},
 		},
 		Usage:     "Print to stdout the specified file, or the snapshot content if no file is specified",
 		ArgsUsage: "[<file>]",
@@ -1615,6 +1663,11 @@ func main() {
 				Usage:    "retrieve files from the specified storage",
 				Argument: "<storage name>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA private key to decrypt file chunks",
+				Argument: "<private key>",
+			},
 		},
 		Usage:     "Compare two snapshots or two revisions of a file",
 		ArgsUsage: "[<file>]",
@@ -1779,6 +1832,11 @@ func main() {
 				Usage:    "specify the path of the repository (instead of the current working directory)",
 				Argument: "<path>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA public key to encrypt file chunks",
+				Argument: "<public key>",
+			},
 		},
 		Usage:     "Add an additional storage to be used for the existing repository",
 		ArgsUsage: "<storage name> <snapshot id> <storage url>",
@@ -1831,6 +1889,11 @@ func main() {
 				Usage:    "use the specified storage instead of the default one",
 				Argument: "<storage name>",
 			},
+			cli.StringFlag{
+				Name:     "filters",
+				Usage:    "specify the path of the filters file containing include/exclude patterns",
+				Argument: "<file path>",
+			},
 		},
 		Usage:     "Change the options for the default or specified storage",
 		ArgsUsage: " ",
@@ -1877,6 +1940,11 @@ func main() {
 				Usage:    "number of uploading threads",
 				Argument: "<n>",
 			},
+			cli.StringFlag{
+				Name:     "key",
+				Usage:    "the RSA private key to decrypt file chunks from the source storage",
+				Argument: "<public key>",
+			},
 		},
 		Usage:     "Copy snapshots between compatible storages",
 		ArgsUsage: " ",
@@ -1991,7 +2059,7 @@ func main() {
 	app.Name = "duplicacy"
 	app.HelpName = "duplicacy"
 	app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
-	app.Version = "2.1.2" + " (" + GitCommit + ")"
+	app.Version = "2.4.1" + " (" + GitCommit + ")"
 
 	// If the program is interrupted, call the RunAtError function.
 	c := make(chan os.Signal, 1)
```
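The new `-key` options above take RSA keys in PEM format: a public key at `init`/`add`/`backup` time to encrypt file chunks, and the matching private key for `restore`, `list`, `check`, `cat`, `diff`, and `copy`. As a hedged illustration only (this program is not part of the changeset, and the exact PEM encodings Duplicacy expects, assumed here to be PKCS#1 for the private key and PKIX for the public key, should be verified against the docs), a key pair could be generated with Go's standard library:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"log"
	"os"
)

func main() {
	// Generate a 2048-bit RSA key pair.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}

	// Write the private key (PKCS#1, PEM); this would be passed as
	// `-key private.pem` when restoring.
	private, err := os.Create("private.pem")
	if err != nil {
		log.Fatal(err)
	}
	defer private.Close()
	pem.Encode(private, &pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(key),
	})

	// Write the public key (PKIX, PEM); this would be passed as
	// `-key public.pem` when initializing or backing up.
	der, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		log.Fatal(err)
	}
	public, err := os.Create("public.pem")
	if err != nil {
		log.Fatal(err)
	}
	defer public.Close()
	pem.Encode(public, &pem.Block{Type: "PUBLIC KEY", Bytes: der})
}
```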
duplicacy_azurestorage.go

```diff
@@ -166,9 +166,21 @@ func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
 
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
-	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
-	blob := storage.containers[threadIndex].GetBlobReference(filePath)
-	return blob.CreateBlockBlobFromReader(reader, nil)
+
+	tries := 0
+
+	for {
+		reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
+		blob := storage.containers[threadIndex].GetBlobReference(filePath)
+		err = blob.CreateBlockBlobFromReader(reader, nil)
+
+		if err == nil || !strings.Contains(err.Error(), "write: broken pipe") || tries >= 3 {
+			return err
+		}
+
+		LOG_INFO("AZURE_RETRY", "Connection unexpectedly terminated: %v; retrying", err)
+		tries++
+	}
 }
```
duplicacy_b2client.go

```diff
@@ -5,19 +5,22 @@
 package duplicacy
 
 import (
-	"bytes"
-	"crypto/sha1"
-	"encoding/base64"
-	"encoding/hex"
-	"encoding/json"
-	"fmt"
 	"io"
-	"io/ioutil"
-	"math/rand"
-	"net/http"
+	"os"
+	"fmt"
+	"bytes"
+	"time"
+	"sync"
 	"strconv"
 	"strings"
-	"time"
+	"net/url"
+	"net/http"
+	"math/rand"
+	"io/ioutil"
+	"crypto/sha1"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/base64"
 )
 
 type B2Error struct {
```
```diff
@@ -39,67 +42,115 @@ var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account"
 
 type B2Client struct {
 	HTTPClient *http.Client
 
 	AccountID        string
 	ApplicationKeyID string
 	ApplicationKey   string
-	BucketName       string
-	BucketID         string
+	StorageDir       string
 
+	Lock               sync.Mutex
 	AuthorizationToken string
 	APIURL             string
 	DownloadURL        string
+	BucketName         string
+	BucketID           string
 	IsAuthorized       bool
 
-	UploadURL   string
-	UploadToken string
+	UploadURLs   []string
+	UploadTokens []string
 
-	TestMode bool
+	Threads        int
+	MaximumRetries int
+	TestMode       bool
+
+	LastAuthorizationTime int64
 }
 
-func NewB2Client(applicationKeyID string, applicationKey string) *B2Client {
+// URL encode the given path but keep the slashes intact
+func B2Escape(path string) string {
+	var components []string
+	for _, c := range strings.Split(path, "/") {
+		components = append(components, url.QueryEscape(c))
+	}
+	return strings.Join(components, "/")
+}
+
+func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
+
+	for storageDir != "" && storageDir[0] == '/' {
+		storageDir = storageDir[1:]
+	}
+
+	if storageDir != "" && storageDir[len(storageDir)-1] != '/' {
+		storageDir += "/"
+	}
+
+	maximumRetries := 15
+	if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
+		maximumRetries, _ = strconv.Atoi(value)
+		LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
+	}
+
 	client := &B2Client{
 		HTTPClient:       http.DefaultClient,
 		ApplicationKeyID: applicationKeyID,
 		ApplicationKey:   applicationKey,
+		DownloadURL:      downloadURL,
+		StorageDir:       storageDir,
+		UploadURLs:       make([]string, threads),
+		UploadTokens:     make([]string, threads),
+		Threads:          threads,
+		MaximumRetries:   maximumRetries,
 	}
 	return client
 }
 
-func (client *B2Client) retry(backoff int, response *http.Response) int {
+func (client *B2Client) getAPIURL() string {
+	client.Lock.Lock()
+	defer client.Lock.Unlock()
+	return client.APIURL
+}
+
+func (client *B2Client) getDownloadURL() string {
+	client.Lock.Lock()
+	defer client.Lock.Unlock()
+	return client.DownloadURL
+}
+
+func (client *B2Client) retry(retries int, response *http.Response) int {
 	if response != nil {
 		if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
 			retryAfter, _ := strconv.Atoi(backoffList[0])
 			if retryAfter >= 1 {
 				time.Sleep(time.Duration(retryAfter) * time.Second)
-				return 0
+				return 1
 			}
 		}
 	}
-	if backoff == 0 {
-		backoff = 1
-	} else {
-		backoff *= 2
+
+	if retries >= client.MaximumRetries+1 {
+		return 0
 	}
-	time.Sleep(time.Duration(backoff) * time.Second)
-	return backoff
+
+	retries++
+	delay := 1 << uint(retries)
+	if delay > 64 {
+		delay = 64
+	}
+	delayInSeconds := (rand.Float32() + 1.0) * float32(delay) / 2.0
+
+	time.Sleep(time.Duration(delayInSeconds) * time.Second)
+	return retries
 }
 
-func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
-
-	switch method {
-	case http.MethodGet:
-		break
-	case http.MethodHead:
-		break
-	case http.MethodPost:
-		break
-	default:
-		return nil, nil, 0, fmt.Errorf("unhandled http request method: " + method)
-	}
+func (client *B2Client) call(threadIndex int, requestURL string, method string, requestHeaders map[string]string, input interface{}) (
+	io.ReadCloser, http.Header, int64, error) {
 
 	var response *http.Response
 
-	backoff := 0
-	for i := 0; i < 8; i++ {
-		var inputReader *bytes.Reader
+	retries := 0
+	for {
+		var inputReader io.Reader
+		isUpload := false
 
 		switch input.(type) {
 		default:
```
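The reworked `retry` helper above replaces the old fixed doubling with jittered exponential backoff capped at 64 seconds, bounded by `MaximumRetries` (15 by default, overridable through the `DUPLICACY_B2_RETRIES` environment variable). A minimal standalone sketch of just the delay computation, with hypothetical names, to make the growth pattern visible:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors the delay computation in the new retry logic:
// exponential growth (1 << retries) capped at 64 seconds, then scaled
// by a random factor in [0.5, 1.0) to avoid synchronized retries.
func backoffDelay(retries int) time.Duration {
	delay := 1 << uint(retries)
	if delay > 64 {
		delay = 64
	}
	seconds := (rand.Float32() + 1.0) * float32(delay) / 2.0
	return time.Duration(seconds * float32(time.Second))
}

func main() {
	// Print the delay chosen for each retry attempt; by attempt 7
	// the cap dominates and delays stay in the 32-64 second range.
	for retries := 1; retries <= 8; retries++ {
		fmt.Printf("retry %d: sleep ~%v\n", retries, backoffDelay(retries))
	}
}
```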
```diff
@@ -108,21 +159,43 @@ func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
 				return nil, nil, 0, err
 			}
 			inputReader = bytes.NewReader(jsonInput)
-		case []byte:
-			inputReader = bytes.NewReader(input.([]byte))
 		case int:
 			inputReader = bytes.NewReader([]byte(""))
+		case []byte:
+			isUpload = true
+			inputReader = bytes.NewReader(input.([]byte))
+		case *RateLimitedReader:
+			isUpload = true
+			rateLimitedReader := input.(*RateLimitedReader)
+			rateLimitedReader.Reset()
+			inputReader = rateLimitedReader
 		}
 
-		request, err := http.NewRequest(method, url, inputReader)
+		if isUpload {
+			if client.UploadURLs[threadIndex] == "" || client.UploadTokens[threadIndex] == "" {
+				err := client.getUploadURL(threadIndex)
+				if err != nil {
+					return nil, nil, 0, err
+				}
+			}
+			requestURL = client.UploadURLs[threadIndex]
+		}
+
+		request, err := http.NewRequest(method, requestURL, inputReader)
 		if err != nil {
 			return nil, nil, 0, err
 		}
 
-		if url == B2AuthorizationURL {
+		if requestURL == B2AuthorizationURL {
 			request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.ApplicationKeyID+":"+client.ApplicationKey)))
+		} else if isUpload {
+			request.ContentLength, _ = strconv.ParseInt(requestHeaders["Content-Length"], 10, 64)
+			request.Header.Set("Authorization", client.UploadTokens[threadIndex])
 		} else {
+			client.Lock.Lock()
 			request.Header.Set("Authorization", client.AuthorizationToken)
+			client.Lock.Unlock()
 		}
 
 		if requestHeaders != nil {
@@ -133,7 +206,9 @@ func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
 
 		if client.TestMode {
 			r := rand.Float32()
-			if r < 0.5 {
+			if r < 0.5 && isUpload {
 				request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
 			} else if r < 0.75 {
 				request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
 			} else {
 				request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
@@ -142,28 +217,51 @@ func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
 
 		response, err = client.HTTPClient.Do(request)
 		if err != nil {
-			if url != B2AuthorizationURL {
-				LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned an error: %v", url, err)
-				backoff = client.retry(backoff, response)
-				continue
+
+			// Don't retry when the first authorization request fails
+			if requestURL == B2AuthorizationURL && !client.IsAuthorized {
+				return nil, nil, 0, err
 			}
-			return nil, nil, 0, err
+
+			LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s' returned an error: %v", threadIndex, requestURL, err)
+
+			retries = client.retry(retries, response)
+			if retries <= 0 {
+				return nil, nil, 0, err
+			}
+
+			// Clear the upload url to requrest a new one on retry
+			if isUpload {
+				client.UploadURLs[threadIndex] = ""
+				client.UploadTokens[threadIndex] = ""
+			}
+			continue
 		}
 
 		if response.StatusCode < 300 {
 			return response.Body, response.Header, response.ContentLength, nil
 		}
 
-		LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s %s' returned status code %d", method, url, response.StatusCode)
+		e := &B2Error{}
+		if err := json.NewDecoder(response.Body).Decode(e); err != nil {
+			LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned status code %d", threadIndex, method, requestURL, response.StatusCode)
+		} else {
+			LOG_TRACE("BACKBLAZE_CALL", "[%d] URL request '%s %s' returned %d %s", threadIndex, method, requestURL, response.StatusCode, e.Message)
+		}
 
 		io.Copy(ioutil.Discard, response.Body)
 		response.Body.Close()
 
 		if response.StatusCode == 401 {
-			if url == B2AuthorizationURL {
+			if requestURL == B2AuthorizationURL {
 				return nil, nil, 0, fmt.Errorf("Authorization failure")
 			}
-			client.AuthorizeAccount()
-			continue
+
+			// Attempt authorization again. If authorization is actually not done, run the random backoff
+			_, allowed := client.AuthorizeAccount(threadIndex)
+			if allowed {
+				continue
+			}
 		} else if response.StatusCode == 403 {
 			if !client.TestMode {
 				return nil, nil, 0, fmt.Errorf("B2 cap exceeded")
```
```diff
@@ -176,32 +274,21 @@ func (client *B2Client) call(url string, method string, requestHeaders map[string]string, input interface{}) (io.ReadCloser, http.Header, int64, error) {
 		} else if response.StatusCode == 416 {
 			if http.MethodHead == method {
 				// 416 Requested Range Not Satisfiable
-				return nil, nil, 0, fmt.Errorf("URL request '%s' returned status code %d", url, response.StatusCode)
+				return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
 			}
-		} else if response.StatusCode == 429 || response.StatusCode == 408 {
-			backoff = client.retry(backoff, response)
-			continue
-		} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
-			backoff = client.retry(backoff, response)
-			continue
-		} else {
-			LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
-			backoff = client.retry(backoff, response)
-			continue
 		}
 
-		defer response.Body.Close()
-
-		e := &B2Error{}
-
-		if err := json.NewDecoder(response.Body).Decode(e); err != nil {
-			return nil, nil, 0, err
+		retries = client.retry(retries, response)
+		if retries <= 0 {
+			return nil, nil, 0, fmt.Errorf("URL request '%s' returned %d %s", requestURL, response.StatusCode, e.Message)
 		}
 
-		return nil, nil, 0, e
+		if isUpload {
+			client.UploadURLs[threadIndex] = ""
+			client.UploadTokens[threadIndex] = ""
+		}
 	}
-
-	return nil, nil, 0, fmt.Errorf("Maximum backoff reached")
 }
 
 type B2AuthorizeAccountOutput struct {
```
```diff
@@ -211,11 +298,18 @@ type B2AuthorizeAccountOutput struct {
 	DownloadURL string
 }
 
-func (client *B2Client) AuthorizeAccount() (err error) {
+func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bool) {
+	client.Lock.Lock()
+	defer client.Lock.Unlock()
 
-	readCloser, _, _, err := client.call(B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
+	// Don't authorize if the previous one was done less than 30 seconds ago
+	if client.LastAuthorizationTime != 0 && client.LastAuthorizationTime > time.Now().Unix()-30 {
+		return nil, false
+	}
+
+	readCloser, _, _, err := client.call(threadIndex, B2AuthorizationURL, http.MethodPost, nil, make(map[string]string))
 	if err != nil {
-		return err
+		return err, true
 	}
 
 	defer readCloser.Close()
@@ -223,7 +317,7 @@ func (client *B2Client) AuthorizeAccount() (err error) {
 	output := &B2AuthorizeAccountOutput{}
 
 	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
-		return err
+		return err, true
 	}
 
 	// The account id may be different from the application key id so we're getting the account id from the returned
@@ -232,9 +326,15 @@ func (client *B2Client) AuthorizeAccount() (err error) {
 
 	client.AuthorizationToken = output.AuthorizationToken
 	client.APIURL = output.APIURL
-	client.DownloadURL = output.DownloadURL
+	if client.DownloadURL == "" {
+		client.DownloadURL = output.DownloadURL
+	}
+	LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
 	client.IsAuthorized = true
 
-	return nil
+	client.LastAuthorizationTime = time.Now().Unix()
+
+	return nil, true
 }
 
 type ListBucketOutput struct {
```
```diff
@@ -248,10 +348,11 @@ func (client *B2Client) FindBucket(bucketName string) (err error) {
 
 	input := make(map[string]string)
 	input["accountId"] = client.AccountID
+	input["bucketName"] = bucketName
 
-	url := client.APIURL + "/b2api/v1/b2_list_buckets"
+	url := client.getAPIURL() + "/b2api/v1/b2_list_buckets"
 
-	readCloser, _, _, err := client.call(url, http.MethodPost, nil, input)
+	readCloser, _, _, err := client.call(0, url, http.MethodPost, nil, input)
 	if err != nil {
 		return err
 	}
@@ -293,7 +394,7 @@ type B2ListFileNamesOutput struct {
 	NextFileId string
 }
 
-func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
+func (client *B2Client) ListFileNames(threadIndex int, startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 
 	maxFileCount := 1000
 	if singleFile {
@@ -311,20 +412,21 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 
 	input := make(map[string]interface{})
 	input["bucketId"] = client.BucketID
-	input["startFileName"] = startFileName
+	input["startFileName"] = client.StorageDir + startFileName
 	input["maxFileCount"] = maxFileCount
+	input["prefix"] = client.StorageDir
 
 	for {
-		url := client.APIURL + "/b2api/v1/b2_list_file_names"
+		apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
 		requestHeaders := map[string]string{}
 		requestMethod := http.MethodPost
 		var requestInput interface{}
 		requestInput = input
 		if includeVersions {
-			url = client.APIURL + "/b2api/v1/b2_list_file_versions"
+			apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
 		} else if singleFile {
 			// handle a single file with no versions as a special case to download the last byte of the file
-			url = client.DownloadURL + "/file/" + client.BucketName + "/" + startFileName
+			apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+startFileName)
 			// requesting byte -1 works for empty files where 0-0 fails with a 416 error
 			requestHeaders["Range"] = "bytes=-1"
 			// HEAD request
```
```diff
@@ -334,7 +436,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 		var readCloser io.ReadCloser
 		var responseHeader http.Header
 		var err error
-		readCloser, responseHeader, _, err = client.call(url, requestMethod, requestHeaders, requestInput)
+		readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
 		if err != nil {
 			return nil, err
 		}
@@ -347,7 +449,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 
 		if singleFile && !includeVersions {
 			if responseHeader == nil {
-				LOG_DEBUG("BACKBLAZE_LIST", "b2_download_file_by_name did not return headers")
+				LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
 				return []*B2Entry{}, nil
 			}
 			requiredHeaders := []string{
@@ -361,11 +463,17 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 				}
 			}
 			if len(missingKeys) > 0 {
-				return nil, fmt.Errorf("b2_download_file_by_name missing headers: %s", missingKeys)
+				return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
 			}
 			// construct the B2Entry from the response headers of the download request
 			fileID := responseHeader.Get("x-bz-file-id")
 			fileName := responseHeader.Get("x-bz-file-name")
+			unescapedFileName, err := url.QueryUnescape(fileName)
+			if err == nil {
+				fileName = unescapedFileName
+			} else {
+				LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
+			}
 			fileAction := "upload"
 			// byte range that is returned: "bytes #-#/#
 			rangeString := responseHeader.Get("Content-Range")
@@ -378,14 +486,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 				// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
 				fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
 				if fileSize != 0 {
-					return nil, fmt.Errorf("b2_download_file_by_name returned non-zero file length")
+					return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
 				}
 			} else {
-				return nil, fmt.Errorf("could not parse b2_download_file_by_name headers")
+				return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
 			}
 			fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
 
-			return []*B2Entry{{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
+			return []*B2Entry{{fileID, fileName[len(client.StorageDir):], fileAction, fileSize, fileUploadTimestamp}}, nil
 		}
 
 		if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
```
```diff
@@ -394,31 +502,27 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 
 		ioutil.ReadAll(readCloser)
 
-		if startFileName == "" {
-			files = append(files, output.Files...)
-		} else {
-			for _, file := range output.Files {
-				if singleFile {
-					if file.FileName == startFileName {
-						files = append(files, file)
-						if !includeVersions {
-							output.NextFileName = ""
-							break
-						}
-					} else {
-						output.NextFileName = ""
-						break
-					}
-				} else {
-					if strings.HasPrefix(file.FileName, startFileName) {
-						files = append(files, file)
-					} else {
-						output.NextFileName = ""
-						break
-					}
-				}
-			}
-		}
+		for _, file := range output.Files {
+			file.FileName = file.FileName[len(client.StorageDir):]
+			if singleFile {
+				if file.FileName == startFileName {
+					files = append(files, file)
+					if !includeVersions {
+						output.NextFileName = ""
+						break
+					}
+				} else {
+					output.NextFileName = ""
+					break
+				}
+			} else {
+				if strings.HasPrefix(file.FileName, startFileName) {
+					files = append(files, file)
+				} else {
+					output.NextFileName = ""
+					break
+				}
+			}
+		}
 
 		if len(output.NextFileName) == 0 {
```
```diff
@@ -434,14 +538,14 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
 	return files, nil
 }
 
-func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {
+func (client *B2Client) DeleteFile(threadIndex int, fileName string, fileID string) (err error) {
 
 	input := make(map[string]string)
-	input["fileName"] = fileName
+	input["fileName"] = client.StorageDir + fileName
 	input["fileId"] = fileID
 
-	url := client.APIURL + "/b2api/v1/b2_delete_file_version"
-	readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
+	url := client.getAPIURL() + "/b2api/v1/b2_delete_file_version"
+	readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
 	if err != nil {
 		return err
 	}
@@ -454,14 +558,14 @@ type B2HideFileOutput struct {
 	FileID string
 }
 
-func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
+func (client *B2Client) HideFile(threadIndex int, fileName string) (fileID string, err error) {
 
 	input := make(map[string]string)
 	input["bucketId"] = client.BucketID
-	input["fileName"] = fileName
+	input["fileName"] = client.StorageDir + fileName
 
-	url := client.APIURL + "/b2api/v1/b2_hide_file"
-	readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
+	url := client.getAPIURL() + "/b2api/v1/b2_hide_file"
+	readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
 	if err != nil {
 		return "", err
 	}
@@ -478,11 +582,11 @@ func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
 	return output.FileID, nil
 }
 
-func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, error) {
+func (client *B2Client) DownloadFile(threadIndex int, filePath string) (io.ReadCloser, int64, error) {
 
-	url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath
+	url := client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir+filePath)
 
-	readCloser, _, len, err := client.call(url, http.MethodGet, make(map[string]string), 0)
+	readCloser, _, len, err := client.call(threadIndex, url, http.MethodGet, make(map[string]string), 0)
 	return readCloser, len, err
 }
```
```diff
@@ -492,12 +596,12 @@ type B2GetUploadArgumentOutput struct {
 	AuthorizationToken string
 }
 
-func (client *B2Client) getUploadURL() error {
+func (client *B2Client) getUploadURL(threadIndex int) error {
 	input := make(map[string]string)
 	input["bucketId"] = client.BucketID
 
-	url := client.APIURL + "/b2api/v1/b2_get_upload_url"
-	readCloser, _, _, err := client.call(url, http.MethodPost, make(map[string]string), input)
+	url := client.getAPIURL() + "/b2api/v1/b2_get_upload_url"
+	readCloser, _, _, err := client.call(threadIndex, url, http.MethodPost, make(map[string]string), input)
 	if err != nil {
 		return err
 	}
@@ -510,96 +614,29 @@ func (client *B2Client) getUploadURL() error {
 		return err
 	}
 
-	client.UploadURL = output.UploadURL
-	client.UploadToken = output.AuthorizationToken
+	client.UploadURLs[threadIndex] = output.UploadURL
+	client.UploadTokens[threadIndex] = output.AuthorizationToken
 
 	return nil
 }
 
-func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
+func (client *B2Client) UploadFile(threadIndex int, filePath string, content []byte, rateLimit int) (err error) {
 
 	hasher := sha1.New()
 	hasher.Write(content)
 	hash := hex.EncodeToString(hasher.Sum(nil))
 
 	headers := make(map[string]string)
-	headers["X-Bz-File-Name"] = filePath
+	headers["X-Bz-File-Name"] = B2Escape(client.StorageDir + filePath)
 	headers["Content-Length"] = fmt.Sprintf("%d", len(content))
 	headers["Content-Type"] = "application/octet-stream"
 	headers["X-Bz-Content-Sha1"] = hash
 
-	var response *http.Response
-
-	backoff := 0
-	for i := 0; i < 8; i++ {
-
-		if client.UploadURL == "" || client.UploadToken == "" {
-			err = client.getUploadURL()
-			if err != nil {
-				return err
-			}
-		}
-
-		request, err := http.NewRequest("POST", client.UploadURL, CreateRateLimitedReader(content, rateLimit))
-		if err != nil {
-			return err
-		}
-		request.ContentLength = int64(len(content))
-
-		request.Header.Set("Authorization", client.UploadToken)
-		request.Header.Set("X-Bz-File-Name", filePath)
-		request.Header.Set("Content-Type", "application/octet-stream")
-		request.Header.Set("X-Bz-Content-Sha1", hash)
-
-		for key, value := range headers {
-			request.Header.Set(key, value)
-		}
-
-		if client.TestMode {
-			r := rand.Float32()
-			if r < 0.8 {
-				request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
-			} else if r < 0.9 {
-				request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
-			} else {
-				request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
-			}
-		}
-
-		response, err = client.HTTPClient.Do(request)
-		if err != nil {
-			LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned an error: %v", client.UploadURL, err)
-			backoff = client.retry(backoff, response)
-			client.UploadURL = ""
-			client.UploadToken = ""
-			continue
-		}
-
-		io.Copy(ioutil.Discard, response.Body)
-		response.Body.Close()
-
-		if response.StatusCode < 300 {
-			return nil
-		}
-
-		LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
-
-		if response.StatusCode == 401 {
-			LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
-			client.UploadURL = ""
-			client.UploadToken = ""
-			continue
-		} else if response.StatusCode == 403 {
-			if !client.TestMode {
-				return fmt.Errorf("B2 cap exceeded")
-			}
-			continue
-		} else {
-			LOG_INFO("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
-			backoff = client.retry(backoff, response)
-			client.UploadURL = ""
-			client.UploadToken = ""
-		}
+	readCloser, _, _, err := client.call(threadIndex, "", http.MethodPost, headers, CreateRateLimitedReader(content, rateLimit))
+	if err != nil {
+		return err
 	}
 
-	return fmt.Errorf("Maximum backoff reached")
+	readCloser.Close()
+	return nil
 }
```
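File paths now pass through `B2Escape` (on upload, download, and the single-file listing path) so that each path component is percent-encoded individually while the `/` separators survive; this also replaces the old ad-hoc space substitution in `B2Storage`. A small standalone sketch of that behavior, with a hypothetical local copy of the helper:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// b2Escape mirrors the B2Escape helper in the diff: escape each path
// component separately so the slashes between them stay intact.
func b2Escape(path string) string {
	var components []string
	for _, c := range strings.Split(path, "/") {
		components = append(components, url.QueryEscape(c))
	}
	return strings.Join(components, "/")
}

func main() {
	// Spaces become "+" under query escaping; slashes are preserved.
	fmt.Println(b2Escape("backups/my dir/chunk 01"))
	// Output: backups/my+dir/chunk+01
}
```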
duplicacy_b2client_test.go

```diff
@@ -37,7 +37,7 @@ func createB2ClientForTest(t *testing.T) (*B2Client, string) {
 		return nil, ""
 	}
 
-	return NewB2Client(b2["account"], b2["key"]), b2["bucket"]
+	return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
 }
@@ -50,7 +50,7 @@ func TestB2Client(t *testing.T) {
 
 	b2Client.TestMode = true
 
-	err := b2Client.AuthorizeAccount()
+	err, _ := b2Client.AuthorizeAccount(0)
 	if err != nil {
 		t.Errorf("Failed to authorize the b2 account: %v", err)
 		return
@@ -64,14 +64,14 @@ func TestB2Client(t *testing.T) {
 
 	testDirectory := "b2client_test/"
 
-	files, err := b2Client.ListFileNames(testDirectory, false, false)
+	files, err := b2Client.ListFileNames(0, testDirectory, false, false)
 	if err != nil {
 		t.Errorf("Failed to list files: %v", err)
 		return
 	}
 
 	for _, file := range files {
-		err = b2Client.DeleteFile(file.FileName, file.FileID)
+		err = b2Client.DeleteFile(0, file.FileName, file.FileID)
 		if err != nil {
 			t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
 		}
@@ -90,14 +90,14 @@ func TestB2Client(t *testing.T) {
 		hash := sha256.Sum256(content)
 		name := hex.EncodeToString(hash[:])
 
-		err = b2Client.UploadFile(testDirectory+name, content, 100)
+		err = b2Client.UploadFile(0, testDirectory+name, content, 100)
 		if err != nil {
 			t.Errorf("Error uploading file '%s': %v", name, err)
 			return
 		}
 	}
 
-	files, err = b2Client.ListFileNames(testDirectory, false, false)
+	files, err = b2Client.ListFileNames(0, testDirectory, false, false)
 	if err != nil {
 		t.Errorf("Failed to list files: %v", err)
 		return
@@ -105,7 +105,7 @@ func TestB2Client(t *testing.T) {
 
 	for _, file := range files {
 
-		readCloser, _, err := b2Client.DownloadFile(file.FileName)
+		readCloser, _, err := b2Client.DownloadFile(0, file.FileName)
 		if err != nil {
 			t.Errorf("Error downloading file '%s': %v", file.FileName, err)
 			return
@@ -125,7 +125,7 @@ func TestB2Client(t *testing.T) {
 	}
 
 	for _, file := range files {
-		err = b2Client.DeleteFile(file.FileName, file.FileID)
+		err = b2Client.DeleteFile(0, file.FileName, file.FileID)
 		if err != nil {
 			t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
 		}
```
@@ -11,32 +11,26 @@ import (
|
||||
type B2Storage struct {
|
||||
StorageBase
|
||||
|
||||
clients []*B2Client
|
||||
client *B2Client
|
||||
}
|
||||
|
||||
// CreateB2Storage creates a B2 storage object.
|
||||
func CreateB2Storage(accountID string, applicationKey string, bucket string, threads int) (storage *B2Storage, err error) {
|
||||
func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
|
||||
|
||||
var clients []*B2Client
|
||||
client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
|
||||
|
||||
for i := 0; i < threads; i++ {
|
||||
client := NewB2Client(accountID, applicationKey)
|
||||
err, _ = client.AuthorizeAccount(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = client.AuthorizeAccount()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = client.FindBucket(bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clients = append(clients, client)
|
||||
 	err = client.FindBucket(bucket)
 	if err != nil {
 		return nil, err
 	}

 	storage = &B2Storage{
-		clients: clients,
+		client: client,
 	}

 	storage.DerivedStorage = storage
@@ -56,7 +50,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
 		includeVersions = true
 	}

-	entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)
+	entries, err := storage.client.ListFileNames(threadIndex, dir, false, includeVersions)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -102,7 +96,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro

 	if strings.HasSuffix(filePath, ".fsl") {
 		filePath = filePath[:len(filePath)-len(".fsl")]
-		entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
+		entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
 		if err != nil {
 			return err
 		}
@@ -116,7 +110,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro

 				toBeDeleted = true

-				err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
+				err = storage.client.DeleteFile(threadIndex, filePath, entry.FileID)
 				if err != nil {
 					return err
 				}
@@ -125,7 +119,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
 		return nil

 	} else {
-		entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
+		entries, err := storage.client.ListFileNames(threadIndex, filePath, true, false)
 		if err != nil {
 			return err
 		}
@@ -133,7 +127,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
 		if len(entries) == 0 {
 			return nil
 		}
-		return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
+		return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
 	}
 }

@@ -160,10 +154,10 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
 	}

 	if filePath == from {
-		_, err = storage.clients[threadIndex].HideFile(from)
+		_, err = storage.client.HideFile(threadIndex, from)
 		return err
 	} else {
-		entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
+		entries, err := storage.client.ListFileNames(threadIndex, filePath, true, true)
 		if err != nil {
 			return err
 		}
@@ -171,7 +165,7 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
 		return nil
 		}

-		return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
+		return storage.client.DeleteFile(threadIndex, filePath, entries[0].FileID)
 	}
 }

@@ -188,7 +182,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
 		filePath = filePath[:len(filePath)-len(".fsl")]
 	}

-	entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
+	entries, err := storage.client.ListFileNames(threadIndex, filePath, true, isFossil)
 	if err != nil {
 		return false, false, 0, err
 	}
@@ -210,22 +204,20 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

 	filePath = strings.Replace(filePath, " ", "%20", -1)
-	readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
+	readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
 	if err != nil {
 		return err
 	}

 	defer readCloser.Close()

-	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
+	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.client.Threads)
 	return err
 }

 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
 	filePath = strings.Replace(filePath, " ", "%20", -1)
-	return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
+	return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
 }

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
@@ -243,7 +235,5 @@ func (storage *B2Storage) IsFastListing() bool { return true }

 // Enable the test mode.
 func (storage *B2Storage) EnableTestMode() {
-	for _, client := range storage.clients {
-		client.TestMode = true
-	}
+	storage.client.TestMode = true
 }

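Editorial sketch (not part of the patch): the B2 hunks above replace a per-thread client slice with one shared client that takes an explicit threadIndex, so rate limits are now split by the client's thread count rather than by len(storage.clients). A minimal model of that division, with hypothetical names:

	package sketch

	// B2Client stands in for the shared client; Threads mirrors the field used above.
	type B2Client struct {
		Threads  int
		TestMode bool
	}

	// perThreadLimit splits a global rate limit evenly across worker threads.
	func perThreadLimit(globalLimit int, client *B2Client) int {
		if client.Threads <= 0 {
			return globalLimit // guard against division by zero; the real code assumes Threads >= 1
		}
		return globalLimit / client.Threads
	}
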
@@ -35,6 +35,7 @@ type BackupManager struct {
 	config *Config // contains a number of options

 	nobackupFile string // don't back up a directory when this file name is found in it
+	filtersFile string // the path to the filters file
 }

 func (manager *BackupManager) SetDryRun(dryRun bool) {
@@ -44,7 +45,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
 // CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
 // identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
 // master key which can be nil if encryption is not enabled.
-func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string) *BackupManager {
+func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, filtersFile string) *BackupManager {

 	config, _, err := DownloadConfig(storage, password)
 	if err != nil {
@@ -67,6 +68,7 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
 		config: config,

 		nobackupFile: nobackupFile,
+		filtersFile: filtersFile,
 	}

 	if IsDebugging() {
@@ -76,6 +78,11 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
 	return backupManager
 }

+// LoadRSAPrivateKey loads the specified private key file for decrypting file chunks
+func (manager *BackupManager) LoadRSAPrivateKey(keyFile string, passphrase string) {
+	manager.config.loadRSAPrivateKey(keyFile, passphrase)
+}
+
 // SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
 // directory
 func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
@@ -103,6 +110,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
 	return true
 }


 // setEntryContent sets the 4 content pointers for each entry in 'entries'. 'offset' indicates the value
 // to be added to the StartChunk and EndChunk points, used when intending to append 'entries' to the
 // original unchanged entry list.
@@ -176,6 +184,10 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta

 	LOG_DEBUG("BACKUP_PARAMETERS", "top: %s, quick: %t, tag: %s", top, quickMode, tag)

+	if manager.config.rsaPublicKey != nil && len(manager.config.FileKey) > 0 {
+		LOG_INFO("BACKUP_KEY", "RSA encryption is enabled")
+	}
+
 	remoteSnapshot := manager.SnapshotManager.downloadLatestSnapshot(manager.snapshotID)
 	if remoteSnapshot == nil {
 		remoteSnapshot = CreateEmptySnapshot(manager.snapshotID)
@@ -188,7 +200,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 	defer DeleteShadowCopy()

 	LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
-	localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile)
+	localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
+		manager.nobackupFile, manager.filtersFile)
 	if err != nil {
 		LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
 		return false
@@ -760,7 +773,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
 	manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)

-	localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile)
+	localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
+		manager.filtersFile)
 	if err != nil {
 		LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
 		return false
@@ -807,6 +821,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		if compare == 0 {
 			i++
 			if quickMode && local.IsSameAs(entry) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", local.Path)
+				skipped = true
 			}
 		}
@@ -898,7 +913,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 				continue
 			}
 		} else {
-			err = os.MkdirAll(path.Dir(fullPath), 0744)
+			parent, _ := SplitDir(fullPath)
+			err = os.MkdirAll(parent, 0744)
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_MKDIR", "Failed to create directory: %v", err)
 			}
@@ -1610,6 +1626,9 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 		return true
 	}

+	// These two maps store hashes of chunks in the source and destination storages, respectively. Note that
+	// the value of 'chunks' is used to indicate if the chunk is a snapshot chunk, while the value of 'otherChunks'
+	// is not used.
 	chunks := make(map[string]bool)
 	otherChunks := make(map[string]bool)

@@ -1622,21 +1641,15 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 		LOG_TRACE("SNAPSHOT_COPY", "Copying snapshot %s at revision %d", snapshot.ID, snapshot.Revision)

 		for _, chunkHash := range snapshot.FileSequence {
-			if _, found := chunks[chunkHash]; !found {
-				chunks[chunkHash] = true
-			}
+			chunks[chunkHash] = true // The chunk is a snapshot chunk
 		}

 		for _, chunkHash := range snapshot.ChunkSequence {
-			if _, found := chunks[chunkHash]; !found {
-				chunks[chunkHash] = true
-			}
+			chunks[chunkHash] = true // The chunk is a snapshot chunk
 		}

 		for _, chunkHash := range snapshot.LengthSequence {
-			if _, found := chunks[chunkHash]; !found {
-				chunks[chunkHash] = true
-			}
+			chunks[chunkHash] = true // The chunk is a snapshot chunk
 		}

 		description := manager.SnapshotManager.DownloadSequence(snapshot.ChunkSequence)
@@ -1649,9 +1662,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho

 		for _, chunkHash := range snapshot.ChunkHashes {
 			if _, found := chunks[chunkHash]; !found {
-				chunks[chunkHash] = true
+				chunks[chunkHash] = false // The chunk is a file chunk
 			}
 		}

+		snapshot.ChunkHashes = nil
 	}

 	otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
@@ -1703,7 +1718,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 	totalSkipped := 0
 	chunkIndex := 0

-	for chunkHash := range chunks {
+	for chunkHash, isSnapshot := range chunks {
 		chunkIndex++
 		chunkID := manager.config.GetChunkIDFromHash(chunkHash)
 		newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
@@ -1714,6 +1729,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 			newChunk := otherManager.config.GetChunk()
 			newChunk.Reset(true)
 			newChunk.Write(chunk.GetBytes())
+			newChunk.isSnapshot = isSnapshot
 			chunkUploader.StartChunk(newChunk, chunkIndex)
 			totalCopied++
 		} else {

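Editorial sketch: CopySnapshots now records, for every chunk hash, whether it is a snapshot (metadata) chunk, so the re-uploaded copy can carry the isSnapshot flag that exempts metadata from RSA wrapping. A self-contained model of the bookkeeping (hypothetical function and inputs):

	package sketch

	// classify returns a map whose value is true for snapshot (metadata) chunks
	// and false for file chunks; a hash seen in both roles stays a snapshot chunk.
	func classify(fileSeq, chunkSeq, lenSeq, fileChunks []string) map[string]bool {
		chunks := make(map[string]bool)
		for _, seq := range [][]string{fileSeq, chunkSeq, lenSeq} {
			for _, hash := range seq {
				chunks[hash] = true // snapshot chunk
			}
		}
		for _, hash := range fileChunks {
			if _, found := chunks[hash]; !found {
				chunks[hash] = false // file chunk; never demotes a snapshot chunk
			}
		}
		return chunks
	}
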
@@ -227,11 +227,11 @@ func TestBackupManager(t *testing.T) {

 	time.Sleep(time.Duration(delay) * time.Second)
 	if testFixedChunkSize {
-		if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
+		if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
 			t.Errorf("Failed to initialize the storage")
 		}
 	} else {
-		if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
+		if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
 			t.Errorf("Failed to initialize the storage")
 		}
 	}
@@ -239,7 +239,7 @@ func TestBackupManager(t *testing.T) {
 	time.Sleep(time.Duration(delay) * time.Second)

 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-	backupManager := CreateBackupManager("host1", storage, testDir, password, "")
+	backupManager := CreateBackupManager("host1", storage, testDir, password, "", "")
 	backupManager.SetupSnapshotCache("default")

 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")

@@ -41,7 +41,7 @@ func benchmarkSplit(reader *bytes.Reader, fileSize int64, chunkSize int, compres
 	if encryption {
 		key = "0123456789abcdef0123456789abcdef"
 	}
-	err := chunk.Encrypt([]byte(key), "")
+	err := chunk.Encrypt([]byte(key), "", false)
 	if err != nil {
 		LOG_ERROR("BENCHMARK_ENCRYPT", "Failed to encrypt the chunk: %v", err)
 	}

@@ -8,11 +8,13 @@ import (
 	"bytes"
 	"compress/zlib"
 	"crypto/aes"
+	"crypto/rsa"
 	"crypto/cipher"
 	"crypto/hmac"
 	"crypto/rand"
 	"crypto/sha256"
 	"encoding/hex"
+	"encoding/binary"
 	"fmt"
 	"hash"
 	"io"
@@ -60,11 +62,17 @@ type Chunk struct {

 	config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
 	// by the config
+
+	isSnapshot bool // Indicates if the chunk is a snapshot chunk (instead of a file chunk). This is only used by RSA
+	// encryption, where a snapshot chunk is not encrypted by RSA
 }

 // Magic word to identify a duplicacy format encrypted file, plus a version number.
 var ENCRYPTION_HEADER = "duplicacy\000"

+// RSA encrypted chunks start with "duplicacy\002"
+var ENCRYPTION_VERSION_RSA byte = 2
+
 // CreateChunk creates a new chunk.
 func CreateChunk(config *Config, bufferNeeded bool) *Chunk {

@@ -113,6 +121,7 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
 	chunk.hash = nil
 	chunk.id = ""
 	chunk.size = 0
+	chunk.isSnapshot = false
 }

 // Write implements the Writer interface.
@@ -170,7 +179,7 @@ func (chunk *Chunk) VerifyID() {

 // Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
 // encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
-func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
+func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapshot bool) (err error) {

 	var aesBlock cipher.Block
 	var gcm cipher.AEAD
@@ -186,8 +195,17 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
 	if len(encryptionKey) > 0 {

 		key := encryptionKey

-		if len(derivationKey) > 0 {
+		usingRSA := false
+		// Enable RSA encryption only when the chunk is not a snapshot chunk
+		if chunk.config.rsaPublicKey != nil && !isSnapshot && !chunk.isSnapshot {
+			randomKey := make([]byte, 32)
+			_, err := rand.Read(randomKey)
+			if err != nil {
+				return err
+			}
+			key = randomKey
+			usingRSA = true
+		} else if len(derivationKey) > 0 {
 			hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
 			hasher.Write(encryptionKey)
 			key = hasher.Sum(nil)
@@ -204,7 +222,21 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
 		}

 		// Start with the magic number and the version number.
-		encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
+		if usingRSA {
+			// RSA encryption starts "duplicacy\002"
+			encryptedBuffer.Write([]byte(ENCRYPTION_HEADER)[:len(ENCRYPTION_HEADER)-1])
+			encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
+
+			// Then the encrypted key
+			encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPublicKey, key, nil)
+			if err != nil {
+				return err
+			}
+			binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
+			encryptedBuffer.Write(encryptedKey)
+		} else {
+			encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
+		}

 		// Followed by the nonce
 		nonce = make([]byte, gcm.NonceSize())
@@ -214,7 +246,6 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
 		}
 		encryptedBuffer.Write(nonce)
-		offset = encryptedBuffer.Len()

 	}

 	// offset is either 0 or the length of header + nonce
@@ -291,6 +322,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 	}()

 	chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
+	headerLength := len(ENCRYPTION_HEADER)

 	if len(encryptionKey) > 0 {

@@ -308,6 +340,41 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			key = hasher.Sum(nil)
 		}

+		if len(encryptedBuffer.Bytes()) < headerLength+12 {
+			return fmt.Errorf("Not enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
+		}
+
+		if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
+			return fmt.Errorf("The storage doesn't seem to be encrypted")
+		}
+
+		encryptionVersion := encryptedBuffer.Bytes()[headerLength-1]
+		if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
+			return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
+		}
+
+		if encryptionVersion == ENCRYPTION_VERSION_RSA {
+			if chunk.config.rsaPrivateKey == nil {
+				LOG_ERROR("CHUNK_DECRYPT", "An RSA private key is required to decrypt the chunk")
+				return fmt.Errorf("An RSA private key is required to decrypt the chunk")
+			}
+
+			encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[headerLength : headerLength+2])
+
+			if len(encryptedBuffer.Bytes()) < headerLength+14+int(encryptedKeyLength) {
+				return fmt.Errorf("Not enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
+			}
+
+			encryptedKey := encryptedBuffer.Bytes()[headerLength+2 : headerLength+2+int(encryptedKeyLength)]
+			headerLength += 2 + int(encryptedKeyLength)
+
+			decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
+			if err != nil {
+				return err
+			}
+			key = decryptedKey
+		}
+
 		aesBlock, err := aes.NewCipher(key)
 		if err != nil {
 			return err
@@ -318,21 +385,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 			return err
 		}

-		headerLength := len(ENCRYPTION_HEADER)
 		offset = headerLength + gcm.NonceSize()
-
-		if len(encryptedBuffer.Bytes()) < offset {
-			return fmt.Errorf("Not enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
-		}
-
-		if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
-			return fmt.Errorf("The storage doesn't seem to be encrypted")
-		}
-
-		if encryptedBuffer.Bytes()[headerLength-1] != 0 {
-			return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
-		}

 		nonce := encryptedBuffer.Bytes()[headerLength:offset]

 		decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,

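Editorial sketch, inferred from the hunks above rather than stated anywhere in the patch: an RSA-encrypted chunk is framed as "duplicacy\x02", a little-endian uint16 key length, the RSA-OAEP-wrapped random AES key, and then the usual GCM nonce and ciphertext. A minimal writer for the key-wrapping header:

	package sketch

	import (
		"bytes"
		"crypto/rand"
		"crypto/rsa"
		"crypto/sha256"
		"encoding/binary"
	)

	// writeRSAHeader frames a freshly generated AES key the way Encrypt does above:
	// the magic word with version byte 2, then the OAEP-encrypted key preceded by its length.
	func writeRSAHeader(buf *bytes.Buffer, pub *rsa.PublicKey, key []byte) error {
		buf.WriteString("duplicacy")
		buf.WriteByte(2) // ENCRYPTION_VERSION_RSA
		encryptedKey, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, pub, key, nil)
		if err != nil {
			return err
		}
		// writes to a bytes.Buffer cannot fail, so this error is safe to ignore
		binary.Write(buf, binary.LittleEndian, uint16(len(encryptedKey)))
		buf.Write(encryptedKey)
		return nil
	}
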
@@ -7,6 +7,7 @@ package duplicacy
 import (
 	"bytes"
 	crypto_rand "crypto/rand"
+	"crypto/rsa"
 	"math/rand"
 	"testing"
 )
@@ -22,6 +23,15 @@ func TestChunk(t *testing.T) {
 	config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
 	maxSize := 1000000

+	if testRSAEncryption {
+		privateKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
+		if err != nil {
+			t.Errorf("Failed to generate a random private key: %v", err)
+		}
+		config.rsaPrivateKey = privateKey
+		config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
+	}
+
 	remainderLength := -1

 	for i := 0; i < 500; i++ {
@@ -37,7 +47,7 @@ func TestChunk(t *testing.T) {
 	hash := chunk.GetHash()
 	id := chunk.GetID()

-	err := chunk.Encrypt(key, "")
+	err := chunk.Encrypt(key, "", false)
 	if err != nil {
 		t.Errorf("Failed to encrypt the data: %v", err)
 		continue

@@ -197,6 +197,16 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
 	downloader.lastChunkIndex = chunkIndex
 }

+// Return the chunk last downloaded and its hash
+func (downloader *ChunkDownloader) GetLastDownloadedChunk() (chunk *Chunk, chunkHash string) {
+	if downloader.lastChunkIndex >= len(downloader.taskList) {
+		return nil, ""
+	}
+
+	task := downloader.taskList[downloader.lastChunkIndex]
+	return task.chunk, task.chunkHash
+}
+
 // WaitForChunk waits until the specified chunk is ready
 func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {

@@ -220,7 +230,7 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
 		}
 		task := &downloader.taskList[i]
 		if !task.needed {
-			continue
+			break
 		}

 		if !task.isDownloading {

@@ -128,7 +128,7 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
 	}

 	// Encrypt the chunk only after we know that it must be uploaded.
-	err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
+	err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash(), uploader.snapshotCache != nil)
 	if err != nil {
 		LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
 		return false

@@ -9,15 +9,20 @@ import (
 	"crypto/hmac"
 	"crypto/rand"
 	"crypto/sha256"
+	"crypto/rsa"
+	"crypto/x509"
 	"encoding/binary"
 	"encoding/hex"
 	"encoding/json"
+	"encoding/pem"
 	"fmt"
 	"hash"
 	"os"
 	"runtime"
 	"runtime/debug"
 	"sync/atomic"
+	"io/ioutil"
+	"reflect"

 	blake2 "github.com/minio/blake2b-simd"
 )
@@ -65,6 +70,10 @@ type Config struct {
 	// for encrypting a non-chunk file
 	FileKey []byte `json:"-"`

+	// for RSA encryption
+	rsaPrivateKey *rsa.PrivateKey
+	rsaPublicKey *rsa.PublicKey
+
 	chunkPool chan *Chunk
 	numberOfChunks int32
 	dryRun bool
@@ -80,10 +89,15 @@ type jsonableConfig struct {
 	IDKey string `json:"id-key"`
 	ChunkKey string `json:"chunk-key"`
 	FileKey string `json:"file-key"`
+	RSAPublicKey string `json:"rsa-public-key"`
 }

 func (config *Config) MarshalJSON() ([]byte, error) {

+	publicKey := []byte{}
+	if config.rsaPublicKey != nil {
+		publicKey, _ = x509.MarshalPKIXPublicKey(config.rsaPublicKey)
+	}
 	return json.Marshal(&jsonableConfig{
 		aliasedConfig: (*aliasedConfig)(config),
 		ChunkSeed: hex.EncodeToString(config.ChunkSeed),
@@ -91,6 +105,7 @@ func (config *Config) MarshalJSON() ([]byte, error) {
 		IDKey: hex.EncodeToString(config.IDKey),
 		ChunkKey: hex.EncodeToString(config.ChunkKey),
 		FileKey: hex.EncodeToString(config.FileKey),
+		RSAPublicKey: hex.EncodeToString(publicKey),
 	})
 }

@@ -120,6 +135,19 @@ func (config *Config) UnmarshalJSON(description []byte) (err error) {
 		return fmt.Errorf("Invalid representation of the file key in the config")
 	}

+	if publicKey, err := hex.DecodeString(aliased.RSAPublicKey); err != nil {
+		return fmt.Errorf("Invalid hex encoding of the RSA public key in the config")
+	} else if len(publicKey) > 0 {
+		parsedKey, err := x509.ParsePKIXPublicKey(publicKey)
+		if err != nil {
+			return fmt.Errorf("Invalid RSA public key in the config: %v", err)
+		}
+		config.rsaPublicKey = parsedKey.(*rsa.PublicKey)
+		if config.rsaPublicKey == nil {
+			return fmt.Errorf("Unsupported public key type %s in the config", reflect.TypeOf(parsedKey))
+		}
+	}
+
 	return nil
 }

@@ -140,6 +168,29 @@ func (config *Config) Print() {
 	LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
 	LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
 	LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
+
+	LOG_TRACE("CONFIG_INFO", "Hash key: %x", config.HashKey)
+	LOG_TRACE("CONFIG_INFO", "ID key: %x", config.IDKey)
+
+	if len(config.ChunkKey) > 0 {
+		LOG_TRACE("CONFIG_INFO", "File chunks are encrypted")
+	}
+
+	if len(config.FileKey) > 0 {
+		LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
+	}
+
+	if config.rsaPublicKey != nil {
+		pkixPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
+
+		publicKey := pem.EncodeToMemory(&pem.Block{
+			Type:  "PUBLIC KEY",
+			Bytes: pkixPublicKey,
+		})
+
+		LOG_TRACE("CONFIG_INFO", "RSA public key: %s", publicKey)
+	}
+
 }

 func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
@@ -430,7 +481,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i

 	if len(password) > 0 {
 		// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
-		err = chunk.Encrypt(masterKey, "")
+		err = chunk.Encrypt(masterKey, "", true)
 		if err != nil {
 			LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
 			return false
@@ -477,7 +528,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
 // it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
 // is enabled.
 func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
-	minimumChunkSize int, password string, copyFrom *Config, bitCopy bool) bool {
+	minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string) bool {

 	exist, _, _, err := storage.GetFileInfo(0, "config")
 	if err != nil {
@@ -496,5 +547,113 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
 		return false
 	}

+	if keyFile != "" {
+		config.loadRSAPublicKey(keyFile)
+	}
 	return UploadConfig(storage, config, password, iterations)
 }

+func (config *Config) loadRSAPublicKey(keyFile string) {
+	encodedKey, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		LOG_ERROR("BACKUP_KEY", "Failed to read the public key file: %v", err)
+		return
+	}
+
+	decodedKey, _ := pem.Decode(encodedKey)
+	if decodedKey == nil {
+		LOG_ERROR("RSA_PUBLIC", "Unrecognized public key in %s", keyFile)
+		return
+	}
+	if decodedKey.Type != "PUBLIC KEY" {
+		LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", decodedKey.Type, keyFile)
+		return
+	}
+
+	parsedKey, err := x509.ParsePKIXPublicKey(decodedKey.Bytes)
+	if err != nil {
+		LOG_ERROR("RSA_PUBLIC", "Failed to parse the public key in %s: %v", keyFile, err)
+		return
+	}
+
+	key, ok := parsedKey.(*rsa.PublicKey)
+	if !ok {
+		LOG_ERROR("RSA_PUBLIC", "Unsupported public key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
+		return
+	}
+
+	config.rsaPublicKey = key
+}
+
+// loadRSAPrivateKey loads the specified private key file for decrypting file chunks
+func (config *Config) loadRSAPrivateKey(keyFile string, passphrase string) {
+
+	if config.rsaPublicKey == nil {
+		LOG_ERROR("RSA_PUBLIC", "The storage was not encrypted by an RSA key")
+		return
+	}
+
+	encodedKey, err := ioutil.ReadFile(keyFile)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Failed to read the private key file: %v", err)
+		return
+	}
+
+	decodedKey, _ := pem.Decode(encodedKey)
+	if decodedKey == nil {
+		LOG_ERROR("RSA_PRIVATE", "Unrecognized private key in %s", keyFile)
+		return
+	}
+	if decodedKey.Type != "RSA PRIVATE KEY" {
+		LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", decodedKey.Type, keyFile)
+		return
+	}
+
+	var decodedKeyBytes []byte
+	if passphrase != "" {
+		decodedKeyBytes, err = x509.DecryptPEMBlock(decodedKey, []byte(passphrase))
+	} else {
+		decodedKeyBytes = decodedKey.Bytes
+	}
+
+	var parsedKey interface{}
+	if parsedKey, err = x509.ParsePKCS1PrivateKey(decodedKeyBytes); err != nil {
+		if parsedKey, err = x509.ParsePKCS8PrivateKey(decodedKeyBytes); err != nil {
+			LOG_ERROR("RSA_PRIVATE", "Failed to parse the private key in %s: %v", keyFile, err)
+			return
+		}
+	}
+
+	key, ok := parsedKey.(*rsa.PrivateKey)
+	if !ok {
+		LOG_ERROR("RSA_PRIVATE", "Unsupported private key type %s in %s", reflect.TypeOf(parsedKey), keyFile)
+		return
+	}
+
+	data := make([]byte, 32)
+	_, err = rand.Read(data)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Failed to generate random data for testing the private key: %v", err)
+		return
+	}
+
+	// Now test if the private key matches the public key
+	encryptedData, err := rsa.EncryptOAEP(sha256.New(), rand.Reader, config.rsaPublicKey, data, nil)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Failed to encrypt random data with the public key: %v", err)
+		return
+	}
+
+	decryptedData, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, key, encryptedData, nil)
+	if err != nil {
+		LOG_ERROR("RSA_PRIVATE", "Incorrect private key: %v", err)
+		return
+	}
+
+	if !bytes.Equal(data, decryptedData) {
+		LOG_ERROR("RSA_PRIVATE", "Decrypted data does not match the original data")
+		return
+	}
+
+	config.rsaPrivateKey = key
+}

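Editorial note: loadRSAPublicKey above expects a PEM block of type "PUBLIC KEY" (PKIX), and loadRSAPrivateKey accepts PKCS#1 or PKCS#8, optionally passphrase-encrypted. Assuming OpenSSL 1.x default output formats, a compatible key pair can likely be produced with:

	openssl genrsa -aes256 -out private.pem 2048
	openssl rsa -in private.pem -pubout -out public.pem
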
@@ -490,7 +490,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 	}
 	if entry.IsLink() {
 		isRegular := false
-		isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
+		isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
 		if err != nil {
 			LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
 			skippedFiles = append(skippedFiles, entry.Path)
@@ -500,7 +500,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 		if isRegular {
 			entry.Mode ^= uint32(os.ModeSymlink)
 		} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
-			stat, err := os.Stat(filepath.Join(top, entry.Path))
+			stat, err := os.Stat(joinPath(top, entry.Path))
 			if err != nil {
 				LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
 				skippedFiles = append(skippedFiles, entry.Path)
@@ -513,6 +513,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 			// path from f.Name(); note that a "/" is appended, assuming a symbolic link is always a directory
 			newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
 		}
+		if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
+			continue
+		}
 		entry = newEntry
 	}
 }

@@ -78,6 +78,10 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
 		// User Rate Limit Exceeded
 		message = e.Message
 		retry = true
+	} else if e.Code == 408 {
+		// Request timeout
+		message = e.Message
+		retry = true
 	} else if e.Code == 401 {
 		// Only retry on authorization error when storage has been connected before
 		if storage.isConnected {

@@ -25,6 +25,7 @@ type Preference struct {
 	DoNotSavePassword bool `json:"no_save_password"`
 	NobackupFile string `json:"nobackup_file"`
 	Keys map[string]string `json:"keys"`
+	FiltersFile string `json:"filters"`
 }

 var preferencePath string

@@ -210,7 +210,7 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *

 	defer output.Body.Close()

-	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.bucket))
+	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/storage.numberOfThreads)
 	return err

 }
@@ -225,7 +225,7 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
 		Bucket: aws.String(storage.bucket),
 		Key: aws.String(storage.storageDir + filePath),
 		ACL: aws.String(s3.ObjectCannedACLPrivate),
-		Body: CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.bucket)),
+		Body: CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads),
 		ContentType: aws.String("application/duplicacy"),
 	}

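Editorial note: the two S3 hunks above fix a real bug rather than just renaming a field — the old code divided the rate limit by len(storage.bucket), which is the length of the bucket name string. With a bucket named "mybucket" a 1000 KB/s limit became 125 KB/s per transfer regardless of thread count; dividing by numberOfThreads restores the intended per-thread split.
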
@@ -91,7 +91,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
 		storageDir: storageDir,
 		minimumNesting: minimumNesting,
 		numberOfThreads: threads,
-		numberOfTries: 6,
+		numberOfTries: 8,
 		serverAddress: serverAddress,
 		sftpConfig: sftpConfig,
 	}
@@ -129,22 +129,19 @@ func (storage *SFTPStorage) retry(f func () error) error {
 		delay *= 2

 		storage.clientLock.Lock()
 		if storage.client != nil {
 			storage.client.Close()
 			storage.client = nil
 		}

 		connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
 		if err != nil {
 			LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
 			storage.clientLock.Unlock()
-			return err
+			continue
 		}

 		client, err := sftp.NewClient(connection)
 		if err != nil {
 			LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
 			connection.Close()
 			storage.clientLock.Unlock()
-			return err
+			continue
 		}
 		storage.client = client
 		storage.clientLock.Unlock()
@@ -275,36 +272,19 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
 	fullPath := path.Join(storage.storageDir, filePath)

 	dirs := strings.Split(filePath, "/")
-	if len(dirs) > 1 {
-		fullDir := path.Dir(fullPath)
-		err = storage.retry(func() error {
-			_, err := storage.getSFTPClient().Stat(fullDir)
-			return err
-		})
-		if err != nil {
-			// The error may be caused by a non-existent fullDir, or a broken connection. In either case,
-			// we just assume it is the former because there isn't a way to tell which is the case.
-			for i := range dirs[1 : len(dirs)-1] {
-				subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
-				// We don't check the error; just keep going blindly but always store the last err
-				err = storage.getSFTPClient().Mkdir(subDir)
-			}
+	fullDir := path.Dir(fullPath)
+	return storage.retry(func() error {

-			// If there is an error creating the dirs, we check fullDir one more time, because another thread
-			// may happen to create the same fullDir ahead of this thread
-			if err != nil {
-				err = storage.retry(func() error {
-					_, err := storage.getSFTPClient().Stat(fullDir)
-					return err
-				})
-				if err != nil {
-					return err
+		if len(dirs) > 1 {
+			_, err := storage.getSFTPClient().Stat(fullDir)
+			if os.IsNotExist(err) {
+				for i := range dirs[1 : len(dirs)-1] {
+					subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
+					// We don't check the error; just keep going blindly
+					storage.getSFTPClient().Mkdir(subDir)
 				}
 			}
 		}
-	}

-	return storage.retry(func() error {

 		letters := "abcdefghijklmnopqrstuvwxyz"
 		suffix := make([]byte, 8)

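Editorial sketch of the control flow after this change (hypothetical names; only the tries count and the doubling delay are taken from the hunks above): a failed re-dial now continues the retry loop with backoff instead of aborting the whole operation.

	package sketch

	import "time"

	// retryWithReconnect runs f, and on failure re-establishes the session and tries
	// again with exponential backoff; reconnect failures no longer end the loop early.
	func retryWithReconnect(tries int, reconnect func() error, f func() error) error {
		delay := time.Second
		var err error
		for i := 0; i < tries; i++ {
			if err = f(); err == nil {
				return nil
			}
			time.Sleep(delay)
			delay *= 2
			if reconnect() != nil {
				continue // keep retrying rather than surfacing the reconnect error
			}
		}
		return err
	}
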
@@ -13,6 +13,7 @@ import (
 	"io/ioutil"
 	"os"
 	"os/exec"
+	"regexp"
 	"strings"
 	"syscall"
 	"time"
@@ -123,11 +124,11 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 	}
 	deviceIdRepository, err := GetPathDeviceId(top)
 	if err != nil {
-		LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
+		LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
 		return top
 	}
 	if deviceIdLocal != deviceIdRepository {
-		LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
+		LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
 		return top
 	}

@@ -145,22 +146,37 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 	// Use tmutil to create snapshot
 	tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
 	if err != nil {
-		LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
+		LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
 		return top
 	}

 	colonPos := strings.IndexByte(tmutilOutput, ':')
 	if colonPos < 0 {
-		LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
+		LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
 		return top
 	}
 	snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])

+	tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
+	if err != nil {
+		LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
+		return top
+	}
+	snapshotName := "com.apple.TimeMachine." + snapshotDate
+
+	r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
+	snapshotNames := r.FindStringSubmatch(tmutilOutput)
+	if len(snapshotNames) > 0 {
+		snapshotName = snapshotNames[0]
+	} else {
+		LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fall back to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
+	}
+
 	// Mount snapshot as readonly and hide from GUI i.e. Finder
 	_, err = CommandWithTimeout(timeoutInSeconds,
-		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
+		"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
 	if err != nil {
-		LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
+		LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
 		return top
 	}

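Editorial note: on macOS the snapshot is mounted read-only with nobrowse so Finder hides it. With a snapshot named com.apple.TimeMachine.2019-01-01-120000 (hypothetical date) and a mount point under the repository, the resulting command is:

	/sbin/mount -t apfs -o nobrowse,-r,-s=com.apple.TimeMachine.2019-01-01-120000 / /path/to/snapshot
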
@@ -11,6 +11,7 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"strconv"
 	"strings"
 	"time"
@@ -57,7 +58,7 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {

 // CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
 // will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
-func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (snapshot *Snapshot, skippedDirectories []string,
+func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string) (snapshot *Snapshot, skippedDirectories []string,
 	skippedFiles []string, err error) {

 	snapshot = &Snapshot{
@@ -68,47 +69,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn

 	var patterns []string

-	patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
-	if err == nil {
-		for _, pattern := range strings.Split(string(patternFile), "\n") {
-			pattern = strings.TrimSpace(pattern)
-			if len(pattern) == 0 {
-				continue
-			}
-
-			if pattern[0] == '#' {
-				continue
-			}
-
-			if IsUnspecifiedFilter(pattern) {
-				pattern = "+" + pattern
-			}
-
-			if IsEmptyFilter(pattern) {
-				continue
-			}
-
-			if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
-				valid, err := IsValidRegex(pattern[2:])
-				if !valid || err != nil {
-					LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
-				}
-			}
-
-			patterns = append(patterns, pattern)
-		}
-
-		LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
-
-		LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
-
-		if IsTracing() {
-			for _, pattern := range patterns {
-				LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
-			}
-		}
-
+	if filtersFile == "" {
+		filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
 	}
+	patterns = ProcessFilters(filtersFile)

 	directories := make([]*Entry, 0, 256)
 	directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -150,6 +114,103 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
 	return snapshot, skippedDirectories, skippedFiles, nil
 }

+func AppendPattern(patterns []string, new_pattern string) (new_patterns []string) {
+	for _, pattern := range patterns {
+		if pattern == new_pattern {
+			LOG_INFO("SNAPSHOT_FILTER", "Ignoring duplicate pattern: %s ...", new_pattern)
+			return patterns
+		}
+	}
+	new_patterns = append(patterns, new_pattern)
+	return new_patterns
+}
+
+func ProcessFilters(filtersFile string) (patterns []string) {
+	patterns = ProcessFilterFile(filtersFile, make([]string, 0))
+
+	LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
+
+	LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
+
+	if IsTracing() {
+		for _, pattern := range patterns {
+			LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
+		}
+	}
+
+	return patterns
+}
+
+func ProcessFilterFile(patternFile string, includedFiles []string) (patterns []string) {
+	for _, file := range includedFiles {
+		if file == patternFile {
+			// A cycle in the include mechanism was discovered.
+			LOG_ERROR("SNAPSHOT_FILTER", "The filter file %s has already been included", patternFile)
+			return patterns
+		}
+	}
+	includedFiles = append(includedFiles, patternFile)
+	LOG_INFO("SNAPSHOT_FILTER", "Parsing filter file %s", patternFile)
+	patternFileContent, err := ioutil.ReadFile(patternFile)
+	if err == nil {
+		patternFileLines := strings.Split(string(patternFileContent), "\n")
+		patterns = ProcessFilterLines(patternFileLines, includedFiles)
+	}
+	return patterns
+}
+
+func ProcessFilterLines(patternFileLines []string, includedFiles []string) (patterns []string) {
+	for _, pattern := range patternFileLines {
+		pattern = strings.TrimSpace(pattern)
+		if len(pattern) == 0 {
+			continue
+		}
+
+		if strings.HasPrefix(pattern, "@") {
+			patternIncludeFile := strings.TrimSpace(pattern[1:])
+			if patternIncludeFile == "" {
+				continue
+			}
+			if !filepath.IsAbs(patternIncludeFile) {
+				basePath := ""
+				if len(includedFiles) == 0 {
+					basePath, _ = os.Getwd()
+				} else {
+					basePath = filepath.Dir(includedFiles[len(includedFiles)-1])
+				}
+				patternIncludeFile = joinPath(basePath, patternIncludeFile)
+			}
+			for _, pattern := range ProcessFilterFile(patternIncludeFile, includedFiles) {
+				patterns = AppendPattern(patterns, pattern)
+			}
+			continue
+		}
+
+		if pattern[0] == '#' {
+			continue
+		}
+
+		if IsUnspecifiedFilter(pattern) {
+			pattern = "+" + pattern
+		}
+
+		if IsEmptyFilter(pattern) {
+			continue
+		}
+
+		if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
+			valid, err := IsValidRegex(pattern[2:])
+			if !valid || err != nil {
+				LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
+			}
+		}
+
+		patterns = AppendPattern(patterns, pattern)
+	}

+	return patterns
+}

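Editorial example of the new '@' include syntax handled by ProcessFilterLines above (file names hypothetical): an @-line pulls in another pattern file, relative paths resolve against the including file's directory, duplicate patterns are dropped by AppendPattern, and re-including a file already on the include chain is rejected as a cycle.

	# .duplicacy/filters
	@common-excludes.txt
	+source/
	-*
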
 // This is the struct used to save/load incomplete snapshots
 type IncompleteSnapshot struct {
 	Files []*Entry

@@ -759,8 +759,8 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
 func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
 	checkFiles bool, searchFossils bool, resurrect bool) bool {

-	LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
-		snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
+	LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
+		snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)

 	snapshotMap := make(map[string][]*Snapshot)
 	var err error
@@ -790,7 +790,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 		chunkSizeMap[chunk] = allSizes[i]
 	}

-	if snapshotID == "" || showStatistics {
+	if snapshotID == "" || showStatistics || showTabular {
 		snapshotIDs, err := manager.ListSnapshotIDs()
 		if err != nil {
 			LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
@@ -810,7 +810,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 	for snapshotID = range snapshotMap {

 		revisions := revisionsToCheck
-		if len(revisions) == 0 || showStatistics {
+		if len(revisions) == 0 || showStatistics || showTabular {
 			revisions, err = manager.ListSnapshotRevisions(snapshotID)
 			if err != nil {
 				LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
@@ -860,6 +860,20 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 			_, found := chunkSizeMap[chunkID]

 			if !found {
+
+				// Look up the chunk again in case it actually exists, but only if there aren't
+				// too many missing chunks.
+				if missingChunks < 100 {
+					_, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
+					if err != nil {
+						LOG_WARN("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
+							chunkID, err)
+					} else if exist {
+						LOG_INFO("SNAPSHOT_VALIDATE", "Chunk %s is confirmed to exist", chunkID)
+						continue
+					}
+				}
+
 				if !searchFossils {
 					missingChunks += 1
 					LOG_WARN("SNAPSHOT_VALIDATE",
@@ -870,7 +884,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe

 				chunkPath, exist, size, err := manager.storage.FindChunk(0, chunkID, true)
 				if err != nil {
-					LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of chunk %s: %v",
+					LOG_ERROR("SNAPSHOT_VALIDATE", "Failed to check the existence of fossil %s: %v",
 						chunkID, err)
 					return false
 				}
@@ -998,18 +1012,20 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
 	earliestSeenChunks := make(map[string]int)

 	for _, snapshot := range snapshotList {
-		for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
+		for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
 			if earliestSeenChunks[chunkID] == 0 {
 				earliestSeenChunks[chunkID] = math.MaxInt32
 			}
-			earliestSeenChunks[chunkID] = MinInt(earliestSeenChunks[chunkID], snapshot.Revision)
+			if earliestSeenChunks[chunkID] > snapshot.Revision {
+				earliestSeenChunks[chunkID] = snapshot.Revision
+			}
 		}
 	}

 	for _, snapshot := range snapshotList {

 		chunks := make(map[string]bool)
-		for _, chunkID := range manager.GetSnapshotChunks(snapshot, true) {
+		for _, chunkID := range manager.GetSnapshotChunks(snapshot, false) {
 			chunks[chunkID] = true
 			snapshotChunks[chunkID] = true
 		}
@@ -1178,7 +1194,6 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
 	}

 	var chunk *Chunk
-	currentHash := ""

 	for i := file.StartChunk; i <= file.EndChunk; i++ {
 		start := 0
@@ -1191,10 +1206,12 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
 		}

 		hash := snapshot.ChunkHashes[i]
-		if currentHash != hash {
+		lastChunk, lastChunkHash := manager.chunkDownloader.GetLastDownloadedChunk()
+		if lastChunkHash != hash {
 			i := manager.chunkDownloader.AddChunk(hash)
 			chunk = manager.chunkDownloader.WaitForChunk(i)
-			currentHash = hash
+		} else {
+			chunk = lastChunk
 		}

 		output(chunk.GetBytes()[start:end])

|
||||
}
|
||||
|
||||
file := manager.FindFile(snapshot, path, false)
|
||||
var content []byte
|
||||
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
|
||||
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) {
|
||||
fmt.Printf("%s", chunk)
|
||||
}) {
|
||||
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
|
||||
path, snapshot.ID, snapshot.Revision)
|
||||
return false
|
||||
}
|
||||
|
||||
fmt.Printf("%s", string(content))
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Diff compares two snapshots, or two revision of a file if the file argument is given.
|
||||
func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
|
||||
filePath string, compareByHash bool, nobackupFile string) bool {
|
||||
filePath string, compareByHash bool, nobackupFile string, filtersFile string) bool {
|
||||
|
||||
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
|
||||
top, snapshotID, revisions, filePath, compareByHash)
|
||||
@@ -1296,7 +1312,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
||||
if len(revisions) <= 1 {
|
||||
// Only scan the repository if filePath is not provided
|
||||
if len(filePath) == 0 {
|
||||
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile)
|
||||
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile)
|
||||
if err != nil {
|
||||
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
|
||||
return false
|
||||
@@ -1467,7 +1483,11 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
||||
same = right.IsSameAs(left)
|
||||
}
|
||||
} else {
|
||||
same = left.Hash == right.Hash
|
||||
if left.Size == 0 && right.Size == 0 {
|
||||
same = true
|
||||
} else {
|
||||
same = left.Hash == right.Hash
|
||||
}
|
||||
}
|
||||
|
||||
if !same {
|
||||
@@ -1838,7 +1858,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
|
||||
if _, found := newChunks[chunk]; found {
|
||||
// The fossil is referenced so it can't be deleted.
|
||||
if dryRun {
|
||||
LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected: %v", chunk)
|
||||
LOG_INFO("FOSSIL_RESURRECT", "Fossil %s would be resurrected", chunk)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -2446,7 +2466,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
|
||||
derivationKey = derivationKey[len(derivationKey)-64:]
|
||||
}
|
||||
|
||||
err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)
|
||||
err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey, true)
|
||||
if err != nil {
|
||||
LOG_ERROR("UPLOAD_File", "Failed to encrypt the file %s: %v", path, err)
|
||||
return false
|
||||
|
||||
@@ -526,11 +526,30 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 		return dropboxStorage
 	} else if matched[1] == "b2" {
 		bucket := matched[3]
+		storageDir := matched[5]

-		accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
-		applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)
+		accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
+		applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)

-		b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, threads)
+		b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "b2_id", accountID)
 		SavePassword(preference, "b2_key", applicationKey)
 		return b2Storage
+	} else if matched[1] == "b2-custom" {
+		b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
+		matched := b2customUrlRegex.FindStringSubmatch(storageURL)
+		downloadURL := "https://" + matched[1]
+		bucket := matched[2]
+		storageDir := matched[4]
+
+		accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
+		applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
+
+		b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
+		if err != nil {
+			LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
+			return nil

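Editorial note: the new b2-custom scheme lets the download endpoint be named in the storage URL itself, in the form b2-custom://host/bucket[/dir]. For example (hypothetical host), b2-custom://eu.example.backblazeb2.com/mybucket/backups yields downloadURL https://eu.example.backblazeb2.com, bucket mybucket, and storageDir backups.
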
@@ -27,6 +27,7 @@ var testRateLimit int
 var testQuickMode bool
 var testThreads int
 var testFixedChunkSize bool
+var testRSAEncryption bool

 func init() {
 	flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -34,6 +35,7 @@ func init() {
 	flag.BoolVar(&testQuickMode, "quick", false, "quick test")
 	flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
 	flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
+	flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
 	flag.Parse()
 }

@@ -107,7 +109,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
 	} else if testStorageName == "b2" {
-		storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], threads)
+		storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
 		storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 		return storage, err
 	} else if testStorageName == "gcs-s3" {
@@ -296,7 +298,8 @@ func TestStorage(t *testing.T) {

 	LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

-	storage, err := loadStorage(testDir, 1)
+	threads := 8
+	storage, err := loadStorage(testDir, threads)
 	if err != nil {
 		t.Errorf("Failed to create storage: %v", err)
 		return
@@ -326,16 +329,16 @@ func TestStorage(t *testing.T) {
 	storage.CreateDirectory(0, "shared")

 	// Upload to the same directory by multiple goroutines
-	count := 8
+	count := threads
 	finished := make(chan int, count)
 	for i := 0; i < count; i++ {
-		go func(name string) {
-			err := storage.UploadFile(0, name, []byte("this is a test file"))
+		go func(threadIndex int, name string) {
+			err := storage.UploadFile(threadIndex, name, []byte("this is a test file"))
 			if err != nil {
 				t.Errorf("Failed to upload '%s': %v", name, err)
 			}
 			finished <- 0
-		}(fmt.Sprintf("shared/a/b/c/%d", i))
+		}(i, fmt.Sprintf("shared/a/b/c/%d", i))
 	}

 	for i := 0; i < count; i++ {
@@ -384,7 +387,6 @@ func TestStorage(t *testing.T) {

 	snapshotIDs := []string{}
 	for _, snapshotDir := range snapshotDirs {
-		LOG_INFO("debug", "snapshot dir: %s", snapshotDir)
 		if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
 			snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
 		}

@@ -55,7 +55,7 @@ func IsEmptyFilter(pattern string) bool {
 }

 func IsUnspecifiedFilter(pattern string) bool {
-	if pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e' {
+	if pattern[0] != '+' && pattern[0] != '-' && !strings.HasPrefix(pattern, "i:") && !strings.HasPrefix(pattern, "e:") {
 		return true
 	} else {
 		return false
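The old check looked only at the first byte, so any pattern that merely began with 'i' or 'e' was treated as carrying an explicit include/exclude regex prefix; the fix requires the full "i:" or "e:" prefix. A small demonstration of the difference:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Old check: any pattern starting with 'i' or 'e' counted as specified.
    func oldIsUnspecified(p string) bool {
    	return p[0] != '+' && p[0] != '-' && p[0] != 'i' && p[0] != 'e'
    }

    // New check: only the "i:" and "e:" prefixes count as regex filters.
    func newIsUnspecified(p string) bool {
    	return p[0] != '+' && p[0] != '-' && !strings.HasPrefix(p, "i:") && !strings.HasPrefix(p, "e:")
    }

    func main() {
    	// A plain path pattern that happens to start with 'i':
    	fmt.Println(oldIsUnspecified("images/")) // false -- misclassified as a regex filter
    	fmt.Println(newIsUnspecified("images/")) // true  -- correctly unspecified
    }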
@@ -460,10 +460,3 @@ func AtoSize(sizeString string) int {

 	return size
 }
-
-func MinInt(x, y int) int {
-	if x < y {
-		return x
-	}
-	return y
-}
@@ -88,3 +88,7 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
 func joinPath(components ...string) string {
 	return path.Join(components...)
 }
+
+func SplitDir(fullPath string) (dir string, file string) {
+	return path.Split(fullPath)
+}
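On non-Windows platforms SplitDir simply delegates to path.Split, which keeps the trailing slash on the directory component:

    package main

    import (
    	"fmt"
    	"path"
    )

    func main() {
    	dir, file := path.Split("snapshots/repo/1")
    	fmt.Printf("dir=%q file=%q\n", dir, file) // dir="snapshots/repo/" file="1"
    }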
@@ -92,6 +92,17 @@ func TestMatchPattern(t *testing.T) {
 		}
 	}

+	for _, pattern := range []string{ "+", "-", "i:", "e:", "+a", "-a", "i:a", "e:a"} {
+		if IsUnspecifiedFilter(pattern) {
+			t.Errorf("pattern %s has a specified filter", pattern)
+		}
+	}
+
+	for _, pattern := range []string{ "i", "e", "ia", "ib", "a", "b"} {
+		if !IsUnspecifiedFilter(pattern) {
+			t.Errorf("pattern %s does not have a specified filter", pattern)
+		}
+	}
 }

 func TestRateLimit(t *testing.T) {
@@ -126,3 +126,8 @@ func joinPath(components ...string) string {
 	}
 	return combinedPath
 }
+
+func SplitDir(fullPath string) (dir string, file string) {
+	i := strings.LastIndex(fullPath, "\\")
+	return fullPath[:i+1], fullPath[i+1:]
+}
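The Windows variant mirrors path.Split for backslash-separated paths. Note that when no backslash is present, strings.LastIndex returns -1, so the function returns an empty directory and the whole input as the file name, matching path.Split's behavior:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func SplitDir(fullPath string) (dir string, file string) {
    	i := strings.LastIndex(fullPath, "\\")
    	return fullPath[:i+1], fullPath[i+1:]
    }

    func main() {
    	fmt.Println(SplitDir(`snapshots\repo\1`)) // "snapshots\repo\" "1"
    	fmt.Println(SplitDir("noslash"))          // "" "noslash" (LastIndex returned -1)
    }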
@@ -93,49 +93,49 @@ func (storage *WasabiStorage) DeleteFile(
 // rename. It's designed to get the job done with as few dependencies
 // on other packages as possible rather than being something
 // general-purpose and reusable.
-func (storage *WasabiStorage) MoveFile(
-	threadIndex int, from string, to string,
-) (err error) {
+func (storage *WasabiStorage) MoveFile(threadIndex int, from string, to string) (err error) {

-	var from_path string
+	var fromPath string
 	// The from path includes the bucket. Take care not to include an empty storageDir
 	// string as Wasabi's backend will return 404 on URLs with double slashes.
 	if storage.storageDir == "" {
-		from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
+		fromPath = fmt.Sprintf("/%s/%s", storage.bucket, from)
 	} else {
-		from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
+		fromPath = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
 	}

-	object := fmt.Sprintf("https://%s@%s%s",
-		storage.region, storage.endpoint, from_path)
+	object := fmt.Sprintf("https://%s@%s%s", storage.region, storage.endpoint, fromPath)

+	toPath := to
 	// The object's new name is relative to the top of the bucket.
-	new_name := fmt.Sprintf("%s/%s", storage.storageDir, to)
+	if storage.storageDir != "" {
+		toPath = fmt.Sprintf("%s/%s", storage.storageDir, to)
+	}

 	timestamp := time.Now().Format(time.RFC1123Z)

-	signing_string := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, from_path)
+	signingString := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)

 	signer := hmac.New(sha1.New, []byte(storage.secret))
-	signer.Write([]byte(signing_string))
+	signer.Write([]byte(signingString))

 	signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))

 	authorization := fmt.Sprintf("AWS %s:%s", storage.key, signature)

-	request, error := http.NewRequest("MOVE", object, nil)
-	if error != nil {
-		return error
+	request, err := http.NewRequest("MOVE", object, nil)
+	if err != nil {
+		return err
 	}
 	request.Header.Add("Authorization", authorization)
 	request.Header.Add("Date", timestamp)
-	request.Header.Add("Destination", new_name)
+	request.Header.Add("Destination", toPath)
 	request.Header.Add("Host", storage.endpoint)
 	request.Header.Add("Overwrite", "true")

-	response, error := storage.client.Do(request)
-	if error != nil {
-		return error
+	response, err := storage.client.Do(request)
+	if err != nil {
+		return err
 	}
 	defer response.Body.Close()
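The authorization scheme here has the shape of AWS Signature Version 2: an HMAC-SHA1 over a canonical string of the method, date, and resource path, base64-encoded into an "AWS key:signature" header. A standalone sketch of that signing step (all credential and path values below are placeholders):

    package main

    import (
    	"crypto/hmac"
    	"crypto/sha1"
    	"encoding/base64"
    	"fmt"
    	"time"
    )

    func main() {
    	secret := "SECRETKEY" // placeholder credentials
    	accessKey := "ACCESSKEY"
    	fromPath := "/bucket/dir/chunks/ab"

    	timestamp := time.Now().Format(time.RFC1123Z)
    	// Canonical string for a MOVE with no Content-MD5 or Content-Type,
    	// hence the two empty lines.
    	signingString := fmt.Sprintf("MOVE\n\n\n%s\n%s", timestamp, fromPath)

    	signer := hmac.New(sha1.New, []byte(secret))
    	signer.Write([]byte(signingString))
    	signature := base64.StdEncoding.EncodeToString(signer.Sum(nil))

    	fmt.Printf("Authorization: AWS %s:%s\n", accessKey, signature)
    }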
@@ -49,7 +49,7 @@ var (
 )

 func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
-	if storageDir[len(storageDir)-1] != '/' {
+	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
 		storageDir += "/"
 	}
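The added length guard matters because indexing storageDir[len(storageDir)-1] panics with an index-out-of-range runtime error when storageDir is the empty string; short-circuiting on len(storageDir) > 0 makes an empty storage directory safe.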
@@ -59,7 +59,7 @@ func CreateWebDAVStorage(host string, port int, username string, password string
 		username:   username,
 		password:   password,
 		storageDir: "",
-		useHTTP:    false,
+		useHTTP:    useHTTP,

 		client:  http.DefaultClient,
 		threads: threads,
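Previously the constructor hard-coded useHTTP to false, silently ignoring the caller's parameter and always using HTTPS; this change wires the argument through to the struct field.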
@@ -151,6 +151,10 @@ func (storage *WebDAVStorage) sendRequest(method string, uri string, depth int,
 		request.Header.Set(key, value)
 	}

+	if method == "PUT" {
+		request.ContentLength = int64(len(data))
+	}
+
 	//requestDump, err := httputil.DumpRequest(request, true)
 	//LOG_INFO("debug", "Request: %s", requestDump)
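When Go's http.NewRequest cannot infer the body length (for anything other than the handful of concrete reader types it recognizes, such as *bytes.Reader), it leaves ContentLength unset and the client falls back to chunked transfer encoding, which some WebDAV servers reject for PUT. A sketch of the issue, assuming the body is wrapped in an opaque reader (for rate limiting or similar) as seems to be the case here:

    package main

    import (
    	"bytes"
    	"io"
    	"net/http"
    )

    // A wrapper reader hides the concrete type, so http.NewRequest can no
    // longer infer the body length on its own.
    type wrappedReader struct{ io.Reader }

    func newPut(url string, data []byte) (*http.Request, error) {
    	request, err := http.NewRequest("PUT", url, wrappedReader{bytes.NewReader(data)})
    	if err != nil {
    		return nil, err
    	}
    	// Without this, the request would be sent with chunked transfer
    	// encoding instead of a Content-Length header.
    	request.ContentLength = int64(len(data))
    	return request, nil
    }

    func main() {
    	_, _ = newPut("http://example.com/dav/file", []byte("payload"))
    }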
@@ -313,6 +317,7 @@ func (storage *WebDAVStorage) ListFiles(threadIndex int, dir string) (files []st

 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
+
 	properties, err := storage.getProperties(filePath, 0, "getcontentlength", "resourcetype")
 	if err != nil {
 		if err == errWebDAVNotExist {
@@ -325,7 +330,14 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
 		return false, false, 0, err
 	}

-	if m, exist := properties["/"+storage.storageDir+filePath]; !exist {
+	m, exist := properties["/"+storage.storageDir+filePath]
+
+	// If no properties exist for the given filePath, remove the trailing / from filePath and search again
+	if !exist && filePath != "" && filePath[len(filePath) - 1] == '/' {
+		m, exist = properties["/"+storage.storageDir+filePath[:len(filePath) - 1]]
+	}
+
+	if !exist {
 		return false, false, 0, nil
 	} else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
 		return true, true, 0, nil