mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits

...

23 Commits

Author SHA1 Message Date
Gilbert Chen
b61906c99e Bump version to 2.4.0 2020-03-05 22:06:24 -05:00
gilbertchen
a0a07d18cc Merge pull request #589 from fracai/b2_download_url
support downloading from a custom URL pointed at B2
2020-03-05 22:00:53 -05:00
Gilbert Chen
a6ce64e715 Fixed handling of repository ids with spaces in the b2 backend
A repository id should not normally contain spaces or other non-alphanumeric
characters, but if it does, we should still handle it correctly.  This
commit fixes the b2 backend so that such file names are converted properly.
2020-03-05 14:45:09 -05:00
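
A minimal sketch of the idea (assuming only the standard strings and net/url
packages; the real helper is the B2Escape function in the b2client diff below,
which may differ in detail): each path component is escaped individually, so
spaces survive in B2 file names while the "/" separators stay intact.

    // b2Escape escapes every path component separately, leaving the
    // separators alone, so "id with spaces" becomes "id%20with%20spaces".
    func b2Escape(path string) string {
        components := strings.Split(path, "/")
        for i, c := range components {
            components[i] = url.PathEscape(c)
        }
        return strings.Join(components, "/")
    }

    // b2Escape("snapshots/id with spaces/1") == "snapshots/id%20with%20spaces/1"
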
Arno Hautala
499b612a0d moving download url config from a key to the storage url pattern 2020-02-25 20:53:19 -05:00
Arno Hautala
46ce0ba1fb support downloading from a custom URL pointed at B2 2020-02-22 22:12:36 -05:00
Gilbert Chen
cc88abd547 Fixed a bug that caused all copied chunks to be RSA encrypted
The encryptionVersion field in the Chunk struct is supposed to pass the status
of RSA encryption from a source chunk to a destination chunk in a copy command.
This field needs three states (default, disabled, enabled) rather than two in
order to pass the status correctly.
2020-02-13 14:03:07 -05:00
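
With a plain boolean there is no way to distinguish "the source chunk was not
RSA encrypted" from "nothing was requested", so the copy command would fall
back to the config default and RSA-encrypt every chunk. A sketch of the copy
path, using the constant names introduced in the chunk diff below:

    // Translate the source chunk's state into an explicit request on the
    // destination chunk, so the encryption routine never has to guess.
    if chunk.encryptionVersion == ENCRYPTION_VERSION_RSA {
        newChunk.encryptionVersion = CHUNK_RSA_ENCRYPTION_ENABLED  // keep RSA on
    } else {
        newChunk.encryptionVersion = CHUNK_RSA_ENCRYPTION_DISABLED // keep it off
    }
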
Gilbert Chen
e888b6d7e5 Fix bugs in sftp retrying
* Fixed a bug caused by a nil sftp client during a retry
* Simplified the retry logic in UploadFile
* Changed the number of tries from 6 to 8
2020-01-13 16:26:43 -05:00
Gilbert Chen
d43fe1a282 Release the list of chunk hashes after processing each snapshot.
The chunk hash list is no longer needed once it has been consolidated.
Releasing it immediately after use helps reduce memory usage.
2019-12-09 22:45:16 -05:00
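
The change amounts to dropping the reference once a snapshot's chunk hashes
have been merged into the combined chunk set, roughly as follows (see the
backup manager diff below for the exact context):

    for _, chunkHash := range snapshot.ChunkHashes {
        chunks[chunkHash] = true // consolidate into the shared set
    }
    snapshot.ChunkHashes = nil // release the per-snapshot list immediately
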
Gilbert Chen
504d07bd51 Bump version to 2.3.0 2019-11-25 15:45:41 -05:00
Gilbert Chen
0abb4099f6 Fixed test errors -- parse test flags in one place 2019-11-25 15:44:03 -05:00
Gilbert Chen
694494ea54 Throw an error, instead of a warning, if pre/post script fails 2019-11-24 22:38:29 -05:00
Gilbert Chen
165152493c For the check command, -tabular should imply -all just like -stats 2019-11-24 20:45:05 -05:00
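
Concretely, after this change:

    duplicacy check -tabular
    # now lists and checks all snapshot IDs and revisions, as if -all had
    # been given, matching the existing behavior of -stats
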
Gilbert Chen
e02041f4ed Increase the number of retries for the b2 backend from 10 to 15
Retrying 10 times gives a retry window of about 5 minutes, which might be too
short; 15 retries correspond to about 10 minutes.
2019-11-23 15:28:03 -05:00
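
For a rough sense of where those figures come from, here is an illustrative
calculation that assumes a delay doubling from one second and capped at 64
seconds (the actual backoff schedule lives in the B2 client and may differ):

    // retryWindow returns the total wait, in seconds, over n retries.
    func retryWindow(n int) int {
        total, delay := 0, 1
        for i := 0; i < n; i++ {
            total += delay
            if delay < 64 {
                delay *= 2
            }
        }
        return total
    }

    // retryWindow(10) == 319  (~5 minutes)
    // retryWindow(15) == 639  (~10 minutes)
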
Gilbert Chen
a99f059b52 Allow a custom location for the filters file
You can now add a 'filters' key to the preferences file that points to the
path of the filters file.  If this key is not present in the preferences,
the default location '.duplicacy/filters' is used.

There is a new option '-filters' for the set command that sets this key in
the preferences, but you can also edit the preferences file directly.
2019-11-23 15:23:26 -05:00
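
For example (the path here is hypothetical), running

    duplicacy set -filters /path/to/my-filters

adds a 'filters' entry to the preferences file, which the backup, restore, and
diff commands then read instead of '.duplicacy/filters':

    "filters": "/path/to/my-filters"
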
Gilbert Chen
f022a6f684 Fixed build errors in tests 2019-11-22 21:17:17 -05:00
Gilbert Chen
791c61eecb Fixed missing format parameters 2019-11-22 20:32:19 -05:00
gilbertchen
6ad27adaea Merge pull request #578 from gboudreau/vss-catalina
Bugfix: allow -vss usage on Mac OS Catalina
2019-11-22 16:46:31 -05:00
Gilbert Chen
9abfbe1ee0 Update pkg/sftp to 1.10.1
The old version has a bug where a connection closed by the server may cause
a deadlock due to a full channel buffer.
2019-11-21 23:36:17 -05:00
Gilbert Chen
b32c3b2cd5 If a symlink is a directory, match it against the patterns as a directory 2019-11-21 23:10:54 -05:00
Gilbert Chen
9baafdafa2 Remove a log message meant for debugging only 2019-11-21 21:23:31 -05:00
Gilbert Chen
ca7d927840 Use joinPath instead of filepath.Join to generate UNC paths
This fix probably isn't necessary, since filepath.Join can now produce UNC
paths too with recent versions of Go.  However, we still want to keep
it for consistency.
2019-11-21 14:56:31 -05:00
Guillaume Boudreau
0ca9cd476e Bugfix: allow -vss usage on Mac OS Catalina
Use `tmutil listlocalsnapshots` to find the snapshot name to mount; fall back to `com.apple.TimeMachine.SNAPSHOT_DATE` (same as before) if it can't be found.
2019-10-28 11:55:15 -04:00
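
A sketch of the lookup (mirroring the regular expression added in the
shadow-copy diff below; the sample output line is only illustrative):

    // `tmutil listlocalsnapshots .` prints one snapshot name per line, e.g.
    //   com.apple.TimeMachine.2019-10-28-115515.local
    // Pick the line containing the date reported by `tmutil snapshot`;
    // if nothing matches, keep the pre-Catalina name.
    snapshotName := "com.apple.TimeMachine." + snapshotDate
    r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
    if m := r.FindStringSubmatch(tmutilOutput); len(m) > 0 {
        snapshotName = m[0]
    }
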
gilbertchen
abf9a94fc9 Merge pull request #575 from gilbertchen/rsa_encryption
Implement RSA encryption
2019-10-12 11:14:29 -04:00
18 changed files with 159 additions and 106 deletions

Gopkg.lock generated
View File

@@ -153,8 +153,8 @@
[[projects]]
name = "github.com/pkg/sftp"
packages = ["."]
revision = "98203f5a8333288eb3163b7c667d4260fe1333e9"
version = "1.0.0"
revision = "3edd153f213d8d4191a0ee4577c61cca19436632"
version = "v1.10.1"
[[projects]]
name = "github.com/satori/go.uuid"
@@ -225,6 +225,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
inputs-digest = "8636a9db1eb54be5374f9914687693122efdde511f11c47d10c22f9e245e7f70"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -75,7 +75,7 @@
[[constraint]]
name = "github.com/pkg/sftp"
version = "1.0.0"
version = "1.10.1"
[[constraint]]
branch = "master"

View File

@@ -201,7 +201,7 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
}
if err != nil {
duplicacy.LOG_WARN("SCRIPT_ERROR", "Failed to run script: %v", err)
duplicacy.LOG_ERROR("SCRIPT_ERROR", "Failed to run %s script: %v", script, err)
return false
}
@@ -548,7 +548,13 @@ func setPreference(context *cli.Context) {
newPreference.DoNotSavePassword = triBool.IsTrue()
}
if context.String("nobackup-file") != "" {
newPreference.NobackupFile = context.String("nobackup-file")
}
if context.String("filters") != "" {
newPreference.FiltersFile = context.String("filters")
}
key := context.String("key")
value := context.String("value")
@@ -731,7 +737,7 @@ func backupRepository(context *cli.Context) {
uploadRateLimit := context.Int("limit-rate")
enumOnly := context.Bool("enum-only")
storage.SetRateLimits(0, uploadRateLimit)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -808,7 +814,7 @@ func restoreRepository(context *cli.Context) {
duplicacy.LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
storage.SetRateLimits(context.Int("limit-rate"), 0)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile, preference.FiltersFile)
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
@@ -850,7 +856,7 @@ func listSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
id := preference.SnapshotID
@@ -901,7 +907,7 @@ func checkSnapshots(context *cli.Context) {
tag := context.String("t")
revisions := getRevisions(context)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
@@ -958,7 +964,7 @@ func printFile(context *cli.Context) {
snapshotID = context.String("id")
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
@@ -1016,13 +1022,13 @@ func diff(context *cli.Context) {
}
compareByHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
loadRSAPrivateKey(context.String("key"), preference, backupManager, false)
backupManager.SetupSnapshotCache(preference.Name)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile)
backupManager.SnapshotManager.Diff(repository, snapshotID, revisions, path, compareByHash, preference.NobackupFile, preference.FiltersFile)
runScript(context, preference.Name, "post")
}
@@ -1061,7 +1067,7 @@ func showHistory(context *cli.Context) {
revisions := getRevisions(context)
showLocalHash := context.Bool("hash")
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1124,7 +1130,7 @@ func pruneSnapshots(context *cli.Context) {
os.Exit(ArgumentExitCode)
}
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, preference.NobackupFile)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password, "", "")
duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name)
@@ -1164,7 +1170,7 @@ func copySnapshots(context *cli.Context) {
sourcePassword = duplicacy.GetPassword(*source, "password", "Enter source storage password:", false, false)
}
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, source.NobackupFile)
sourceManager := duplicacy.CreateBackupManager(source.SnapshotID, sourceStorage, repository, sourcePassword, "", "")
sourceManager.SetupSnapshotCache(source.Name)
duplicacy.SavePassword(*source, "password", sourcePassword)
@@ -1199,7 +1205,7 @@ func copySnapshots(context *cli.Context) {
destinationStorage.SetRateLimits(0, context.Int("upload-limit-rate"))
destinationManager := duplicacy.CreateBackupManager(destination.SnapshotID, destinationStorage, repository,
destinationPassword, destination.NobackupFile)
destinationPassword, "", "")
duplicacy.SavePassword(*destination, "password", destinationPassword)
destinationManager.SetupSnapshotCache(destination.Name)
@@ -1883,6 +1889,11 @@ func main() {
Usage: "use the specified storage instead of the default one",
Argument: "<storage name>",
},
cli.StringFlag{
Name: "filters",
Usage: "specify the path of the filters file containing include/exclude patterns",
Argument: "<file path>",
},
},
Usage: "Change the options for the default or specified storage",
ArgsUsage: " ",
@@ -2048,7 +2059,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.2.3" + " (" + GitCommit + ")"
app.Version = "2.4.0" + " (" + GitCommit + ")"
// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

View File

@@ -75,7 +75,7 @@ func B2Escape(path string) string {
return strings.Join(components, "/")
}
func NewB2Client(applicationKeyID string, applicationKey string, storageDir string, threads int) *B2Client {
func NewB2Client(applicationKeyID string, applicationKey string, downloadURL string, storageDir string, threads int) *B2Client {
for storageDir != "" && storageDir[0] == '/' {
storageDir = storageDir[1:]
@@ -85,7 +85,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, storageDir stri
storageDir += "/"
}
maximumRetries := 10
maximumRetries := 15
if value, found := os.LookupEnv("DUPLICACY_B2_RETRIES"); found && value != "" {
maximumRetries, _ = strconv.Atoi(value)
LOG_INFO("B2_RETRIES", "Setting maximum retries for B2 to %d", maximumRetries)
@@ -95,6 +95,7 @@ func NewB2Client(applicationKeyID string, applicationKey string, storageDir stri
HTTPClient: http.DefaultClient,
ApplicationKeyID: applicationKeyID,
ApplicationKey: applicationKey,
DownloadURL: downloadURL,
StorageDir: storageDir,
UploadURLs: make([]string, threads),
UploadTokens: make([]string, threads),
@@ -325,7 +326,10 @@ func (client *B2Client) AuthorizeAccount(threadIndex int) (err error, allowed bo
client.AuthorizationToken = output.AuthorizationToken
client.APIURL = output.APIURL
if client.DownloadURL == "" {
client.DownloadURL = output.DownloadURL
}
LOG_INFO("BACKBLAZE_URL", "download URL is: %s", client.DownloadURL)
client.IsAuthorized = true
client.LastAuthorizationTime = time.Now().Unix()
@@ -413,16 +417,16 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
input["prefix"] = client.StorageDir
for {
url := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
apiURL := client.getAPIURL() + "/b2api/v1/b2_list_file_names"
requestHeaders := map[string]string{}
requestMethod := http.MethodPost
var requestInput interface{}
requestInput = input
if includeVersions {
url = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
apiURL = client.getAPIURL() + "/b2api/v1/b2_list_file_versions"
} else if singleFile {
// handle a single file with no versions as a special case to download the last byte of the file
url = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
apiURL = client.getDownloadURL() + "/file/" + client.BucketName + "/" + B2Escape(client.StorageDir + startFileName)
// requesting byte -1 works for empty files where 0-0 fails with a 416 error
requestHeaders["Range"] = "bytes=-1"
// HEAD request
@@ -432,7 +436,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
var readCloser io.ReadCloser
var responseHeader http.Header
var err error
readCloser, responseHeader, _, err = client.call(threadIndex, url, requestMethod, requestHeaders, requestInput)
readCloser, responseHeader, _, err = client.call(threadIndex, apiURL, requestMethod, requestHeaders, requestInput)
if err != nil {
return nil, err
}
@@ -445,7 +449,7 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
if singleFile && !includeVersions {
if responseHeader == nil {
LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", url)
LOG_DEBUG("BACKBLAZE_LIST", "%s did not return headers", apiURL)
return []*B2Entry{}, nil
}
requiredHeaders := []string{
@@ -459,11 +463,17 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
}
}
if len(missingKeys) > 0 {
return nil, fmt.Errorf("%s missing headers: %s", url, missingKeys)
return nil, fmt.Errorf("%s missing headers: %s", apiURL, missingKeys)
}
// construct the B2Entry from the response headers of the download request
fileID := responseHeader.Get("x-bz-file-id")
fileName := responseHeader.Get("x-bz-file-name")
unescapedFileName, err := url.QueryUnescape(fileName)
if err == nil {
fileName = unescapedFileName
} else {
LOG_WARN("BACKBLAZE_UNESCAPE", "Failed to unescape the file name %s", fileName)
}
fileAction := "upload"
// byte range that is returned: "bytes #-#/#"
rangeString := responseHeader.Get("Content-Range")
@@ -476,10 +486,10 @@ func (client *B2Client) ListFileNames(threadIndex int, startFileName string, sin
// this should only execute if the requested file is empty and the range request didn't result in a Content-Range header
fileSize, _ = strconv.ParseInt(lengthString, 0, 64)
if fileSize != 0 {
return nil, fmt.Errorf("%s returned non-zero file length", url)
return nil, fmt.Errorf("%s returned non-zero file length", apiURL)
}
} else {
return nil, fmt.Errorf("could not parse headers returned by %s", url)
return nil, fmt.Errorf("could not parse headers returned by %s", apiURL)
}
fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)

View File

@@ -37,7 +37,7 @@ func createB2ClientForTest(t *testing.T) (*B2Client, string) {
return nil, ""
}
return NewB2Client(b2["account"], b2["key"], b2["directory"], 1), b2["bucket"]
return NewB2Client(b2["account"], b2["key"], "", b2["directory"], 1), b2["bucket"]
}

View File

@@ -15,9 +15,9 @@ type B2Storage struct {
}
// CreateB2Storage creates a B2 storage object.
func CreateB2Storage(accountID string, applicationKey string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
func CreateB2Storage(accountID string, applicationKey string, downloadURL string, bucket string, storageDir string, threads int) (storage *B2Storage, err error) {
client := NewB2Client(accountID, applicationKey, storageDir, threads)
client := NewB2Client(accountID, applicationKey, downloadURL, storageDir, threads)
err, _ = client.AuthorizeAccount(0)
if err != nil {
@@ -204,7 +204,6 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
readCloser, _, err := storage.client.DownloadFile(threadIndex, filePath)
if err != nil {
return err
@@ -218,7 +217,6 @@ func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
return storage.client.UploadFile(threadIndex, filePath, content, storage.UploadRateLimit/storage.client.Threads)
}

View File

@@ -35,6 +35,7 @@ type BackupManager struct {
config *Config // contains a number of options
nobackupFile string // don't backup directory when this file name is found
filtersFile string // the path to the filters file
}
func (manager *BackupManager) SetDryRun(dryRun bool) {
@@ -44,7 +45,7 @@ func (manager *BackupManager) SetDryRun(dryRun bool) {
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
// master key which can be nil if encryption is not enabled.
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string) *BackupManager {
func CreateBackupManager(snapshotID string, storage Storage, top string, password string, nobackupFile string, filtersFile string) *BackupManager {
config, _, err := DownloadConfig(storage, password)
if err != nil {
@@ -67,6 +68,7 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
config: config,
nobackupFile: nobackupFile,
filtersFile: filtersFile,
}
if IsDebugging() {
@@ -198,7 +200,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
defer DeleteShadowCopy()
LOG_INFO("BACKUP_INDEXING", "Indexing %s", top)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop, manager.nobackupFile)
localSnapshot, skippedDirectories, skippedFiles, err := CreateSnapshotFromDirectory(manager.snapshotID, shadowTop,
manager.nobackupFile, manager.filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false
@@ -770,7 +773,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile)
localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top, manager.nobackupFile,
manager.filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
return false
@@ -1664,6 +1668,8 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
chunks[chunkHash] = true
}
}
snapshot.ChunkHashes = nil
}
otherChunkFiles, otherChunkSizes := otherManager.SnapshotManager.ListAllFiles(otherManager.storage, "chunks/")
@@ -1726,7 +1732,11 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
newChunk := otherManager.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(chunk.GetBytes())
newChunk.encryptionVersion = chunk.encryptionVersion
if chunk.encryptionVersion == ENCRYPTION_VERSION_RSA {
newChunk.encryptionVersion = CHUNK_RSA_ENCRYPTION_ENABLED
} else {
newChunk.encryptionVersion = CHUNK_RSA_ENCRYPTION_DISABLED
}
chunkUploader.StartChunk(newChunk, chunkIndex)
totalCopied++
} else {

View File

@@ -227,11 +227,11 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
if testFixedChunkSize {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
t.Errorf("Failed to initialize the storage")
}
} else {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false) {
if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
t.Errorf("Failed to initialize the storage")
}
}
@@ -239,7 +239,7 @@ func TestBackupManager(t *testing.T) {
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password, "")
backupManager := CreateBackupManager("host1", storage, testDir, password, "", "")
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")

View File

@@ -63,14 +63,21 @@ type Chunk struct {
config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
// by the config
encryptionVersion byte // The version type in the encryption header
encryptionVersion byte // The version type in the encryption header; for a chunk to be copied, this field contains
// one of the CHUNK_RSA_ENCRYPTION_* constants to indicate how the new chunk should be encrypted
}
// Magic word to identify a duplicacy format encrypted file, plus a version number.
var ENCRYPTION_HEADER = "duplicacy\000"
// RSA encrypted chunks start with "duplicacy\002"
var ENCRYPTION_VERSION_RSA byte = 2
// These constants are used to control how a new chunk should be encrypted by the copy command
var CHUNK_RSA_ENCRYPTION_DEFAULT byte = 0 // No RSA encryption explicitly requested
var CHUNK_RSA_ENCRYPTION_DISABLED byte = 1 // The RSA encryption should be turned off
var CHUNK_RSA_ENCRYPTION_ENABLED byte = 2 // The RSA encryption should be forced on
// CreateChunk creates a new chunk.
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
@@ -193,7 +200,10 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
key := encryptionKey
usingRSA := false
if chunk.config.rsaPublicKey != nil && (!isSnapshot || chunk.encryptionVersion == ENCRYPTION_VERSION_RSA) {
// If encryptionVersion is not set, use the default setting (RSA for file chunks only);
// otherwise, enable RSA encryption only when explicitly requested
if chunk.config.rsaPublicKey != nil &&
((!isSnapshot && chunk.encryptionVersion == CHUNK_RSA_ENCRYPTION_DEFAULT) || chunk.encryptionVersion == CHUNK_RSA_ENCRYPTION_ENABLED) {
// If the chunk is not a snapshot chunk, we attempt to encrypt it with the RSA public key if there is one
randomKey := make([]byte, 32)
_, err := rand.Read(randomKey)

View File

@@ -5,7 +5,6 @@
package duplicacy
import (
"flag"
"bytes"
crypto_rand "crypto/rand"
"crypto/rsa"
@@ -13,14 +12,6 @@ import (
"testing"
)
var testRSAEncryption bool
func init() {
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.Parse()
}
func TestChunk(t *testing.T) {
key := []byte("duplicacydefault")

View File

@@ -490,7 +490,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
}
if entry.IsLink() {
isRegular := false
isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
isRegular, entry.Link, err = Readlink(joinPath(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
skippedFiles = append(skippedFiles, entry.Path)
@@ -500,7 +500,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
if isRegular {
entry.Mode ^= uint32(os.ModeSymlink)
} else if path == "" && (filepath.IsAbs(entry.Link) || filepath.HasPrefix(entry.Link, `\\`)) && !strings.HasPrefix(entry.Link, normalizedTop) {
stat, err := os.Stat(filepath.Join(top, entry.Path))
stat, err := os.Stat(joinPath(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
skippedFiles = append(skippedFiles, entry.Path)
@@ -513,6 +513,9 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
// path from f.Name(); note that a "/" is appended assuming a symbolic link is always a directory
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
}
if len(patterns) > 0 && !MatchPath(newEntry.Path, patterns) {
continue
}
entry = newEntry
}
}

View File

@@ -25,6 +25,7 @@ type Preference struct {
DoNotSavePassword bool `json:"no_save_password"`
NobackupFile string `json:"nobackup_file"`
Keys map[string]string `json:"keys"`
FiltersFile string `json:"filters"`
}
var preferencePath string

View File

@@ -91,7 +91,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
storageDir: storageDir,
minimumNesting: minimumNesting,
numberOfThreads: threads,
numberOfTries: 6,
numberOfTries: 8,
serverAddress: serverAddress,
sftpConfig: sftpConfig,
}
@@ -129,22 +129,19 @@ func (storage *SFTPStorage) retry(f func () error) error {
delay *= 2
storage.clientLock.Lock()
if storage.client != nil {
storage.client.Close()
storage.client = nil
}
connection, err := ssh.Dial("tcp", storage.serverAddress, storage.sftpConfig)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to connect to %s: %v; retrying", storage.serverAddress, err)
storage.clientLock.Unlock()
return err
continue
}
client, err := sftp.NewClient(connection)
if err != nil {
LOG_WARN("SFT_RECONNECT", "Failed to create a new SFTP client to %s: %v; retrying", storage.serverAddress, err)
connection.Close()
storage.clientLock.Unlock()
return err
continue
}
storage.client = client
storage.clientLock.Unlock()
@@ -275,36 +272,19 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
fullPath := path.Join(storage.storageDir, filePath)
dirs := strings.Split(filePath, "/")
if len(dirs) > 1 {
fullDir := path.Dir(fullPath)
err = storage.retry(func() error {
return storage.retry(func() error {
if len(dirs) > 1 {
_, err := storage.getSFTPClient().Stat(fullDir)
return err
})
if err != nil {
// The error may be caused by a non-existent fullDir, or a broken connection. In either case,
// we just assume it is the former because there isn't a way to tell which is the case.
if os.IsNotExist(err) {
for i := range dirs[1 : len(dirs)-1] {
subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
// We don't check the error; just keep going blindly but always store the last err
err = storage.getSFTPClient().Mkdir(subDir)
}
// If there is an error creating the dirs, we check fullDir one more time, because another thread
// may happen to create the same fullDir ahead of this thread
if err != nil {
err = storage.retry(func() error {
_, err := storage.getSFTPClient().Stat(fullDir)
return err
})
if err != nil {
return err
// We don't check the error; just keep going blindly
storage.getSFTPClient().Mkdir(subDir)
}
}
}
}
return storage.retry(func() error {
letters := "abcdefghijklmnopqrstuvwxyz"
suffix := make([]byte, 8)

View File

@@ -13,6 +13,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"syscall"
"time"
@@ -123,11 +124,11 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
}
deviceIdRepository, err := GetPathDeviceId(top)
if err != nil {
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: ", top)
LOG_ERROR("VSS_INIT", "Unable to get device ID of path: %s", top)
return top
}
if deviceIdLocal != deviceIdRepository {
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: %s", top)
return top
}
@@ -145,22 +146,37 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
// Use tmutil to create snapshot
tmutilOutput, err := CommandWithTimeout(timeoutInSeconds, "tmutil", "snapshot")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: ", err)
LOG_ERROR("VSS_CREATE", "Error while calling tmutil: %v", err)
return top
}
colonPos := strings.IndexByte(tmutilOutput, ':')
if colonPos < 0 {
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: ", tmutilOutput)
LOG_ERROR("VSS_CREATE", "Snapshot creation failed: %s", tmutilOutput)
return top
}
snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
tmutilOutput, err = CommandWithTimeout(timeoutInSeconds, "tmutil", "listlocalsnapshots", ".")
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while calling 'tmutil listlocalsnapshots': %v", err)
return top
}
snapshotName := "com.apple.TimeMachine." + snapshotDate
r := regexp.MustCompile(`(?m)^(.+` + snapshotDate + `.*)$`)
snapshotNames := r.FindStringSubmatch(tmutilOutput)
if len(snapshotNames) > 0 {
snapshotName = snapshotNames[0]
} else {
LOG_WARN("VSS_CREATE", "Error while using 'tmutil listlocalsnapshots' to find snapshot name. Will fallback to 'com.apple.TimeMachine.SNAPSHOT_DATE'")
}
// Mount snapshot as readonly and hide from GUI i.e. Finder
_, err = CommandWithTimeout(timeoutInSeconds,
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s="+snapshotName, "/", snapshotPath)
if err != nil {
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: %v", err)
return top
}

View File

@@ -58,7 +58,7 @@ func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (snapshot *Snapshot, skippedDirectories []string,
func CreateSnapshotFromDirectory(id string, top string, nobackupFile string, filtersFile string) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
snapshot = &Snapshot{
@@ -69,7 +69,10 @@ func CreateSnapshotFromDirectory(id string, top string, nobackupFile string) (sn
var patterns []string
patterns = ProcessFilters()
if filtersFile == "" {
filtersFile = joinPath(GetDuplicacyPreferencePath(), "filters")
}
patterns = ProcessFilters(filtersFile)
directories := make([]*Entry, 0, 256)
directories = append(directories, CreateEntry("", 0, 0, 0))
@@ -121,8 +124,8 @@ func AppendPattern(patterns []string, new_pattern string) (new_patterns []string
new_patterns = append(patterns, new_pattern)
return new_patterns
}
func ProcessFilters() (patterns []string) {
patterns = ProcessFilterFile(joinPath(GetDuplicacyPreferencePath(), "filters"), make([]string, 0))
func ProcessFilters(filtersFile string) (patterns []string) {
patterns = ProcessFilterFile(filtersFile, make([]string, 0))
LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))

View File

@@ -759,8 +759,8 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToCheck []int, tag string, showStatistics bool, showTabular bool,
checkFiles bool, searchFossils bool, resurrect bool) bool {
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, showTabular: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
snapshotID, revisionsToCheck, tag, showStatistics, showTabular, checkFiles, searchFossils, resurrect)
snapshotMap := make(map[string][]*Snapshot)
var err error
@@ -790,7 +790,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
chunkSizeMap[chunk] = allSizes[i]
}
if snapshotID == "" || showStatistics {
if snapshotID == "" || showStatistics || showTabular {
snapshotIDs, err := manager.ListSnapshotIDs()
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
@@ -810,7 +810,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
for snapshotID = range snapshotMap {
revisions := revisionsToCheck
if len(revisions) == 0 || showStatistics {
if len(revisions) == 0 || showStatistics || showTabular {
revisions, err = manager.ListSnapshotRevisions(snapshotID)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
@@ -1299,7 +1299,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
// Diff compares two snapshots, or two revision of a file if the file argument is given.
func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []int,
filePath string, compareByHash bool, nobackupFile string) bool {
filePath string, compareByHash bool, nobackupFile string, filtersFile string) bool {
LOG_DEBUG("DIFF_PARAMETERS", "top: %s, id: %s, revision: %v, path: %s, compareByHash: %t",
top, snapshotID, revisions, filePath, compareByHash)
@@ -1312,7 +1312,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
if len(revisions) <= 1 {
// Only scan the repository if filePath is not provided
if len(filePath) == 0 {
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile)
rightSnapshot, _, _, err = CreateSnapshotFromDirectory(snapshotID, top, nobackupFile, filtersFile)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list the directory %s: %v", top, err)
return false

View File

@@ -531,7 +531,25 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, storageDir, threads)
b2Storage, err := CreateB2Storage(accountID, applicationKey, "", bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "b2_id", accountID)
SavePassword(preference, "b2_key", applicationKey)
return b2Storage
} else if matched[1] == "b2-custom" {
b2customUrlRegex := regexp.MustCompile(`^b2-custom://([^/]+)/([^/]+)(/(.+))?`)
matched := b2customUrlRegex.FindStringSubmatch(storageURL)
downloadURL := "https://" + matched[1]
bucket := matched[2]
storageDir := matched[4]
accountID := GetPassword(preference, "b2_id", "Enter Backblaze account or application id:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter corresponding Backblaze application key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, downloadURL, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
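
Given that pattern, a storage URL for a custom download endpoint placed in
front of B2 would look roughly like this (host, bucket, and path are
placeholders):

    b2-custom://download.example.com/my-bucket/backups

where "download.example.com" becomes the download URL (with "https://"
prepended), "my-bucket" is the bucket, and "backups" is the optional storage
directory.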

View File

@@ -27,6 +27,7 @@ var testRateLimit int
var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -34,6 +35,7 @@ func init() {
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
flag.Parse()
}
@@ -107,7 +109,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "b2" {
storage, err := CreateB2Storage(config["account"], config["key"], config["bucket"], config["directory"], threads)
storage, err := CreateB2Storage(config["account"], config["key"], "", config["bucket"], config["directory"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "gcs-s3" {