commit 0762c448c4
parent 741644b575
Author: Michael Cook
Date:   2018-12-29 13:19:32 +01:00

21 changed files with 92 additions and 96 deletions

@@ -7,6 +7,7 @@ package main
 import (
 "encoding/json"
 "fmt"
+"net/http"
 "os"
 "os/exec"
 "os/signal"
@@ -16,7 +17,6 @@ import (
 "runtime"
 "strconv"
 "strings"
-"net/http"
 _ "net/http/pprof"
@@ -159,8 +159,6 @@ func setGlobalOptions(context *cli.Context) {
 }()
 }
 duplicacy.RunInBackground = context.GlobalBool("background")
 }
@@ -650,7 +648,7 @@ func changePassword(context *cli.Context) {
 duplicacy.LOG_INFO("CONFIG_CLEAN", "The local copy of the old config has been removed")
 }
 }
-} ()
+}()
 err = storage.DeleteFile(0, "config")
 if err != nil {
@@ -1262,7 +1260,7 @@ func infoStorage(context *cli.Context) {
 for _, dir := range dirs {
 if len(dir) > 0 && dir[len(dir)-1] == '/' {
-duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir) - 1])
+duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir)-1])
 }
 }
@@ -1298,7 +1296,7 @@ func benchmark(context *cli.Context) {
 }
 threads := downloadThreads
-if (threads < uploadThreads) {
+if threads < uploadThreads {
 threads = uploadThreads
 }
@@ -1309,7 +1307,7 @@ func benchmark(context *cli.Context) {
 if storage == nil {
 return
 }
-duplicacy.Benchmark(repository, storage, int64(fileSize) * 1000000, chunkSize * 1024 * 1024, chunkCount, uploadThreads, downloadThreads)
+duplicacy.Benchmark(repository, storage, int64(fileSize)*1000000, chunkSize*1024*1024, chunkCount, uploadThreads, downloadThreads)
 }
 func main() {
@@ -1999,7 +1997,7 @@ func main() {
 c := make(chan os.Signal, 1)
 signal.Notify(c, os.Interrupt)
 go func() {
-for _ = range c {
+for range c {
 duplicacy.RunAtError()
 os.Exit(1)
 }
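
An aside on the last hunk: dropping the unused loop variable, as in "for range c", is the idiomatic way to wait on a signal channel when the received value itself is never used, and it is one of the simplifications gofmt -s applies. A minimal, self-contained sketch of the same pattern (illustration only, not part of this commit):

    package main

    import (
        "fmt"
        "os"
        "os/signal"
    )

    func main() {
        // Buffered channel so a signal delivered before the receive is not dropped.
        c := make(chan os.Signal, 1)
        signal.Notify(c, os.Interrupt)

        fmt.Println("waiting for Ctrl-C")
        // The signal value is unused, so "for range c" replaces the older "for _ = range c".
        for range c {
            fmt.Println("interrupted; cleaning up")
            return
        }
    }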

@@ -104,7 +104,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
 if dir == "snapshots/" {
-for subDir, _ := range subDirs {
+for subDir := range subDirs {
 files = append(files, subDir)
 }
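
The change above recurs throughout this commit: when only the keys of a map are needed, the blank value variable can be dropped so the range clause binds just the key. A small illustrative sketch (sample data, not repository code):

    package main

    import (
        "fmt"
        "sort"
    )

    func main() {
        subDirs := map[string]bool{"snapshots/": true, "chunks/": true}

        // Old style: for subDir, _ := range subDirs { ... }
        // Simplified: the unused value is omitted and only the key is bound.
        var files []string
        for subDir := range subDirs {
            files = append(files, subDir)
        }

        sort.Strings(files) // map iteration order is random, so sort for stable output
        fmt.Println(files)  // [chunks/ snapshots/]
    }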

@@ -385,7 +385,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
 }
 fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
-return []*B2Entry{&B2Entry{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
+return []*B2Entry{{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
 }
 if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
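
The []*B2Entry{{...}} form above relies on composite-literal type elision: inside a slice whose element type is *B2Entry, a bare {...} element already means &B2Entry{...}, so repeating the type is redundant. A standalone example with a made-up struct (the real B2Entry fields live in the repository):

    package main

    import "fmt"

    // entry is a stand-in for a type like B2Entry; the field set here is invented.
    type entry struct {
        ID   string
        Name string
        Size int64
    }

    func main() {
        // Verbose form: the element type is spelled out for every element.
        verbose := []*entry{&entry{"id1", "a", 1}, &entry{"id2", "b", 2}}

        // Elided form, as used in the hunk above: &entry is implied by the slice type.
        short := []*entry{{"id1", "a", 1}, {"id2", "b", 2}}

        fmt.Println(len(verbose), len(short), short[0].Name)
    }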

@@ -71,7 +71,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
 subDirs[subDir+"/"] = true
 }
-for subDir, _ := range subDirs {
+for subDir := range subDirs {
 files = append(files, subDir)
 }
 } else if dir == "chunks" {

@@ -1324,7 +1324,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 }
 }
 for i := entry.StartChunk; i <= entry.EndChunk; i++ {
 if _, found := offsetMap[chunkDownloader.taskList[i].chunkHash]; !found {
 chunkDownloader.taskList[i].needed = true
@@ -1674,7 +1673,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 chunksToCopy := 0
 chunksToSkip := 0
-for chunkHash, _ := range chunks {
+for chunkHash := range chunks {
 otherChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
 if _, found := otherChunks[otherChunkID]; found {
 chunksToSkip++
@@ -1704,7 +1703,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 totalSkipped := 0
 chunkIndex := 0
-for chunkHash, _ := range chunks {
+for chunkHash := range chunks {
 chunkIndex++
 chunkID := manager.config.GetChunkIDFromHash(chunkHash)
 newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)

@@ -246,8 +246,8 @@ func TestBackupManager(t *testing.T) {
 backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
 time.Sleep(time.Duration(delay) * time.Second)
 SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-backupManager.Restore(testDir+"/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
-/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/ nil)
+backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
+/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
@@ -270,8 +270,8 @@ func TestBackupManager(t *testing.T) {
 backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
 time.Sleep(time.Duration(delay) * time.Second)
 SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-backupManager.Restore(testDir+"/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
-/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
+/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -298,8 +298,8 @@ func TestBackupManager(t *testing.T) {
 createRandomFile(testDir+"/repository2/dir5/file5", 100)
 SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-backupManager.Restore(testDir+"/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-/*deleteMode=*/true, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -325,8 +325,8 @@ func TestBackupManager(t *testing.T) {
 os.Remove(testDir + "/repository1/file2")
 os.Remove(testDir + "/repository1/dir1/file3")
 SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-backupManager.Restore(testDir+"/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
+backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 hash1 := getFileHash(testDir + "/repository1/" + f)

@@ -5,18 +5,18 @@
 package duplicacy
 import (
-"os"
 "bytes"
 "compress/zlib"
 "crypto/aes"
 "crypto/cipher"
-"crypto/rand"
 "crypto/hmac"
+"crypto/rand"
 "crypto/sha256"
 "encoding/hex"
 "fmt"
 "hash"
 "io"
+"os"
 "runtime"
 "github.com/bkaradzic/go-lz4"
@@ -250,7 +250,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 // PKCS7 is used. Compressed chunk sizes leaks information about the original chunks so we want the padding sizes
 // to be the maximum allowed by PKCS7
 dataLength := encryptedBuffer.Len() - offset
-paddingLength := 256 - dataLength % 256
+paddingLength := 256 - dataLength%256
 encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
 encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
@@ -342,7 +342,6 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 return err
 }
 paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
 if paddingLength == 0 {
 paddingLength = 256
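
The padding arithmetic touched in the second hunk is worth spelling out: with a 256-byte block, 256 - dataLength%256 is always between 1 and 256, so even an already-aligned payload gains a full block of padding, and the pad byte value records the pad length (with 0 standing in for 256, which is how the decrypt hunk above reads it back). A small worked sketch, independent of the repository's Chunk type:

    package main

    import (
        "bytes"
        "fmt"
    )

    // pad appends PKCS7-style padding with a 256-byte block size, mirroring the
    // arithmetic in the hunk above (illustration, not the library code).
    func pad(data []byte) []byte {
        paddingLength := 256 - len(data)%256 // always 1..256
        // byte(256) wraps to 0, which the unpad side interprets as a full block.
        return append(data, bytes.Repeat([]byte{byte(paddingLength)}, paddingLength)...)
    }

    func unpad(data []byte) []byte {
        paddingLength := int(data[len(data)-1])
        if paddingLength == 0 {
            paddingLength = 256
        }
        return data[:len(data)-paddingLength]
    }

    func main() {
        for _, n := range []int{1, 255, 256, 1000} {
            padded := pad(make([]byte, n))
            fmt.Printf("data %4d bytes -> padded %4d bytes -> unpadded %4d bytes\n",
                n, len(padded), len(unpad(padded)))
        }
    }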

@@ -48,7 +48,7 @@ func TestChunk(t *testing.T) {
 if remainderLength == -1 {
 remainderLength = len(encryptedData) % 256
-} else if len(encryptedData) % 256 != remainderLength {
+} else if len(encryptedData)%256 != remainderLength {
 t.Errorf("Incorrect padding size")
 }

@@ -178,7 +178,7 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
 return
 }
-for i, _ := range downloader.completedTasks {
+for i := range downloader.completedTasks {
 if i < chunkIndex && downloader.taskList[i].chunk != nil {
 downloader.config.PutChunk(downloader.taskList[i].chunk)
 downloader.taskList[i].chunk = nil
@@ -253,7 +253,7 @@ func (downloader *ChunkDownloader) Stop() {
 downloader.numberOfDownloadingChunks--
 }
-for i, _ := range downloader.completedTasks {
+for i := range downloader.completedTasks {
 downloader.config.PutChunk(downloader.taskList[i].chunk)
 downloader.taskList[i].chunk = nil
 downloader.numberOfActiveChunks--

@@ -272,7 +272,7 @@ func (entry *Entry) IsLink() bool {
 }
 func (entry *Entry) GetPermissions() os.FileMode {
-return os.FileMode(entry.Mode)&fileModeMask
+return os.FileMode(entry.Mode) & fileModeMask
 }
 func (entry *Entry) IsSameAs(other *Entry) bool {
@@ -308,7 +308,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
 }
 // Only set the permission if the file is not a symlink
-if !entry.IsLink() && (*fileInfo).Mode() & fileModeMask != entry.GetPermissions() {
+if !entry.IsLink() && (*fileInfo).Mode()&fileModeMask != entry.GetPermissions() {
 err := os.Chmod(fullPath, entry.GetPermissions())
 if err != nil {
 LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
@@ -459,7 +459,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 // This binary search works because ioutil.ReadDir returns files sorted by Name() by default
 if nobackupFile != "" {
-ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0})
+ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0 })
 if ii < len(files) && files[ii].Name() == nobackupFile {
 LOG_DEBUG("LIST_NOBACKUP", "%s is excluded due to nobackup file", path)
 return directoryList, skippedFiles, nil
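
The sort.Search call reformatted above depends on the property noted in the comment: ioutil.ReadDir returns entries sorted by name, and binary search is only valid on sorted input. A minimal sketch of the same lookup over a plain string slice (the names and the ".nobackup" marker here are illustrative):

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    func main() {
        // Sorted, as ioutil.ReadDir would return them.
        names := []string{".duplicacy", ".nobackup", "file1", "file2"}
        nobackupFile := ".nobackup"

        // sort.Search returns the smallest index ii for which the predicate is true.
        ii := sort.Search(len(names), func(ii int) bool {
            return strings.Compare(names[ii], nobackupFile) >= 0
        })

        if ii < len(names) && names[ii] == nobackupFile {
            fmt.Println("directory excluded: found", nobackupFile)
        } else {
            fmt.Println("no nobackup marker found")
        }
    }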

@@ -291,7 +291,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
 } else if isDir {
 storage.savePathID(current, fileID)
 }
-if i != len(names) - 1 && !isDir {
+if i != len(names)-1 && !isDir {
 return "", fmt.Errorf("Path '%s' is not a directory", current)
 }
 }
@@ -386,8 +386,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 subDirs := []string{}
 for _, file := range files {
-storage.savePathID("snapshots/" + file.Name, file.Id)
-subDirs = append(subDirs, file.Name + "/")
+storage.savePathID("snapshots/"+file.Name, file.Id)
+subDirs = append(subDirs, file.Name+"/")
 }
 return subDirs, nil, nil
 } else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
@@ -438,8 +438,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 files = append(files, name)
 sizes = append(sizes, entry.Size)
 } else {
-parents = append(parents, parent+ "/" + entry.Name)
-storage.savePathID(parent + "/" + entry.Name, entry.Id)
+parents = append(parents, parent+"/"+entry.Name)
+storage.savePathID(parent+"/"+entry.Name, entry.Id)
 }
 }
 }

@@ -113,7 +113,7 @@ func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, [
 for _, entry := range entries {
 if entry.Type == "application/directory" {
-files = append(files, entry.Name + "/")
+files = append(files, entry.Name+"/")
 sizes = append(sizes, 0)
 } else {
 files = append(files, entry.Name)

@@ -207,7 +207,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
 if err != nil {
 // The error may be caused by a non-existent fullDir, or a broken connection. In either case,
 // we just assume it is the former because there isn't a way to tell which is the case.
-for i, _ := range dirs[1 : len(dirs)-1] {
+for i := range dirs[1 : len(dirs)-1] {
 subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
 // We don't check the error; just keep going blindly but always store the last err
 err = storage.client.Mkdir(subDir)
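
The loop reformatted above creates each missing parent directory in turn because, as the comment says, the remote end gives no reliable way to tell a missing directory from a broken connection. A rough standalone sketch of that create-parents-one-level-at-a-time idea, with the actual mkdir call abstracted behind a function value (the SFTP client itself is not modeled here, and the exact slice bounds differ from the repository code):

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // mkdirParents creates every intermediate directory of filePath under storageDir,
    // ignoring individual errors much like the upload path above does; a later upload
    // attempt will surface any real failure.
    func mkdirParents(storageDir, filePath string, mkdir func(string) error) {
        dirs := strings.Split(filePath, "/")
        for i := range dirs[:len(dirs)-1] {
            subDir := path.Join(storageDir, path.Join(dirs[:i+1]...))
            _ = mkdir(subDir) // errors ignored on purpose; existing dirs are fine
        }
    }

    func main() {
        var created []string
        mkdirParents("storage", "chunks/ab/cd/chunkfile", func(dir string) error {
            created = append(created, dir)
            return nil
        })
        fmt.Println(created) // [storage/chunks storage/chunks/ab storage/chunks/ab/cd]
    }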

@@ -54,7 +54,7 @@ func GetPathDeviceId(path string) (deviceId int32, err error) {
 // Executes shell command with timeout and returns stdout
 func CommandWithTimeout(timeoutInSeconds int, name string, arg ...string) (output string, err error) {
-ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds) * time.Second)
+ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds)*time.Second)
 defer cancel()
 cmd := exec.CommandContext(ctx, name, arg...)
@@ -158,7 +158,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 // Mount snapshot as readonly and hide from GUI i.e. Finder
 _, err = CommandWithTimeout(timeoutInSeconds,
-"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine." + snapshotDate, "/", snapshotPath)
+"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
 if err != nil {
 LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
 return top
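
CommandWithTimeout, touched in the first hunk of this file, follows the standard pairing of context.WithTimeout with exec.CommandContext, which kills the child process once the deadline passes. A compact, self-contained version of that pattern (not the repository's implementation):

    package main

    import (
        "context"
        "fmt"
        "os/exec"
        "time"
    )

    // runWithTimeout runs a command and returns its stdout, failing if the command
    // does not finish within timeoutInSeconds; CommandContext kills the process
    // when the context's deadline expires.
    func runWithTimeout(timeoutInSeconds int, name string, arg ...string) (string, error) {
        ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds)*time.Second)
        defer cancel()

        out, err := exec.CommandContext(ctx, name, arg...).Output()
        return string(out), err
    }

    func main() {
        out, err := runWithTimeout(5, "echo", "hello")
        fmt.Printf("out=%q err=%v\n", out, err)
    }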

@@ -807,7 +807,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 snapshotIDIndex := 0
 totalMissingChunks := 0
-for snapshotID, _ = range snapshotMap {
+for snapshotID = range snapshotMap {
 revisions := revisionsToCheck
 if len(revisions) == 0 || showStatistics {
@@ -839,7 +839,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 }
 LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))
-for snapshotID, _ = range snapshotMap {
+for snapshotID = range snapshotMap {
 for _, snapshot := range snapshotMap[snapshotID] {
@@ -855,7 +855,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 }
 missingChunks := 0
-for chunkID, _ := range chunks {
+for chunkID := range chunks {
 _, found := chunkSizeMap[chunkID]
@@ -953,7 +953,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
 var totalChunkSize int64
 var uniqueChunkSize int64
-for chunkID, _ := range chunks {
+for chunkID := range chunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 if chunkUniqueMap[chunkID] {
@@ -971,7 +971,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
 var totalChunkSize int64
 var uniqueChunkSize int64
-for chunkID, _ := range snapshotChunks {
+for chunkID := range snapshotChunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
@@ -1021,7 +1021,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
 var newChunkCount int64
 var newChunkSize int64
-for chunkID, _ := range chunks {
+for chunkID := range chunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 totalChunkCount += 1
@@ -1049,7 +1049,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
 var uniqueChunkSize int64
 var totalChunkCount int64
 var uniqueChunkCount int64
-for chunkID, _ := range snapshotChunks {
+for chunkID := range snapshotChunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 totalChunkCount += 1
@@ -2221,7 +2221,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
 continue
 }
-manager.chunkOperator.Resurrect(chunk, chunkDir + file)
+manager.chunkOperator.Resurrect(chunk, chunkDir+file)
 fmt.Fprintf(logFile, "Found referenced fossil %s\n", file)
 } else {
@@ -2232,7 +2232,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
 }
 if exclusive {
-manager.chunkOperator.Delete(chunk, chunkDir + file)
+manager.chunkOperator.Delete(chunk, chunkDir+file)
 } else {
 collection.AddFossil(chunkDir + file)
 LOG_DEBUG("FOSSIL_FIND", "Found unreferenced fossil %s", file)
@@ -2409,7 +2409,7 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string)
 }
 if len(derivationKey) > 64 {
-derivationKey = derivationKey[len(derivationKey) - 64:]
+derivationKey = derivationKey[len(derivationKey)-64:]
 }
 err = manager.fileChunk.Decrypt(manager.config.FileKey, derivationKey)
@@ -2443,7 +2443,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
 }
 if len(derivationKey) > 64 {
-derivationKey = derivationKey[len(derivationKey) - 64:]
+derivationKey = derivationKey[len(derivationKey)-64:]
 }
 err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)

@@ -9,12 +9,12 @@ import (
 "encoding/hex"
 "encoding/json"
 "fmt"
+"io/ioutil"
 "os"
 "path"
 "strings"
 "testing"
 "time"
-"io/ioutil"
 )
 func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
@@ -500,7 +500,7 @@ func TestPruneWithRetentionPolicyAndTag(t *testing.T) {
 t.Logf("Creating 30 snapshots")
 for i := 0; i < 30; i++ {
 tag := "auto"
-if i % 3 == 0 {
+if i%3 == 0 {
 tag = "manual"
 }
 createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, tag)
@@ -615,12 +615,12 @@ func TestPruneNewSnapshots(t *testing.T) {
 // Create another snapshot of vm1 that brings back chunkHash1
 createTestSnapshot(snapshotManager, "vm1@host1", 3, now-0*day-3600, now-0*day-60, []string{chunkHash1, chunkHash3}, "tag")
 // Create another snapshot of vm2 so the fossil collection will be processed by next prune
-createTestSnapshot(snapshotManager, "vm2@host1", 2, now + 3600, now + 3600 * 2, []string{chunkHash4, chunkHash5}, "tag")
+createTestSnapshot(snapshotManager, "vm2@host1", 2, now+3600, now+3600*2, []string{chunkHash4, chunkHash5}, "tag")
 // Now chunkHash1 wil be resurrected
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
 checkTestSnapshots(snapshotManager, 4, 0)
-snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false);
+snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false)
 }
@@ -664,12 +664,12 @@ func TestPruneGhostSnapshots(t *testing.T) {
 // Create another snapshot of vm1 so the fossil collection becomes eligible for processing.
 chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
-createTestSnapshot(snapshotManager, "vm1@host1", 3, now - day - 3600, now - day - 60, []string{chunkHash3, chunkHash4}, "tag")
+createTestSnapshot(snapshotManager, "vm1@host1", 3, now-day-3600, now-day-60, []string{chunkHash3, chunkHash4}, "tag")
 // Run the prune again but the fossil collection should be igored, since revision 1 still exists
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
 checkTestSnapshots(snapshotManager, 3, 2)
-snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false);
+snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false)
 // Prune snapshot 1 again
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -677,11 +677,11 @@ func TestPruneGhostSnapshots(t *testing.T) {
 // Create another snapshot
 chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
-createTestSnapshot(snapshotManager, "vm1@host1", 4, now + 3600, now + 3600 * 2, []string{chunkHash5, chunkHash5}, "tag")
+createTestSnapshot(snapshotManager, "vm1@host1", 4, now+3600, now+3600*2, []string{chunkHash5, chunkHash5}, "tag")
 checkTestSnapshots(snapshotManager, 3, 2)
 // Run the prune again and this time the fossil collection will be processed and the fossils removed
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
 checkTestSnapshots(snapshotManager, 3, 0)
-snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false);
+snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false)
 }

@@ -626,7 +626,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 } else if matched[1] == "webdav" || matched[1] == "webdav-http" {
 server := matched[3]
 username := matched[2]
-username = username[:len(username) - 1]
+username = username[:len(username)-1]
 storageDir := matched[5]
 port := 0
 useHTTP := matched[1] == "webdav-http"

@@ -7,10 +7,10 @@ package duplicacy
 import (
 "fmt"
 "os"
+"path/filepath"
 "strings"
 "syscall"
 "unsafe"
-"path/filepath"
 )
 type symbolicLinkReparseBuffer struct {

@@ -100,7 +100,7 @@ func (storage *WasabiStorage) MoveFile(
 var from_path string
 // The from path includes the bucket. Take care not to include an empty storageDir
 // string as Wasabi's backend will return 404 on URLs with double slashes.
-if (storage.storageDir == "") {
+if storage.storageDir == "" {
 from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
 } else {
 from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
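
The condition simplified above exists because naively joining /bucket/storageDir/object with an empty storageDir would produce a double slash, which the comment notes Wasabi answers with a 404. One way to express the same guard with a tiny helper (purely illustrative; the storage fields are simplified here):

    package main

    import (
        "fmt"
        "strings"
    )

    // objectPath joins the bucket, an optional storage directory, and the object
    // name, skipping empty segments so the result never contains a double slash.
    func objectPath(bucket, storageDir, name string) string {
        segments := []string{bucket}
        if storageDir != "" {
            segments = append(segments, storageDir)
        }
        segments = append(segments, name)
        return "/" + strings.Join(segments, "/")
    }

    func main() {
        fmt.Println(objectPath("backups", "", "chunks/ab"))      // /backups/chunks/ab
        fmt.Println(objectPath("backups", "host1", "chunks/ab")) // /backups/host1/chunks/ab
    }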

@@ -19,9 +19,9 @@ import (
 "net/http"
 //"net/http/httputil"
 "strconv"
+"strings"
 "sync"
 "time"
-"strings"
 )
 type WebDAVStorage struct {
@@ -325,11 +325,11 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
 return false, false, 0, err
 }
-if m, exist := properties["/" + storage.storageDir + filePath]; !exist {
+if m, exist := properties["/"+storage.storageDir+filePath]; !exist {
 return false, false, 0, nil
 } else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
 return true, true, 0, nil
-} else if length, exist := m["getcontentlength"]; exist && length != ""{
+} else if length, exist := m["getcontentlength"]; exist && length != "" {
 value, _ := strconv.Atoi(length)
 return true, false, int64(value), nil
 } else {