Mirror of https://github.com/gilbertchen/duplicacy

Follow-up changes for the -persist PR

* Restore/check should report an error instead of a success at the end if there
  were any errors and -persist is specified
* Don't compute the file hash before passing the file to the chunk maker; this is
  redundant as the chunk maker will produce the file hash
* Add a LOG_WERROR function to switch between LOG_WARN and LOG_ERROR dynamically
Commit 16d2c14c5a (parent eecbb8fa99)
Gilbert Chen, 2020-09-18 11:23:35 -04:00
6 changed files with 139 additions and 147 deletions
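Taken together, the changes work like this: Restore now returns the number of files it failed to restore instead of a bool, the caller turns a non-zero count into a final error, and the new LOG_WERROR helper picks WARN or ERROR based on the allowFailures (-persist) flag. The following standalone sketch approximates the helper's behavior with fmt/os in place of duplicacy's logf machinery; it is illustrative only, not the actual implementation:

    package main

    import (
        "fmt"
        "os"
    )

    // LOG_WERROR, as added in this commit, logs the same message as a warning
    // when failures are tolerated (-persist) and as an error otherwise. This
    // sketch substitutes fmt/os for duplicacy's logf.
    func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) {
        if isWarning {
            fmt.Printf("WARN "+logID+" "+format+"\n", v...)
            return // keep going; failures are counted and reported at the end
        }
        fmt.Printf("ERROR "+logID+" "+format+"\n", v...)
        os.Exit(100) // stand-in for duplicacy's ERROR level aborting the operation
    }

    func main() {
        allowFailures := true // as if -persist were given
        LOG_WERROR(allowFailures, "DOWNLOAD_HASH",
            "File %s has a mismatched hash: %s instead of %s", "file1", "abc", "def")
    }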

duplicacy/duplicacy_main.go

@@ -830,7 +830,11 @@ func restoreRepository(context *cli.Context) {
 	loadRSAPrivateKey(context.String("key"), context.String("key-passphrase"), preference, backupManager, false)
 	backupManager.SetupSnapshotCache(preference.Name)
-	backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns, persist)
+	failed := backupManager.Restore(repository, revision, true, quickMode, threads, overwrite, deleteMode, setOwner, showStatistics, patterns, persist)
+	if failed > 0 {
+		duplicacy.LOG_ERROR("RESTORE_FAIL", "%d file(s) were not restored correctly", failed)
+		return
+	}
 	runScript(context, preference.Name, "post")
 }

src/duplicacy_backupmanager.go

@@ -8,7 +8,6 @@ import (
"bytes" "bytes"
"encoding/hex" "encoding/hex"
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@@ -747,7 +746,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 // the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
 // remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
 func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
-	deleteMode bool, setOwner bool, showStatistics bool, patterns []string, allowFailures bool) bool {
+	deleteMode bool, setOwner bool, showStatistics bool, patterns []string, allowFailures bool) int {
 
 	startTime := time.Now().Unix()
@@ -770,7 +769,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		err = os.Mkdir(top, 0744)
 		if err != nil {
 			LOG_ERROR("RESTORE_MKDIR", "Can't create the directory to be restored: %v", err)
-			return false
+			return 0
 		}
 	}
@@ -778,7 +777,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
 	if err != nil && !os.IsExist(err) {
 		LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
-		return false
+		return 0
 	}
 
 	remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
@@ -788,7 +787,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		manager.filtersFile)
 	if err != nil {
 		LOG_ERROR("SNAPSHOT_LIST", "Failed to list the repository: %v", err)
-		return false
+		return 0
 	}
 
 	LOG_INFO("RESTORE_START", "Restoring %s to revision %d", top, revision)
@@ -815,18 +814,11 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	var totalFileSize int64
 	var downloadedFileSize int64
-	var failedFileSize int64
+	var failedFiles int
 	var skippedFileSize int64
+	var skippedFiles int64
 	var downloadedFiles []*Entry
-	var skippedFiles []*Entry
-
-	// for storing failed files and reason for failure
-	type FailedEntry struct {
-		entry      *Entry
-		failReason string
-	}
-	var failedFiles []*FailedEntry
 
 	i := 0
 	for _, entry := range remoteSnapshot.Files {
@@ -846,7 +838,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			if quickMode && local.IsSameAs(entry) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", local.Path)
 				skippedFileSize += entry.Size
-				skippedFiles = append(skippedFiles, entry)
+				skippedFiles++
 				skipped = true
 			}
 		}
@@ -876,7 +868,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			err = os.Symlink(entry.Link, fullPath)
 			if err != nil {
 				LOG_ERROR("RESTORE_SYMLINK", "Can't create symlink %s: %v", entry.Path, err)
-				return false
+				return 0
 			}
 			entry.RestoreMetadata(fullPath, nil, setOwner)
 			LOG_TRACE("DOWNLOAD_DONE", "Symlink %s updated", entry.Path)
@@ -885,7 +877,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			if err == nil && !stat.IsDir() {
 				LOG_ERROR("RESTORE_NOTDIR", "The path %s is not a directory", fullPath)
-				return false
+				return 0
 			}
 
 			if os.IsNotExist(err) {
@@ -894,7 +886,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 				err = os.MkdirAll(fullPath, 0700)
 				if err != nil && !os.IsExist(err) {
 					LOG_ERROR("RESTORE_MKDIR", "%v", err)
-					return false
+					return 0
 				}
 			}
 		} else {
@@ -929,7 +921,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			if file.IsSameAsFileInfo(stat) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (by size and timestamp)", file.Path)
 				skippedFileSize += file.Size
-				skippedFiles = append(skippedFiles, file)
+				skippedFiles++
 				continue
 			}
 		}
@@ -937,7 +929,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			if file.Size == 0 && file.IsSameAsFileInfo(stat) {
 				LOG_TRACE("RESTORE_SKIP", "File %s unchanged (size 0)", file.Path)
 				skippedFileSize += file.Size
-				skippedFiles = append(skippedFiles, file)
+				skippedFiles++
 				continue
 			}
 		} else {
@@ -953,7 +945,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 			newFile, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.GetPermissions())
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_OPEN", "Failed to create empty file: %v", err)
-				return false
+				return 0
 			}
 			newFile.Close()
@@ -970,9 +962,10 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		downloaded, err := manager.RestoreFile(chunkDownloader, chunkMaker, file, top, inPlace, overwrite, showStatistics,
 			totalFileSize, downloadedFileSize, startDownloadingTime, allowFailures)
 		if err != nil {
-			// RestoreFile produced an error
-			failedFileSize += file.Size
-			failedFiles = append(failedFiles, &FailedEntry{file, err.Error()})
+			// RestoreFile returned an error; if allowFailures is false, RestoreFile would error out
+			// and not return, so here we just need to show a warning
+			failedFiles++
+			LOG_WARN("DOWNLOAD_FAIL", "Failed to restore %s: %v", file.Path, err)
 			continue
 		}
@@ -985,7 +978,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		} else {
 			// No error, file was skipped
 			skippedFileSize += file.Size
-			skippedFiles = append(skippedFiles, file)
+			skippedFiles++
 		}
 	}
@@ -1010,16 +1003,10 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		for _, file := range downloadedFiles {
 			LOG_INFO("DOWNLOAD_DONE", "Downloaded %s (%d)", file.Path, file.Size)
 		}
-		for _, file := range skippedFiles {
-			LOG_INFO("DOWNLOAD_SKIP", "Skipped %s (%d)", file.Path, file.Size)
-		}
 	}
 
-	if len(failedFiles) > 0 {
-		for _, failed := range failedFiles {
-			file := failed.entry
-			LOG_WARN("RESTORE_STATS", "Restore failed %s (%d): %s", file.Path, file.Size, failed.failReason)
-		}
+	if failedFiles > 0 {
+		return failedFiles
 	}
 
 	LOG_INFO("RESTORE_END", "Restored %s to revision %d", top, revision)
@@ -1027,13 +1014,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
LOG_INFO("RESTORE_STATS", "Files: %d total, %s bytes", len(fileEntries), PrettySize(totalFileSize)) LOG_INFO("RESTORE_STATS", "Files: %d total, %s bytes", len(fileEntries), PrettySize(totalFileSize))
LOG_INFO("RESTORE_STATS", "Downloaded %d file, %s bytes, %d chunks", LOG_INFO("RESTORE_STATS", "Downloaded %d file, %s bytes, %d chunks",
len(downloadedFiles), PrettySize(downloadedFileSize), chunkDownloader.numberOfDownloadedChunks) len(downloadedFiles), PrettySize(downloadedFileSize), chunkDownloader.numberOfDownloadedChunks)
LOG_INFO("RESTORE_STATS", "Skipped %d file, %s bytes", LOG_INFO("RESTORE_STATS", "Skipped %d file, %s bytes", skippedFiles, PrettySize(skippedFileSize))
len(skippedFiles), PrettySize(skippedFileSize))
LOG_INFO("RESTORE_STATS", "Failed %d file, %s bytes",
len(failedFiles), PrettySize(failedFileSize))
}
if len(failedFiles) > 0 {
LOG_WARN("RESTORE_STATS", "Some files could not be restored")
} }
 	runningTime := time.Now().Unix() - startTime
@@ -1045,7 +1026,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	chunkDownloader.Stop()
 
-	return true
+	return 0
 }
 
 // fileEncoder encodes one file at a time to avoid loading the full json description of the entire file tree
@@ -1220,14 +1201,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 	temporaryPath := path.Join(preferencePath, "temporary")
 	fullPath := joinPath(top, entry.Path)
 
-	// Function to call on errors ignored by allowFailures
-	var onFailure LogFunc
-	if allowFailures {
-		onFailure = LOG_WARN
-	} else {
-		onFailure = LOG_ERROR
-	}
-
 	defer func() {
 		if existingFile != nil {
 			existingFile.Close()
@@ -1292,45 +1265,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		} else {
 			LOG_TRACE("DOWNLOAD_OPEN", "Can't open the existing file: %v", err)
 		}
-	} else {
-		// File already exists. Read file and hash entire contents. If fileHash == entry.Hash, skip file.
-		// This is done before additional processing so that any identical files can be skipped regardless of the
-		// -overwrite option
-		fileHasher := manager.config.NewFileHasher()
-		buffer := make([]byte, 64*1024)
-		for {
-			n, err := existingFile.Read(buffer)
-			if err == io.EOF {
-				break
-			}
-			if err != nil {
-				LOG_ERROR("DOWNLOAD_OPEN", "Failed to read existing file: %v", err)
-				return false, nil
-			}
-			if n > 0 {
-				fileHasher.Write(buffer[:n])
-			}
-		}
-		fileHash := hex.EncodeToString(fileHasher.Sum(nil))
-
-		if fileHash == entry.Hash && fileHash != "" {
-			LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
-			return false, nil
-		}
-
-		// fileHash != entry.Hash, warn/error depending on -overwrite option
-		if !overwrite {
-			var msg string
-			if allowFailures {
-				msg = "File %s already exists. Please specify the -overwrite option to overwrite"
-			} else {
-				msg = "File %s already exists. Please specify the -overwrite option to continue"
-			}
-			onFailure("DOWNLOAD_OVERWRITE", msg, entry.Path) // will exit program here if allowFailures = false
-			return false, errors.New("file exists")
-		}
 	}
 	// The key in this map is the number of zeroes. The value is the corresponding hash.
@@ -1420,6 +1354,19 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		}
 		fileHash = hex.EncodeToString(fileHasher.Sum(nil))
+
+		if fileHash == entry.Hash && fileHash != "" {
+			LOG_TRACE("DOWNLOAD_SKIP", "File %s unchanged (by hash)", entry.Path)
+			return false, nil
+		}
+
+		// fileHash != entry.Hash, warn/error depending on -overwrite option
+		if !overwrite {
+			LOG_WERROR(allowFailures, "DOWNLOAD_OVERWRITE",
+				"File %s already exists. Please specify the -overwrite option to overwrite", entry.Path)
+			return false, fmt.Errorf("file exists")
+		}
 	} else {
 		// If it is not in-place, we want to reuse any chunks in the existing file regardless of their offsets, so
 		// we run the chunk maker to split the original file.
@@ -1520,14 +1467,15 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			}
 		} else {
 			chunk := chunkDownloader.WaitForChunk(i)
-			if !chunk.isBroken { // only write if chunk downloaded correctly
-				_, err = existingFile.Write(chunk.GetBytes()[start:end])
-				if err != nil {
-					LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
-					return false, nil
-				}
-				hasher.Write(chunk.GetBytes()[start:end])
+			if chunk.isBroken {
+				return false, fmt.Errorf("chunk %s is corrupted", manager.config.GetChunkIDFromHash(hash))
 			}
+			_, err = existingFile.Write(chunk.GetBytes()[start:end])
+			if err != nil {
+				LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
+				return false, nil
+			}
+			hasher.Write(chunk.GetBytes()[start:end])
 		}
 
 		offset += int64(end - start)
@@ -1542,9 +1490,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		// Verify the download by hash
 		hash := hex.EncodeToString(hasher.Sum(nil))
 		if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
-			onFailure("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
+			LOG_WERROR(allowFailures, "DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s (in-place)",
 				fullPath, "", entry.Hash)
-			return false, errors.New("file corrupt (hash mismatch)")
+			return false, fmt.Errorf("file corrupt (hash mismatch)")
 		}
 	} else {
@@ -1591,18 +1539,19 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			if !hasLocalCopy {
 				chunk := chunkDownloader.WaitForChunk(i)
-				// If the chunk was downloaded from the storage, we may still need a portion of it.
-				if !chunk.isBroken { // only get data if chunk downloaded correctly
-					start := 0
-					if i == entry.StartChunk {
-						start = entry.StartOffset
-					}
-					end := chunk.GetLength()
-					if i == entry.EndChunk {
-						end = entry.EndOffset
-					}
-					data = chunk.GetBytes()[start:end]
+				if chunk.isBroken {
+					return false, fmt.Errorf("chunk %s is corrupted", manager.config.GetChunkIDFromHash(hash))
 				}
+				// If the chunk was downloaded from the storage, we may still need a portion of it.
+				start := 0
+				if i == entry.StartChunk {
+					start = entry.StartOffset
+				}
+				end := chunk.GetLength()
+				if i == entry.EndChunk {
+					end = entry.EndOffset
+				}
+				data = chunk.GetBytes()[start:end]
 			}
 
 			_, err = newFile.Write(data)
@@ -1617,9 +1566,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		hash := hex.EncodeToString(hasher.Sum(nil))
 		if hash != entry.Hash && hash != "" && entry.Hash != "" && !strings.HasPrefix(entry.Hash, "#") {
-			onFailure("DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
+			LOG_WERROR(allowFailures, "DOWNLOAD_HASH", "File %s has a mismatched hash: %s instead of %s",
 				entry.Path, hash, entry.Hash)
-			return false, errors.New("file corrupt (hash mismatch)")
+			return false, fmt.Errorf("file corrupt (hash mismatch)")
 		}
 
 	if existingFile != nil {
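The deleted block earlier in this file is the second bullet of the commit message: RestoreFile no longer reads and hashes an existing file up front, because the chunk maker already feeds every byte it splits through a file hasher, so the hash comparison can happen after splitting at no extra cost. A rough standalone illustration of why chunking yields the file hash for free (hypothetical splitIntoChunks helper, not duplicacy's chunk maker API):

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "strings"
    )

    // splitIntoChunks cuts data into fixed-size chunks while feeding a single
    // hasher, so the whole-file hash is a byproduct of chunking and no separate
    // read-and-hash pass is needed.
    func splitIntoChunks(data []byte, chunkSize int) (chunks [][]byte, fileHash string) {
        hasher := sha256.New()
        for start := 0; start < len(data); start += chunkSize {
            end := start + chunkSize
            if end > len(data) {
                end = len(data)
            }
            hasher.Write(data[start:end]) // hashed as a side effect of splitting
            chunks = append(chunks, data[start:end])
        }
        return chunks, hex.EncodeToString(hasher.Sum(nil))
    }

    func main() {
        data := []byte(strings.Repeat("duplicacy", 1000))
        chunks, fileHash := splitIntoChunks(data, 4096)
        fmt.Printf("%d chunks, file hash %s\n", len(chunks), fileHash)
    }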

src/duplicacy_backupmanager_test.go

@@ -169,6 +169,12 @@ func getFileHash(path string) (hash string) {
 	return hex.EncodeToString(hasher.Sum(nil))
 }
 
+func assertRestoreFailures(t *testing.T, failedFiles int, expectedFailedFiles int) {
+	if failedFiles != expectedFailedFiles {
+		t.Errorf("Failed to restore %d instead of %d file(s)", failedFiles, expectedFailedFiles)
+	}
+}
+
 func TestBackupManager(t *testing.T) {
 
 	rand.Seed(time.Now().UnixNano())
@@ -246,8 +252,9 @@ func TestBackupManager(t *testing.T) {
 	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
 	time.Sleep(time.Duration(delay) * time.Second)
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
+	failedFiles := backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
 		if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
@@ -270,8 +277,9 @@ func TestBackupManager(t *testing.T) {
 	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
 	time.Sleep(time.Duration(delay) * time.Second)
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
+	failedFiles = backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
 		hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -298,8 +306,9 @@ func TestBackupManager(t *testing.T) {
 	createRandomFile(testDir+"/repository2/dir5/file5", 100)
 
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+	failedFiles = backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
 		/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
 		hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -325,8 +334,9 @@ func TestBackupManager(t *testing.T) {
 	os.Remove(testDir + "/repository1/file2")
 	os.Remove(testDir + "/repository1/dir1/file3")
 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-	backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+	failedFiles = backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"} /*allowFailures=*/, false)
+	assertRestoreFailures(t, failedFiles, 0)
 
 	for _, f := range []string{"file1", "file2", "dir1/file3"} {
 		hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -425,7 +435,7 @@ func corruptFile(path string, start int, length int, seed int64) {
 	}
 }
 
-func TestBackupManagerPersist(t *testing.T) {
+func TestPersistRestore(t *testing.T) {
 
 	// We want deterministic output here so we can test the expected files are corrupted by missing or corrupt chunks
 	// Therefore use rand functions with a fixed seed, and known keys
@@ -600,8 +610,9 @@ func TestBackupManagerPersist(t *testing.T) {
 	// test restore all uncorrupted to repository3
 	SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
-	unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+	failedFiles := unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	assertRestoreFailures(t, failedFiles, 0)
 	checkAllUncorrupted("/repository3")
 
 	// test for corrupt files and -persist
@@ -636,8 +647,9 @@ func TestBackupManagerPersist(t *testing.T) {
 	// test restore corrupted, inPlace = true, corrupted files will have hash failures
 	os.RemoveAll(testDir + "/repository2")
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+	failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+	assertRestoreFailures(t, failedFiles, 1)
 
 	// check restore, expect file1 to be corrupted
 	checkCorruptedFile("/repository2", "file1")
@@ -645,8 +657,9 @@ func TestBackupManagerPersist(t *testing.T) {
 	os.RemoveAll(testDir + "/repository2")
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+	failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+	assertRestoreFailures(t, failedFiles, 1)
 
 	// check restore, expect file3 to be corrupted
 	checkCorruptedFile("/repository2", "dir1/file3")
@@ -655,8 +668,9 @@ func TestBackupManagerPersist(t *testing.T) {
 	// test restore corrupted, inPlace = false, corrupted files will be missing
 	os.RemoveAll(testDir + "/repository2")
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
+	failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+	assertRestoreFailures(t, failedFiles, 1)
 
 	// check restore, expect file1 to be corrupted
 	checkMissingFile("/repository2", "file1")
@@ -664,8 +678,9 @@ func TestBackupManagerPersist(t *testing.T) {
 	os.RemoveAll(testDir + "/repository2")
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
+	failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, false,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+	assertRestoreFailures(t, failedFiles, 1)
 
 	// check restore, expect file3 to be corrupted
 	checkMissingFile("/repository2", "dir1/file3")
@@ -674,21 +689,26 @@ func TestBackupManagerPersist(t *testing.T) {
 	// with overwrite=true, corrupted file1 from unenc will be restored correctly from enc
 	// the latter will not touch the existing file3 with correct hash
 	os.RemoveAll(testDir + "/repository2")
-	unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
+	failedFiles = unencBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, false,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
-	encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+	assertRestoreFailures(t, failedFiles, 1)
+	failedFiles = encBackupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+	assertRestoreFailures(t, failedFiles, 0)
 	checkAllUncorrupted("/repository2")
 
 	// restore to repository3, with overwrite and allowFailures (true/false), quickMode = false (use hashes)
 	// should always succeed as uncorrupted files already exist with correct hash, so these will be ignored
 	SetDuplicacyPreferencePath(testDir + "/repository3/.duplicacy")
-	unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+	failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, false)
+	assertRestoreFailures(t, failedFiles, 0)
 	checkAllUncorrupted("/repository3")
-	unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+	failedFiles = unencBackupManager.Restore(testDir+"/repository3", threads /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
 		/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil /*allowFailures=*/, true)
+	assertRestoreFailures(t, failedFiles, 0)
 	checkAllUncorrupted("/repository3")
 }

src/duplicacy_chunkdownloader.go

@@ -52,6 +52,8 @@ type ChunkDownloader struct {
 	numberOfDownloadedChunks  int // The number of chunks that have been downloaded
 	numberOfDownloadingChunks int // The number of chunks still being downloaded
 	numberOfActiveChunks      int // The number of chunks that is being downloaded or has been downloaded but not reclaimed
+
+	NumberOfFailedChunks int // The number of chunks that can't be downloaded
 }
 
 func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int, allowFailures bool) *ChunkDownloader {
@@ -279,6 +281,9 @@ func (downloader *ChunkDownloader) WaitForCompletion() {
 		downloader.numberOfActiveChunks--
 		downloader.numberOfDownloadedChunks++
 		downloader.numberOfDownloadingChunks--
+		if completion.chunk.isBroken {
+			downloader.NumberOfFailedChunks++
+		}
 	}
// Pass the tasks one by one to the download queue // Pass the tasks one by one to the download queue
@@ -305,7 +310,10 @@ func (downloader *ChunkDownloader) Stop() {
 		downloader.taskList[completion.chunkIndex].chunk = completion.chunk
 		downloader.numberOfDownloadedChunks++
 		downloader.numberOfDownloadingChunks--
+		if completion.chunk.isBroken {
+			downloader.NumberOfFailedChunks++
+		}
 	}
 
 	for i := range downloader.completedTasks {
 		downloader.config.PutChunk(downloader.taskList[i].chunk)
@@ -359,17 +367,11 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 	// will be set up before the encryption
 	chunk.Reset(false)
 
-	// This lambda function allows different handling of failures depending on allowFailures state
-	onFailure := func(failureFn LogFunc, logID string, format string, v ...interface{}) {
+	// If failures are allowed, complete the task properly
+	completeFailedChunk := func(chunk *Chunk) {
 		if downloader.allowFailures {
-			// Allowing failures: Convert message to warning, mark chunk isBroken = true and complete goroutine
-			LOG_WARN(logID, format, v...)
 			chunk.isBroken = true
 			downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
-		} else {
-			// Process failure as normal
-			failureFn(logID, format, v...)
 		}
 	}
@@ -379,7 +381,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 	// Find the chunk by ID first.
 	chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
 	if err != nil {
-		onFailure(LOG_ERROR, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
+		completeFailedChunk(chunk)
+		LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
 		return false
 	}
@@ -387,7 +390,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 		// No chunk is found. Have to find it in the fossil pool again.
 		fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
 		if err != nil {
-			onFailure(LOG_ERROR, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
+			completeFailedChunk(chunk)
+			LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
 			return false
 		}
@@ -409,11 +413,12 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 				continue
 			}
 
+			completeFailedChunk(chunk)
 			// A chunk is not found. This is a serious error and hopefully it will never happen.
 			if err != nil {
-				onFailure(LOG_FATAL, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
 			} else {
-				onFailure(LOG_FATAL, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
+				LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
 			}
 			return false
 		}
@@ -422,7 +427,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 		// downloading again.
 		err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
 		if err != nil {
-			onFailure(LOG_FATAL, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
+			completeFailedChunk(chunk)
+			LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
 			return false
 		}
@@ -439,7 +445,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			chunk.Reset(false)
 			continue
 		} else {
-			onFailure(LOG_ERROR, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
+			completeFailedChunk(chunk)
+			LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CHUNK", "Failed to download the chunk %s: %v", chunkID, err)
 			return false
 		}
 	}
@@ -451,7 +458,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			chunk.Reset(false)
 			continue
 		} else {
-			onFailure(LOG_ERROR, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
+			completeFailedChunk(chunk)
+			LOG_WERROR(downloader.allowFailures, "DOWNLOAD_DECRYPT", "Failed to decrypt the chunk %s: %v", chunkID, err)
 			return false
 		}
 	}
@@ -463,7 +471,8 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 			chunk.Reset(false)
 			continue
 		} else {
-			onFailure(LOG_FATAL, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
+			completeFailedChunk(chunk)
+			LOG_WERROR(downloader.allowFailures, "DOWNLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
 			return false
 		}
 	}
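One subtlety in this file: every failure path now calls completeFailedChunk before logging. When failures are tolerated, the consumer side (WaitForChunk/WaitForCompletion) is still waiting for a completion event for that chunk, so the broken chunk must be pushed into the completion channel or the restore would block forever. A stripped-down sketch of that pattern with generic types (not duplicacy's actual ChunkDownloadCompletion machinery):

    package main

    import "fmt"

    type completion struct {
        index  int
        broken bool // mirrors chunk.isBroken
    }

    func main() {
        done := make(chan completion)

        // Downloader goroutine: even when a download fails, it must emit a
        // completion (marked broken) so the consumer never blocks forever.
        go func() {
            for i := 0; i < 3; i++ {
                done <- completion{index: i, broken: i == 1} // pretend #1 fails
            }
        }()

        failed := 0
        for i := 0; i < 3; i++ {
            if c := <-done; c.broken {
                failed++ // mirrors downloader.NumberOfFailedChunks++
            }
        }
        fmt.Printf("%d chunk(s) failed\n", failed)
    }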

src/duplicacy_log.go

@@ -87,9 +87,6 @@ func SetLoggingLevel(level int) {
 	loggingLevel = level
 }
 
-// log function type for passing as a parameter to functions
-type LogFunc func(logID string, format string, v ...interface{})
-
 func LOG_DEBUG(logID string, format string, v ...interface{}) {
 	logf(DEBUG, logID, format, v...)
 }
@@ -110,6 +107,15 @@ func LOG_ERROR(logID string, format string, v ...interface{}) {
 	logf(ERROR, logID, format, v...)
 }
 
+func LOG_WERROR(isWarning bool, logID string, format string, v ...interface{}) {
+	if isWarning {
+		logf(WARN, logID, format, v...)
+	} else {
+		logf(ERROR, logID, format, v...)
+	}
+}
+
 func LOG_FATAL(logID string, format string, v ...interface{}) {
 	logf(FATAL, logID, format, v...)
 }

src/duplicacy_snapshotmanager.go

@@ -851,6 +851,10 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 		}
 	}
 
+	for chunk, size := range chunkSizeMap {
+		LOG_INFO("debug", "Chunk %s: %d", chunk, size)
+	}
+
 	if snapshotID == "" || showStatistics || showTabular {
 		snapshotIDs, err := manager.ListSnapshotIDs()
 		if err != nil {
@@ -1024,8 +1028,8 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 		manager.chunkDownloader.AddChunk(chunkHash)
 	}
 	manager.chunkDownloader.WaitForCompletion()
-	if allowFailures {
-		LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been verified, see above for any errors", len(*allChunkHashes))
+	if manager.chunkDownloader.NumberOfFailedChunks > 0 {
+		LOG_ERROR("SNAPSHOT_VERIFY", "%d out of %d chunks are corrupted", manager.chunkDownloader.NumberOfFailedChunks, len(*allChunkHashes))
 	} else {
 		LOG_INFO("SNAPSHOT_VERIFY", "All %d chunks have been successfully verified", len(*allChunkHashes))
 	}
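This mirrors the restore side: check tallies broken chunks while verification runs to completion, and only afterwards converts the tally into a hard error. Schematically (a stand-in for the tail of CheckSnapshots, with duplicacy's types elided):

    package main

    import "fmt"

    // summarize stands in for the end of CheckSnapshots: under -persist,
    // verification never stops early, but a non-zero failure count must still
    // turn the whole run into an error.
    func summarize(failedChunks, totalChunks int) error {
        if failedChunks > 0 {
            return fmt.Errorf("%d out of %d chunks are corrupted", failedChunks, totalChunks)
        }
        fmt.Printf("All %d chunks have been successfully verified\n", totalChunks)
        return nil
    }

    func main() {
        if err := summarize(2, 100); err != nil {
            fmt.Println("ERROR SNAPSHOT_VERIFY:", err)
        }
    }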