1
0
mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-16 00:03:34 +00:00

Merge pull request #180 from niknah/dry_run

Added backup --dry-run option.
This commit is contained in:
gilbertchen
2017-09-18 22:32:21 -04:00
committed by GitHub
6 changed files with 41 additions and 20 deletions

View File

@@ -50,6 +50,7 @@ OPTIONS:
-stats show statistics during and after backup
-threads <n> number of uploading threads
-limit-rate <kB/s> the maximum upload rate (in kilobytes/sec)
-dry-run dry run for testing, don't backup anything. Use with -stats and -d
-vss enable the Volume Shadow Copy service (Windows only)
-storage <storage name> backup to the specified storage instead of the default one
```

View File

@@ -623,12 +623,14 @@ func backupRepository(context *cli.Context) {
enableVSS := context.Bool("vss") enableVSS := context.Bool("vss")
dryRun := context.Bool("dry-run")
uploadRateLimit := context.Int("limit-rate") uploadRateLimit := context.Int("limit-rate")
storage.SetRateLimits(0, uploadRateLimit) storage.SetRateLimits(0, uploadRateLimit)
backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password) backupManager := duplicacy.CreateBackupManager(preference.SnapshotID, storage, repository, password)
duplicacy.SavePassword(*preference, "password", password) duplicacy.SavePassword(*preference, "password", password)
backupManager.SetupSnapshotCache(preference.Name) backupManager.SetupSnapshotCache(preference.Name)
backupManager.SetDryRun(dryRun)
backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS) backupManager.Backup(repository, quickMode, threads, context.String("t"), showStatistics, enableVSS)
runScript(context, preference.Name, "post") runScript(context, preference.Name, "post")
@@ -1211,6 +1213,10 @@ func main() {
Usage: "the maximum upload rate (in kilobytes/sec)", Usage: "the maximum upload rate (in kilobytes/sec)",
Argument: "<kB/s>", Argument: "<kB/s>",
}, },
cli.BoolFlag {
Name: "dry-run",
Usage: "Dry run for testing, don't backup anything. Use with -stats and -d",
},
cli.BoolFlag { cli.BoolFlag {
Name: "vss", Name: "vss",
Usage: "enable the Volume Shadow Copy service (Windows only)", Usage: "enable the Volume Shadow Copy service (Windows only)",

View File

@@ -36,6 +36,10 @@ type BackupManager struct {
} }
// SetDryRun enables or disables dry-run mode by recording the flag on the
// manager's shared config. Per the other hunks in this change, the flag is
// consulted to skip chunk uploads, snapshot-file uploads, and snapshot-cache
// cleanup during a backup — NOTE(review): confirm the full set of guarded
// operations against the complete sources; only these hunks are visible here.
func (manager *BackupManager) SetDryRun(dryRun bool) {
manager.config.dryRun = dryRun
}
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to // CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the // identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
@@ -630,7 +634,9 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
} }
skippedFiles = append(skippedFiles, fileReader.SkippedFiles...) skippedFiles = append(skippedFiles, fileReader.SkippedFiles...)
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil) if !manager.config.dryRun {
manager.SnapshotManager.CleanSnapshotCache(localSnapshot, nil)
}
LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision) LOG_INFO("BACKUP_END", "Backup for %s at revision %d completed", top, localSnapshot.Revision)
RunAtError = func() {} RunAtError = func() {}
@@ -1109,8 +1115,9 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
} }
path := fmt.Sprintf("snapshots/%s/%d", manager.snapshotID, snapshot.Revision) path := fmt.Sprintf("snapshots/%s/%d", manager.snapshotID, snapshot.Revision)
manager.SnapshotManager.UploadFile(path, path, description) if !manager.config.dryRun {
manager.SnapshotManager.UploadFile(path, path, description)
}
return totalSnapshotChunkSize, numberOfNewSnapshotChunks, totalUploadedSnapshotChunkSize, totalUploadedSnapshotChunkBytes return totalSnapshotChunkSize, numberOfNewSnapshotChunks, totalUploadedSnapshotChunkSize, totalUploadedSnapshotChunkBytes
} }

View File

@@ -134,13 +134,17 @@ func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) boo
return false return false
} }
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes()) if !uploader.config.dryRun {
if err != nil { err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err) if err != nil {
return false LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
return false
}
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
} else {
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
} }
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength()) uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1) atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return true return true

View File

@@ -55,6 +55,7 @@ type Config struct {
chunkPool chan *Chunk `json:"-"` chunkPool chan *Chunk `json:"-"`
numberOfChunks int32 numberOfChunks int32
dryRun bool
} }
// Create an alias to avoid recursive calls on Config.MarshalJSON // Create an alias to avoid recursive calls on Config.MarshalJSON

View File

@@ -594,19 +594,21 @@ func (manager *SnapshotManager) ListAllFiles(storage Storage, top string) (allFi
} }
} }
if top == "chunks/" { if !manager.config.dryRun {
// We're listing all chunks so this is the perfect place to detect if a directory contains too many if top == "chunks/" {
// chunks. Create sub-directories if necessary // We're listing all chunks so this is the perfect place to detect if a directory contains too many
if len(files) > 1024 && !storage.IsFastListing() { // chunks. Create sub-directories if necessary
for i := 0; i < 256; i++ { if len(files) > 1024 && !storage.IsFastListing() {
subdir := dir + fmt.Sprintf("%02x\n", i) for i := 0; i < 256; i++ {
manager.storage.CreateDirectory(0, subdir) subdir := dir + fmt.Sprintf("%02x\n", i)
manager.storage.CreateDirectory(0, subdir)
}
}
} else {
// Remove chunk sub-directories that are empty
if len(files) == 0 && strings.HasPrefix(dir, "chunks/") && dir != "chunks/" {
storage.DeleteFile(0, dir)
} }
}
} else {
// Remove chunk sub-directories that are empty
if len(files) == 0 && strings.HasPrefix(dir, "chunks/") && dir != "chunks/" {
storage.DeleteFile(0, dir)
} }
} }
} }