diff --git a/duplicacy/duplicacy_main.go b/duplicacy/duplicacy_main.go index c0f6897..8ccc65a 100644 --- a/duplicacy/duplicacy_main.go +++ b/duplicacy/duplicacy_main.go @@ -1066,12 +1066,17 @@ func pruneSnapshots(context *cli.Context) { os.Exit(ArgumentExitCode) } + threads := context.Int("threads") + if threads < 1 { + threads = 1 + } + repository, preference := getRepositoryPreference(context, "") runScript(context, preference.Name, "pre") duplicacy.LOG_INFO("STORAGE_SET", "Storage set to %s", preference.StorageURL) - storage := duplicacy.CreateStorage(*preference, false, 1) + storage := duplicacy.CreateStorage(*preference, false, threads) if storage == nil { return } @@ -1110,7 +1115,7 @@ func pruneSnapshots(context *cli.Context) { backupManager.SetupSnapshotCache(preference.Name) backupManager.SnapshotManager.PruneSnapshots(selfID, snapshotID, revisions, tags, retentions, - exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly) + exhaustive, exclusive, ignoredIDs, dryRun, deleteOnly, collectOnly, threads) runScript(context, preference.Name, "post") } @@ -1658,6 +1663,12 @@ func main() { Usage: "prune snapshots from the specified storage", Argument: "", }, + cli.IntFlag{ + Name: "threads", + Value: 1, + Usage: "number of threads used to prune unreferenced chunks", + Argument: "", + }, }, Usage: "Prune snapshots by revision, tag, or retention policy", ArgsUsage: " ", diff --git a/src/duplicacy_backupmanager_test.go b/src/duplicacy_backupmanager_test.go index e960ceb..dce5f6c 100644 --- a/src/duplicacy_backupmanager_test.go +++ b/src/duplicacy_backupmanager_test.go @@ -343,7 +343,7 @@ func TestBackupManager(t *testing.T) { backupManager.SnapshotManager.CheckSnapshots( /*snapshotID*/ "host1" /*revisions*/, []int{1, 2, 3} /*tag*/, "", /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false) backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, []int{1} /*tags*/, nil 
/*retentions*/, nil, - /*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false) + /*exhaustive*/ false /*exclusive=*/, false /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1) numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false) if numberOfSnapshots != 2 { t.Errorf("Expected 2 snapshots but got %d", numberOfSnapshots) @@ -352,7 +352,7 @@ func TestBackupManager(t *testing.T) { /*showStatistics*/ false /*showTabular*/, false /*checkFiles*/, false /*searchFossils*/, false /*resurrect*/, false) backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "fourth", false, false, 0, false) backupManager.SnapshotManager.PruneSnapshots("host1", "host1" /*revisions*/, nil /*tags*/, nil /*retentions*/, nil, - /*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false) + /*exhaustive*/ false /*exclusive=*/, true /*ignoredIDs*/, nil /*dryRun*/, false /*deleteOnly*/, false /*collectOnly*/, false, 1) numberOfSnapshots = backupManager.SnapshotManager.ListSnapshots( /*snapshotID*/ "host1" /*revisionsToList*/, nil /*tag*/, "" /*showFiles*/, false /*showChunks*/, false) if numberOfSnapshots != 3 { t.Errorf("Expected 3 snapshots but got %d", numberOfSnapshots) diff --git a/src/duplicacy_chunkoperator.go b/src/duplicacy_chunkoperator.go new file mode 100644 index 0000000..3b20f60 --- /dev/null +++ b/src/duplicacy_chunkoperator.go @@ -0,0 +1,187 @@ +// Copyright (c) Acrosync LLC. All rights reserved. +// Free for personal use and commercial trial +// Commercial use requires per-user licenses available from https://duplicacy.com + +package duplicacy + +import ( + "sync" + "sync/atomic" + "time" +) + +// These are operations that ChunkOperator will perform. 
+const ( + ChunkOperationFind = 0 + ChunkOperationDelete = 1 + ChunkOperationFossilize = 2 + ChunkOperationResurrect = 3 +) + +// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations. +type ChunkOperatorTask struct { + operation int // The type of operation + chunkID string // The chunk id + filePath string // The path of the chunk file; it may be empty +} + +// ChunkOperator is capable of performing multi-threaded operations on chunks. +type ChunkOperator struct { + storage Storage // This storage + threads int // Number of threads + taskQueue chan ChunkOperatorTask // Operating goroutines are waiting on this channel for input + stopChannel chan bool // Used to stop all the goroutines + numberOfActiveTasks int64 // The number of chunks that are being operated on + + fossils []string // For fossilize operation, the paths of the fossils are stored in this slice + fossilsLock *sync.Mutex // The lock for 'fossils' +} + +// CreateChunkOperator creates a new ChunkOperator. 
+func CreateChunkOperator(storage Storage, threads int) *ChunkOperator { + operator := &ChunkOperator{ + storage: storage, + threads: threads, + + taskQueue: make(chan ChunkOperatorTask, threads*4), + stopChannel: make(chan bool), + + fossils: make([]string, 0), + fossilsLock: &sync.Mutex{}, + } + + // Start the operator goroutines + for i := 0; i < operator.threads; i++ { + go func(threadIndex int) { + defer CatchLogException() + for { + select { + case task := <-operator.taskQueue: + operator.Run(threadIndex, task) + case <-operator.stopChannel: + return + } + } + }(i) + } + + return operator +} + +func (operator *ChunkOperator) Stop() { + for atomic.LoadInt64(&operator.numberOfActiveTasks) > 0 { + time.Sleep(100 * time.Millisecond) + } + for i := 0; i < operator.threads; i++ { + operator.stopChannel <- false + } +} + +func (operator *ChunkOperator) AddTask(operation int, chunkID string, filePath string) { + + task := ChunkOperatorTask{ + operation: operation, + chunkID: chunkID, + filePath: filePath, + } + operator.taskQueue <- task + atomic.AddInt64(&operator.numberOfActiveTasks, int64(1)) +} + +func (operator *ChunkOperator) Find(chunkID string) { + operator.AddTask(ChunkOperationFind, chunkID, "") +} + +func (operator *ChunkOperator) Delete(chunkID string, filePath string) { + operator.AddTask(ChunkOperationDelete, chunkID, filePath) +} + +func (operator *ChunkOperator) Fossilize(chunkID string, filePath string) { + operator.AddTask(ChunkOperationFossilize, chunkID, filePath) +} + +func (operator *ChunkOperator) Resurrect(chunkID string, filePath string) { + operator.AddTask(ChunkOperationResurrect, chunkID, filePath) +} + +func (operator *ChunkOperator) Run(threadIndex int, task ChunkOperatorTask) { + defer func() { + atomic.AddInt64(&operator.numberOfActiveTasks, int64(-1)) + }() + + // task.filePath may be empty. If so, find the chunk first. 
+ if task.operation == ChunkOperationDelete || task.operation == ChunkOperationFossilize { + if task.filePath == "" { + filePath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false) + if err != nil { + LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err) + return + } else if !exist { + LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID) + return + } + task.filePath = filePath + } + } + + if task.operation == ChunkOperationFind { + _, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false) + if err != nil { + LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err) + } else if !exist { + LOG_ERROR("CHUNK_FIND", "Chunk %s does not exist in the storage", task.chunkID) + } else { + LOG_DEBUG("CHUNK_FIND", "Chunk %s exists in the storage", task.chunkID) + } + } else if task.operation == ChunkOperationDelete { + // In exclusive mode, we assume no other restore operation is running concurrently. 
+ err := operator.storage.DeleteFile(threadIndex, task.filePath) + if err != nil { + LOG_WARN("CHUNK_DELETE", "Failed to remove the file %s: %v", task.filePath, err) + } else { + if task.chunkID != "" { + LOG_INFO("CHUNK_DELETE", "The chunk %s has been permanently removed", task.chunkID) + } else { + LOG_INFO("CHUNK_DELETE", "Deleted file %s from the storage", task.filePath) + } + } + } else if task.operation == ChunkOperationFossilize { + + fossilPath := task.filePath + ".fsl" + + err := operator.storage.MoveFile(threadIndex, task.filePath, fossilPath) + if err != nil { + if _, exist, _, _ := operator.storage.FindChunk(threadIndex, task.chunkID, true); exist { + err := operator.storage.DeleteFile(threadIndex, task.filePath) + if err == nil { + LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s as the fossil already exists", task.chunkID) + } + } else { + LOG_ERROR("CHUNK_DELETE", "Failed to fossilize the chunk %s: %v", task.chunkID, err) + } + } else { + LOG_TRACE("CHUNK_FOSSILIZE", "Fossilized chunk %s", task.chunkID) + operator.fossilsLock.Lock() + operator.fossils = append(operator.fossils, fossilPath) + operator.fossilsLock.Unlock() + } + } else if task.operation == ChunkOperationResurrect { + chunkPath, exist, _, err := operator.storage.FindChunk(threadIndex, task.chunkID, false) + if err != nil { + LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", task.chunkID, err) + } + + if exist { + operator.storage.DeleteFile(threadIndex, task.filePath) + LOG_INFO("FOSSIL_RESURRECT", "The chunk %s already exists", task.chunkID) + } else { + err := operator.storage.MoveFile(threadIndex, task.filePath, chunkPath) + if err != nil { + LOG_ERROR("FOSSIL_RESURRECT", "Failed to resurrect the chunk %s from the fossil %s: %v", + task.chunkID, task.filePath, err) + } else { + LOG_INFO("FOSSIL_RESURRECT", "The chunk %s has been resurrected", task.filePath) + } + } + } +} diff --git a/src/duplicacy_snapshotmanager.go b/src/duplicacy_snapshotmanager.go index 
f072092..f664bba 100644 --- a/src/duplicacy_snapshotmanager.go +++ b/src/duplicacy_snapshotmanager.go @@ -175,6 +175,7 @@ type SnapshotManager struct { snapshotCache *FileStorage chunkDownloader *ChunkDownloader + chunkOperator *ChunkOperator } // CreateSnapshotManager creates a snapshot manager @@ -1503,36 +1504,11 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis } // fossilizeChunk turns the chunk into a fossil. -func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string, - exclusive bool, collection *FossilCollection) bool { +func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string, exclusive bool) bool { if exclusive { - err := manager.storage.DeleteFile(0, filePath) - if err != nil { - LOG_ERROR("CHUNK_DELETE", "Failed to remove the chunk %s: %v", chunkID, err) - return false - } else { - LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s", chunkID) - } - + manager.chunkOperator.Delete(chunkID, filePath) } else { - fossilPath := filePath + ".fsl" - - err := manager.storage.MoveFile(0, filePath, fossilPath) - if err != nil { - if _, exist, _, _ := manager.storage.FindChunk(0, chunkID, true); exist { - err := manager.storage.DeleteFile(0, filePath) - if err == nil { - LOG_TRACE("CHUNK_DELETE", "Deleted chunk file %s as the fossil already exists", chunkID) - } - } else { - LOG_ERROR("CHUNK_DELETE", "Failed to fossilize the chunk %s: %v", chunkID, err) - return false - } - } else { - LOG_TRACE("CHUNK_FOSSILIZE", "Fossilized chunk %s", chunkID) - } - - collection.AddFossil(fossilPath) + manager.chunkOperator.Fossilize(chunkID, filePath) } return true @@ -1541,6 +1517,7 @@ func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string, // resurrectChunk turns the fossil back into a chunk func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) bool { + manager.chunkOperator.Resurrect(chunkID, fossilPath) chunkPath, exist, _, err := 
manager.storage.FindChunk(0, chunkID, false) if err != nil { LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", chunkID, err) @@ -1581,7 +1558,7 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, revisionsToBeDeleted []int, tags []string, retentions []string, exhaustive bool, exclusive bool, ignoredIDs []string, - dryRun bool, deleteOnly bool, collectOnly bool) bool { + dryRun bool, deleteOnly bool, collectOnly bool, threads int) bool { LOG_DEBUG("DELETE_PARAMETERS", "id: %s, revisions: %v, tags: %v, retentions: %v, exhaustive: %t, exclusive: %t, "+ @@ -1593,6 +1570,8 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified") } + manager.chunkOperator = CreateChunkOperator(manager.storage, threads) + prefPath := GetDuplicacyPreferencePath() logDir := path.Join(prefPath, "logs") err := os.MkdirAll(logDir, 0700) @@ -1796,20 +1775,15 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, continue } - manager.resurrectChunk(fossil, chunk) + manager.chunkOperator.Resurrect(chunk, fossil) fmt.Fprintf(logFile, "Resurrected fossil %s (collection %s)\n", chunk, collectionName) } else { if dryRun { LOG_INFO("FOSSIL_DELETE", "The chunk %s would be permanently removed", chunk) } else { - err = manager.storage.DeleteFile(0, fossil) - if err != nil { - LOG_WARN("FOSSIL_DELETE", "The chunk %s could not be removed: %v", chunk, err) - } else { - LOG_INFO("FOSSIL_DELETE", "The chunk %s has been permanently removed", chunk) - fmt.Fprintf(logFile, "Deleted fossil %s (collection %s)\n", chunk, collectionName) - } + manager.chunkOperator.Delete(chunk, fossil) + fmt.Fprintf(logFile, "Deleted fossil %s (collection %s)\n", chunk, collectionName) } } } @@ -1820,8 +1794,7 @@ func (manager 
*SnapshotManager) PruneSnapshots(selfID string, snapshotID string, LOG_INFO("TEMPORARY_DELETE", "The temporary file %s would be deleted", temporary) } else { // Fail silently, since temporary files are supposed to be renamed or deleted after upload is done - _ = manager.storage.DeleteFile(0, temporary) - LOG_INFO("TEMPORARY_DELETE", "The temporary file %s has been deleted", temporary) + manager.chunkOperator.Delete("", temporary) fmt.Fprintf(logFile, "Deleted temporary %s (collection %s)\n", temporary, collectionName) } } @@ -1949,12 +1922,17 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, if exhaustive { success = manager.pruneSnapshotsExhaustive(referencedFossils, allSnapshots, collection, logFile, dryRun, exclusive) } else { - success = manager.pruneSnapshots(allSnapshots, collection, logFile, dryRun, exclusive) + success = manager.pruneSnapshotsNonExhaustive(allSnapshots, collection, logFile, dryRun, exclusive) } if !success { return false } + manager.chunkOperator.Stop() + for _, fossil := range manager.chunkOperator.fossils { + collection.AddFossil(fossil) + } + // Save the fossil collection if it is not empty. if !collection.IsEmpty() && !dryRun { collection.EndTime = time.Now().Unix() @@ -2028,7 +2006,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string, // pruneSnapshots in non-exhaustive mode, only chunks that exist in the // snapshots to be deleted but not other are identified as unreferenced chunks. 
-func (manager *SnapshotManager) pruneSnapshots(allSnapshots map[string][]*Snapshot, collection *FossilCollection, logFile io.Writer, dryRun, exclusive bool) bool { +func (manager *SnapshotManager) pruneSnapshotsNonExhaustive(allSnapshots map[string][]*Snapshot, collection *FossilCollection, logFile io.Writer, dryRun, exclusive bool) bool { targetChunks := make(map[string]bool) // Now build all chunks referened by snapshot not deleted @@ -2085,18 +2063,7 @@ func (manager *SnapshotManager) pruneSnapshots(allSnapshots map[string][]*Snapsh continue } - chunkPath, exist, _, err := manager.storage.FindChunk(0, chunk, false) - if err != nil { - LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", chunk, err) - return false - } - - if !exist { - LOG_WARN("CHUNK_MISSING", "The chunk %s does not exist", chunk) - continue - } - - manager.fossilizeChunk(chunk, chunkPath, exclusive, collection) + manager.fossilizeChunk(chunk, "", exclusive) if exclusive { fmt.Fprintf(logFile, "Deleted chunk %s (exclusive mode)\n", chunk) } else { @@ -2158,12 +2125,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s if exclusive { // In exclusive mode, we assume no other restore operation is running concurrently. 
- err := manager.storage.DeleteFile(0, chunkDir+file) - if err != nil { - LOG_ERROR("CHUNK_TEMPORARY", "Failed to remove the temporary file %s: %v", file, err) - return false - } - LOG_DEBUG("CHUNK_TEMPORARY", "Deleted temporary file %s", file) + manager.chunkOperator.Delete("", chunkDir+file) fmt.Fprintf(logFile, "Deleted temporary %s\n", file) } else { collection.AddTemporary(file) @@ -2182,7 +2144,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s chunk = strings.Replace(chunk, ".fsl", "", -1) if _, found := referencedChunks[chunk]; found { - manager.resurrectChunk(chunkDir+file, chunk) + manager.chunkOperator.Resurrect(chunk, chunkDir + file) } else { collection.AddFossil(chunkDir + file) LOG_DEBUG("FOSSIL_FIND", "Found unreferenced fossil %s", file) @@ -2206,7 +2168,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s continue } - manager.fossilizeChunk(chunk, chunkDir+file, exclusive, collection) + manager.fossilizeChunk(chunk, chunkDir+file, exclusive) if exclusive { fmt.Fprintf(logFile, "Deleted chunk %s (exclusive mode)\n", chunk) } else { @@ -2222,14 +2184,8 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s } // This is a redundant chunk file (for instance D3/495A8D and D3/49/5A8D ) - err := manager.storage.DeleteFile(0, chunkDir+file) - if err != nil { - LOG_WARN("CHUNK_DELETE", "Failed to remove the redundant chunk file %s: %v", file, err) - } else { - LOG_TRACE("CHUNK_DELETE", "Removed the redundant chunk file %s", file) - fmt.Fprintf(logFile, "Deleted redundant chunk %s\n", file) - } - + manager.chunkOperator.Delete(chunk, chunkDir+file) + fmt.Fprintf(logFile, "Deleted redundant chunk %s\n", file) } else { referencedChunks[chunk] = true LOG_DEBUG("CHUNK_KEEP", "Chunk %s is referenced", chunk) diff --git a/src/duplicacy_snapshotmanager_test.go b/src/duplicacy_snapshotmanager_test.go index d64965f..b6e9f79 100644 --- 
a/src/duplicacy_snapshotmanager_test.go +++ b/src/duplicacy_snapshotmanager_test.go @@ -253,11 +253,11 @@ func TestPruneSingleRepository(t *testing.T) { checkTestSnapshots(snapshotManager, 4, 0) t.Logf("Removing snapshot repository1 revisions 1 and 2 with --exclusive") - snapshotManager.PruneSnapshots("repository1", "repository1", []int{1, 2}, []string{}, []string{}, false, true, []string{}, false, false, false) + snapshotManager.PruneSnapshots("repository1", "repository1", []int{1, 2}, []string{}, []string{}, false, true, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 0) t.Logf("Removing snapshot repository1 revision 3 without --exclusive") - snapshotManager.PruneSnapshots("repository1", "repository1", []int{3}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("repository1", "repository1", []int{3}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 1, 2) t.Logf("Creating 1 snapshot") @@ -266,7 +266,7 @@ func TestPruneSingleRepository(t *testing.T) { checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Prune without removing any snapshots -- fossils will be deleted") - snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 0) } @@ -293,11 +293,11 @@ func TestPruneSingleHost(t *testing.T) { checkTestSnapshots(snapshotManager, 3, 0) t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, 
false, false, 1) checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Prune without removing any snapshots -- no fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Creating 1 snapshot") @@ -306,7 +306,7 @@ func TestPruneSingleHost(t *testing.T) { checkTestSnapshots(snapshotManager, 3, 2) t.Logf("Prune without removing any snapshots -- fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 3, 0) } @@ -334,11 +334,11 @@ func TestPruneMultipleHost(t *testing.T) { checkTestSnapshots(snapshotManager, 3, 0) t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Prune without removing any snapshots -- no fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Creating 1 snapshot") @@ -347,7 +347,7 @@ func TestPruneMultipleHost(t *testing.T) { 
checkTestSnapshots(snapshotManager, 3, 2) t.Logf("Prune without removing any snapshots -- no fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 3, 2) t.Logf("Creating 1 snapshot") @@ -356,7 +356,7 @@ func TestPruneMultipleHost(t *testing.T) { checkTestSnapshots(snapshotManager, 4, 2) t.Logf("Prune without removing any snapshots -- fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 4, 0) } @@ -381,7 +381,7 @@ func TestPruneAndResurrect(t *testing.T) { checkTestSnapshots(snapshotManager, 2, 0) t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 1, 2) t.Logf("Creating 1 snapshot") @@ -390,7 +390,7 @@ func TestPruneAndResurrect(t *testing.T) { checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Prune without removing any snapshots -- one fossil will be resurrected") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) 
checkTestSnapshots(snapshotManager, 2, 0) } @@ -418,11 +418,11 @@ func TestPruneWithInactiveHost(t *testing.T) { checkTestSnapshots(snapshotManager, 3, 0) t.Logf("Removing snapshot vm1@host1 revision 1") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Prune without removing any snapshots -- no fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 2) t.Logf("Creating 1 snapshot") @@ -431,7 +431,7 @@ func TestPruneWithInactiveHost(t *testing.T) { checkTestSnapshots(snapshotManager, 3, 2) t.Logf("Prune without removing any snapshots -- fossils will be deleted") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 3, 0) } @@ -459,15 +459,15 @@ func TestPruneWithRetentionPolicy(t *testing.T) { checkTestSnapshots(snapshotManager, 30, 0) t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 19, 0) t.Logf("Removing 
snapshot vm1@host1 -k 0:20 with --exclusive") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 19, 0) t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 12, 0) } @@ -499,7 +499,7 @@ func TestPruneWithRetentionPolicyAndTag(t *testing.T) { checkTestSnapshots(snapshotManager, 30, 0) t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive and --tag manual") - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{"manual"}, []string{"0:7"}, false, true, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{"manual"}, []string{"0:7"}, false, true, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 22, 0) } @@ -527,11 +527,11 @@ func TestPruneWithFossils(t *testing.T) { t.Logf("Prune without removing any snapshots but with --exhaustive") // The unreferenced fossil shouldn't be removed - snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, true, false, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, true, false, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 1) t.Logf("Prune without removing any snapshots but with --exclusive") // Now the unreferenced fossil should be removed - 
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, true, []string{}, false, false, false) + snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, true, []string{}, false, false, false, 1) checkTestSnapshots(snapshotManager, 2, 0) }