
Rewrite the backup procedure to reduce memory usage

Main changes:

* Change the listing order of files/directories so that the local and remote
  snapshots can be compared on the fly.

* Introduce a new struct called EntryList that maintains a list of
  files/directories, which are kept in memory when their number is small and
  serialized into a file when there are too many (see the sketch after this
  list).

* EntryList can also be quickly turned into an on-disk incomplete snapshot,
  to support fast resume on the next run.

* ChunkOperator can now download and upload chunks, thus replacing the
  original ChunkDownloader and ChunkUploader.  The new ChunkDownloader is
  only used to prefetch chunks during the restore operation (the new test
  file below exercises ChunkOperator's upload/download path).
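The commit does not show EntryList's actual interface, so here is a minimal,
hypothetical Go sketch of the spill-to-disk idea the second bullet describes.
The Entry type, the threshold, and every method name below are illustrative
assumptions, not duplicacy's API:

    // A minimal sketch of an entry list that buffers entries in memory and
    // spills to a temporary file once a threshold is reached. All names here
    // are hypothetical; duplicacy's EntryList may work differently.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"os"
    )

    type Entry struct {
    	Path string `json:"path"`
    	Size int64  `json:"size"`
    }

    type EntryList struct {
    	maxInMemory int           // spill to disk once this many entries are buffered
    	entries     []Entry       // in-memory buffer
    	file        *os.File      // on-disk overflow (cleanup omitted for brevity)
    	encoder     *json.Encoder // writes one JSON entry per line
    }

    func NewEntryList(maxInMemory int) *EntryList {
    	return &EntryList{maxInMemory: maxInMemory}
    }

    func (l *EntryList) Add(e Entry) error {
    	if l.file == nil && len(l.entries) < l.maxInMemory {
    		l.entries = append(l.entries, e)
    		return nil
    	}
    	// Too many entries: switch to an on-disk file, flushing the buffer first.
    	if l.file == nil {
    		f, err := os.CreateTemp("", "entrylist")
    		if err != nil {
    			return err
    		}
    		l.file, l.encoder = f, json.NewEncoder(f)
    		for _, buffered := range l.entries {
    			if err := l.encoder.Encode(buffered); err != nil {
    				return err
    			}
    		}
    		l.entries = nil
    	}
    	return l.encoder.Encode(e)
    }

    func main() {
    	list := NewEntryList(2)
    	for i := 0; i < 4; i++ {
    		if err := list.Add(Entry{Path: fmt.Sprintf("file%d", i), Size: int64(i)}); err != nil {
    			panic(err)
    		}
    	}
    	fmt.Printf("in memory: %d, spilled to disk: %v\n", len(list.entries), list.file != nil)
    }

Buffering a bounded number of entries and appending the overflow to a file
keeps memory usage flat regardless of how many files the repository contains,
which matches the stated goal of the rewrite.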
Author: Gilbert Chen
Date:   2021-10-24 23:34:49 -04:00
Parent: f83e4f3c44
Commit: d9f6545d63

20 changed files with 2762 additions and 1749 deletions


@@ -0,0 +1,118 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

package duplicacy

import (
	"os"
	"path"
	"runtime/debug"
	"testing"
	"time"

	crypto_rand "crypto/rand"
	"math/rand"
)

func TestChunkOperator(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	setTestingT(t)
	SetLoggingLevel(DEBUG)

	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()

	testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	t.Logf("storage: %s", testStorageName)
	storage, err := loadStorage(testDir, 1)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}
	storage.EnableTestMode()
	storage.SetRateLimits(testRateLimit, testRateLimit)

	for _, dir := range []string{"chunks", "snapshots"} {
		err = storage.CreateDirectory(0, dir)
		if err != nil {
			t.Errorf("Failed to create directory %s: %v", dir, err)
			return
		}
	}

	numberOfChunks := 100
	maxChunkSize := 64 * 1024
	if testQuickMode {
		numberOfChunks = 10
	}

	var chunks []*Chunk
	config := CreateConfig()
	config.MinimumChunkSize = 100
	config.chunkPool = make(chan *Chunk, numberOfChunks*2)

	// Fill each chunk with randomly sized random content.
	totalFileSize := 0
	for i := 0; i < numberOfChunks; i++ {
		content := make([]byte, rand.Int()%maxChunkSize+1)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}
		chunk := CreateChunk(config, true)
		chunk.Reset(true)
		chunk.Write(content)
		chunks = append(chunks, chunk)
		t.Logf("Chunk: %s, size: %d", chunk.GetID(), chunk.GetLength())
		totalFileSize += chunk.GetLength()
	}

	chunkOperator := CreateChunkOperator(config, storage, nil, false, testThreads, false)
	chunkOperator.UploadCompletionFunc = func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
		t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
	}

	// Upload every chunk and wait for all uploads to finish.
	for i, chunk := range chunks {
		chunkOperator.Upload(chunk, i, false)
	}
	chunkOperator.WaitForCompletion()

	// Download each chunk by hash and verify it matches what was uploaded.
	for i, chunk := range chunks {
		downloaded := chunkOperator.Download(chunk.GetHash(), i, false)
		if downloaded.GetID() != chunk.GetID() {
			t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
		}
	}
	chunkOperator.Stop()

	// Remove the chunks created by this test.
	for _, file := range listChunks(storage) {
		err = storage.DeleteFile(0, "chunks/"+file)
		if err != nil {
			t.Errorf("Failed to delete the file %s: %v", file, err)
			return
		}
	}
}