
Retry downloads with corrupted content up to three times.

Wasabi's GetObject occasionally (approximately 2% of the time in my testing) returns objects whose contents disagree with what has been stored in Wasabi. These corrupted responses cause errors when chunks are downloaded (during a restore, for example). Previously, these errors would abort the restore, requiring that it be started over from the beginning. This made it effectively impossible to complete any normally sized restore, since over that many chunks the cumulative chance of encountering such an error approaches unity.

With this change, Duplicacy will retry the download up to three times if it cannot decrypt the downloaded chunk, or if the ID computed from the downloaded chunk's content does not match the expected chunk ID.
Danny MacMillan
2017-09-26 15:04:32 -06:00
parent 80742ce2ba
commit f9603dad3c


@@ -324,22 +324,39 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
 		LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
 	}
 
-	err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
-	if err != nil {
-		LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
-		return false
-	}
+	const MaxDownloadAttempts = 3
+	for downloadAttempt := 0;; downloadAttempt++ {
+		err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
+		if err != nil {
+			LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
+			return false
+		}
 
-	err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
-	if err != nil {
-		LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
-		return false
-	}
+		err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
+		if err != nil {
+			if downloadAttempt < MaxDownloadAttempts {
+				LOG_WARN("RETRY_DOWNLOAD", "Failed to decrypt the chunk %s: %v", chunkID, err)
+				chunk.Reset(false)
+				continue
+			} else {
+				LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
+				return false
+			}
+		}
 
-	actualChunkID := chunk.GetID()
-	if actualChunkID != chunkID {
-		LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
-		return false
+		actualChunkID := chunk.GetID()
+		if actualChunkID != chunkID {
+			if downloadAttempt < MaxDownloadAttempts {
+				LOG_WARN("RETRY_DOWNLOAD", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
+				chunk.Reset(false)
+				continue
+			} else {
+				LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
+				return false
+			}
+		}
+
+		break
 	}
 
 	if len(cachedPath) > 0 {
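
For readers skimming the change, the retry structure can be sketched in isolation. The following is a minimal, self-contained illustration of the same pattern, not Duplicacy's actual API: downloadChunk and verifyChunk are hypothetical stand-ins for the storage download and the decrypt/ID checks performed in the diff above, and maxDownloadAttempts mirrors the MaxDownloadAttempts constant introduced by the commit.

package main

import (
	"errors"
	"fmt"
)

// maxDownloadAttempts mirrors the constant in the diff: after the first
// download, up to three further attempts are made before giving up.
const maxDownloadAttempts = 3

// downloadChunk is a hypothetical stand-in for fetching a chunk from storage.
func downloadChunk(chunkID string) ([]byte, error) {
	return []byte("chunk contents"), nil
}

// verifyChunk is a hypothetical stand-in for decrypting the chunk and
// comparing the ID computed from its content against the expected ID.
func verifyChunk(chunkID string, data []byte) error {
	if len(data) == 0 {
		return errors.New("content does not match chunk ID")
	}
	return nil
}

// downloadWithRetry re-downloads a chunk whose contents fail verification,
// while treating transport errors as fatal, as the change above does.
func downloadWithRetry(chunkID string) ([]byte, error) {
	for attempt := 0; ; attempt++ {
		data, err := downloadChunk(chunkID)
		if err != nil {
			return nil, err // a failed transfer is not retried here
		}
		if verifyErr := verifyChunk(chunkID, data); verifyErr != nil {
			if attempt < maxDownloadAttempts {
				fmt.Printf("retrying chunk %s: %v\n", chunkID, verifyErr)
				continue // download and verify again
			}
			return nil, verifyErr // retries exhausted
		}
		return data, nil
	}
}

func main() {
	if _, err := downloadWithRetry("example-chunk"); err != nil {
		fmt.Println("restore would abort:", err)
	}
}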