Mirror of https://github.com/gilbertchen/duplicacy (synced 2025-12-24 04:04:18 +00:00)

Merge branch 'master' into master

Gopkg.lock (generated, 34 changed lines)
@@ -7,11 +7,17 @@
revision = "2d3a6656c17a60b0815b7e06ab0be04eacb6e613"
version = "v0.16.0"

[[projects]]
name = "github.com/Azure/azure-sdk-for-go"
packages = ["version"]
revision = "b7fadebe0e7f5c5720986080a01495bd8d27be37"
version = "v14.2.0"

[[projects]]
name = "github.com/Azure/go-autorest"
packages = ["autorest","autorest/adal","autorest/azure","autorest/date"]
revision = "c67b24a8e30d876542a85022ebbdecf0e5a935e8"
version = "v9.4.1"
revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
version = "v10.2.0"

[[projects]]
branch = "master"
@@ -38,10 +44,10 @@
version = "v3.1.0"

[[projects]]
branch = "master"
name = "github.com/gilbertchen/azure-sdk-for-go"
packages = ["storage"]
revision = "2d49bb8f2cee530cc16f1f1a9f0aae763dee257d"
version = "v10.2.1-beta"
revision = "bbf89bd4d716c184f158d1e1428c2dbef4a18307"

[[projects]]
branch = "master"
@@ -120,12 +126,24 @@
packages = ["."]
revision = "2788f0dbd16903de03cb8186e5c7d97b69ad387b"

[[projects]]
name = "github.com/marstr/guid"
packages = ["."]
revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
version = "v1.1.0"

[[projects]]
branch = "master"
name = "github.com/minio/blake2b-simd"
packages = ["."]
revision = "3f5f724cb5b182a5c278d6d3d55b40e7f8c2efb4"

[[projects]]
branch = "master"
name = "github.com/ncw/swift"
packages = ["."]
revision = "ae9f0ea1605b9aa6434ed5c731ca35d83ba67c55"

[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
@@ -139,10 +157,10 @@
version = "1.0.0"

[[projects]]
name = "github.com/satori/uuid"
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"

[[projects]]
branch = "master"
@@ -207,6 +225,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "a84af96e0c7019aa041120828de9995efb9cca3fde6e56a8ad5b80962f23806d"
inputs-digest = "eff5ae2d9507f0d62cd2e5bdedebb5c59d64f70f476b087c01c35d4a5e1be72d"
solver-name = "gps-cdcl"
solver-version = 1

@@ -39,7 +39,7 @@

[[constraint]]
name = "github.com/gilbertchen/azure-sdk-for-go"
version = "10.2.1-beta"
branch = "master"

[[constraint]]
branch = "master"

@@ -35,9 +35,9 @@ With Duplicacy, you can back up files to local or networked drives, SFTP servers

| Type | Storage (monthly) | Upload | Download | API Charge |
|:------------:|:-------------:|:------------------:|:--------------:|:-----------:|
| Amazon S3 | $0.023/GB | free | $0.09/GB | [yes](https://aws.amazon.com/s3/pricing/) |
| Wasabi | $3.99 first 1TB <br> $0.0039/GB additional | free | $.04/GB | no |
| DigitalOcean Spaces| $5 first 250GB <br> $0.02/GB additional | free | first 1TB free <br> $0.01/GB additional| no |
| Amazon S3 | $0.023/GB | free | $0.090/GB | [yes](https://aws.amazon.com/s3/pricing/) |
| Wasabi | $3.99 first 1TB <br> $0.0039/GB additional | free | $0.04/GB | no |
| DigitalOcean Spaces| $5 first 250GB <br> $0.020/GB additional | free | first 1TB free <br> $0.01/GB additional| no |
| Backblaze B2 | 10GB free <br> $0.005/GB | free | 1GB free/day <br> $0.02/GB | [yes](https://www.backblaze.com/b2/b2-transactions-price.html) |
| Google Cloud Storage| $0.026/GB | free |$ 0.12/GB | [yes](https://cloud.google.com/storage/pricing) |
| Google Drive | 15GB free <br> $1.99/100GB <br> $9.99/TB | free | free | no |

@@ -1196,6 +1196,11 @@ func infoStorage(context *cli.Context) {
DoNotSavePassword: true,
}

storageName := context.String("storage-name")
if storageName != "" {
preference.Name = storageName
}

if resetPasswords {
// We don't want password entered for the info command to overwrite the saved password for the default storage,
// so we simply assign an empty name.
@@ -1222,7 +1227,7 @@ func infoStorage(context *cli.Context) {

dirs, _, err := storage.ListFiles(0, "snapshots/")
if err != nil {
duplicacy.LOG_ERROR("STORAGE_LIST", "Failed to list repository ids: %v", err)
duplicacy.LOG_WARN("STORAGE_LIST", "Failed to list repository ids: %v", err)
return
}

@@ -1266,7 +1271,7 @@ func main() {
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
Argument: "<i>",
},
cli.StringFlag{
@@ -1577,7 +1582,7 @@ func main() {
},
cli.StringSliceFlag{
Name: "t",
Usage: "delete snapshots with the specifed tags",
Usage: "delete snapshots with the specified tags",
Argument: "<tag>",
},
cli.StringSliceFlag{
@@ -1591,7 +1596,7 @@ func main() {
},
cli.BoolFlag{
Name: "exclusive",
Usage: "assume exclusive acess to the storage (disable two-step fossil collection)",
Usage: "assume exclusive access to the storage (disable two-step fossil collection)",
},
cli.BoolFlag{
Name: "dry-run, d",
@@ -1631,7 +1636,7 @@ func main() {
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
Argument: "<i>",
},
},
@@ -1665,7 +1670,7 @@ func main() {
},
cli.IntFlag{
Name: "iterations",
Usage: "the number of iterations used in storage key deriviation (default is 16384)",
Usage: "the number of iterations used in storage key derivation (default is 16384)",
Argument: "<i>",
},
cli.StringFlag{
@@ -1787,6 +1792,11 @@ func main() {
Usage: "retrieve saved passwords from the specified repository",
Argument: "<repository directory>",
},
cli.StringFlag{
Name: "storage-name",
Usage: "the storage name to be assigned to the storage url",
Argument: "<name>",
},
cli.BoolFlag{
Name: "reset-passwords",
Usage: "take passwords from input rather than keychain/keyring",
@@ -1835,7 +1845,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.0.10"
app.Version = "2.1.0"

// If the program is interrupted, call the RunAtError function.
c := make(chan os.Signal, 1)

@@ -153,7 +153,7 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
return response.Body, response.Header, response.ContentLength, nil
}

LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s %s' returned status code %d", method, url, response.StatusCode)

io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
@@ -170,7 +170,6 @@ func (client *B2Client) call(url string, method string, requestHeaders map[strin
continue
} else if response.StatusCode == 404 {
if http.MethodHead == method {
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
return nil, nil, 0, nil
}
} else if response.StatusCode == 416 {
@@ -580,7 +579,7 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)

if response.StatusCode == 401 {
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorizatoin required")
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
client.UploadURL = ""
client.UploadToken = ""
continue
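
The B2Client code above retries failed requests: a 404 on a HEAD probe is reported as "not found" rather than an error, and a 401 during upload invalidates the cached upload URL and token so the next attempt re-authorizes (the hunks themselves only extend the debug logging and fix the log wording). A minimal sketch of that retry pattern, using a hypothetical doRequest helper rather than the real B2Client internals:

```go
// Sketch only: illustrates the retry behavior shown above.
// doRequest, reauthorize, and the client fields are hypothetical stand-ins,
// not the actual duplicacy B2Client API.
package main

import (
	"fmt"
	"net/http"
)

type client struct {
	uploadURL   string
	uploadToken string
}

func (c *client) call(method, url string) (found bool, err error) {
	for attempt := 0; attempt < 3; attempt++ {
		status := c.doRequest(method, url) // hypothetical helper returning the status code

		switch {
		case status < 300:
			return true, nil
		case status == 404 && method == http.MethodHead:
			// A missing object probed with HEAD is not an error; report "not found".
			return false, nil
		case status == 401:
			// Credentials expired: drop the cached upload URL/token and re-authorize.
			c.uploadURL = ""
			c.uploadToken = ""
			c.reauthorize()
			continue
		default:
			return false, fmt.Errorf("%s %s returned status code %d", method, url, status)
		}
	}
	return false, fmt.Errorf("too many retries for %s %s", method, url)
}

func (c *client) doRequest(method, url string) int { return 200 } // placeholder
func (c *client) reauthorize()                     {}             // placeholder

func main() {
	found, err := (&client{}).call(http.MethodHead, "https://example.com/object")
	fmt.Println(found, err)
}
```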

@@ -210,6 +210,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

filePath = strings.Replace(filePath, " ", "%20", -1)
readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
if err != nil {
return err
@@ -223,6 +224,7 @@ func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
filePath = strings.Replace(filePath, " ", "%20", -1)
return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
}


@@ -284,7 +284,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
// we simply treat all files as if they were new, and break them into chunks.
// Otherwise, we need to find those that are new or recently modified

if remoteSnapshot.Revision == 0 && incompleteSnapshot == nil {
if (remoteSnapshot.Revision == 0 || !quickMode) && incompleteSnapshot == nil {
modifiedEntries = localSnapshot.Files
for _, entry := range modifiedEntries {
totalModifiedFileSize += entry.Size
@@ -750,7 +750,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}

remoteSnapshot := manager.SnapshotManager.DownloadSnapshot(manager.snapshotID, revision)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns)
manager.SnapshotManager.DownloadSnapshotContents(remoteSnapshot, patterns, true)

localSnapshot, _, _, err := CreateSnapshotFromDirectory(manager.snapshotID, top)
if err != nil {
@@ -918,9 +918,8 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
totalFileSize, downloadedFileSize, startDownloadingTime) {
downloadedFileSize += file.Size
downloadedFiles = append(downloadedFiles, file)
file.RestoreMetadata(fullPath, nil, setOwner)
}

file.RestoreMetadata(fullPath, nil, setOwner)
}

if deleteMode && len(patterns) == 0 {

@@ -298,6 +298,9 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
// will be set up before the encryption
chunk.Reset(false)

const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {

// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
@@ -307,13 +310,19 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT

if !exist {
// No chunk is found. Have to find it in the fossil pool again.
chunkPath, exist, _, err = downloader.storage.FindChunk(threadIndex, chunkID, true)
fossilPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}

if !exist {
// Retry for the Hubic backend as it may return 404 even when the chunk exists
if _, ok := downloader.storage.(*HubicStorage); ok && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to find the chunk %s; retrying", chunkID)
continue
}

// A chunk is not found. This is a serious error and hopefully it will never happen.
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
@@ -322,14 +331,24 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
}
return false
}
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)

// We can't download the fossil directly. We have to turn it back into a regular chunk and try
// downloading again.
err = downloader.storage.MoveFile(threadIndex, fossilPath, chunkPath)
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Failed to resurrect chunk %s: %v", chunkID, err)
return false
}

LOG_WARN("DOWNLOAD_RESURRECT", "Fossil %s has been resurrected", chunkID)
continue
}

const MaxDownloadAttempts = 3
for downloadAttempt := 0; ; downloadAttempt++ {
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
if err == io.ErrUnexpectedEOF && downloadAttempt < MaxDownloadAttempts {
_, isHubic := downloader.storage.(*HubicStorage)
// Retry on EOF or if it is a Hubic backend as it may return 404 even when the chunk exists
if (err == io.ErrUnexpectedEOF || isHubic) && downloadAttempt < MaxDownloadAttempts {
LOG_WARN("DOWNLOAD_RETRY", "Failed to download the chunk %s: %v; retrying", chunkID, err)
chunk.Reset(false)
continue
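
The ChunkDownloader hunks above move the chunk lookup inside a bounded retry loop: a chunk missing under its regular name is looked up again in the fossil pool, a found fossil is renamed back to its chunk path ("resurrected") before retrying, Hubic gets extra lookup retries because it can return 404 for objects that exist, and on Hubic a failed download is also retried rather than only on unexpected EOF. A condensed sketch of that control flow against a hypothetical chunkStorage interface, not duplicacy's real Storage type:

```go
// Sketch only: the resurrect-and-retry flow from the hunks above,
// written against a hypothetical chunkStorage interface.
package chunkdemo

import (
	"errors"
	"fmt"
)

type chunkStorage interface {
	// FindChunk returns the path a chunk (or its fossil, when isFossil is true)
	// would occupy and whether it currently exists there.
	FindChunk(chunkID string, isFossil bool) (path string, exist bool, err error)
	MoveFile(from, to string) error
	DownloadFile(path string) ([]byte, error)
}

func downloadChunk(storage chunkStorage, chunkID string) ([]byte, error) {
	const maxAttempts = 3
	for attempt := 0; attempt <= maxAttempts; attempt++ {
		chunkPath, exist, err := storage.FindChunk(chunkID, false)
		if err != nil {
			return nil, err
		}
		if !exist {
			// Not under the regular name; look for it in the fossil pool.
			fossilPath, fossilExists, err := storage.FindChunk(chunkID, true)
			if err != nil {
				return nil, err
			}
			if !fossilExists {
				return nil, fmt.Errorf("chunk %s can't be found", chunkID)
			}
			// A fossil can't be downloaded directly: rename it back to a regular
			// chunk ("resurrect" it) and retry the lookup.
			if err := storage.MoveFile(fossilPath, chunkPath); err != nil {
				return nil, fmt.Errorf("failed to resurrect chunk %s: %w", chunkID, err)
			}
			continue
		}
		content, err := storage.DownloadFile(chunkPath)
		if err != nil {
			if attempt < maxAttempts {
				continue // transient error: retry the download
			}
			return nil, err
		}
		return content, nil
	}
	return nil, errors.New("too many download attempts")
}
```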
@@ -368,7 +387,7 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT

if len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err = downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
err := downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}

@@ -269,7 +269,7 @@ func (manager *SnapshotManager) DownloadSequence(sequence []string) (content []b
return content
}

func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot, patterns []string) bool {
func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot, patterns []string, attributesNeeded bool) bool {

manager.CreateChunkDownloader()

@@ -304,7 +304,8 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
return false
}

if len(patterns) != 0 && !MatchPath(entry.Path, patterns) {
// If we don't need the attributes or the file isn't included we clear the attributes to save memory
if !attributesNeeded || (len(patterns) != 0 && !MatchPath(entry.Path, patterns)) {
entry.Attributes = nil
}

@@ -347,9 +348,9 @@ func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, seq
// DownloadSnapshotContents loads all chunk sequences in a snapshot. A snapshot, when just created, only contains
// some metadata and theree sequence representing files, chunk hashes, and chunk lengths. This function must be called
// for the actual content of the snapshot to be usable.
func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, patterns []string) bool {
func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, patterns []string, attributesNeeded bool) bool {

manager.DownloadSnapshotFileSequence(snapshot, patterns)
manager.DownloadSnapshotFileSequence(snapshot, patterns, attributesNeeded)
manager.DownloadSnapshotSequence(snapshot, "chunks")
manager.DownloadSnapshotSequence(snapshot, "lengths")

@@ -553,7 +554,7 @@ func (manager *SnapshotManager) downloadLatestSnapshot(snapshotID string) (remot
}

if remote != nil {
manager.DownloadSnapshotContents(remote, nil)
manager.DownloadSnapshotContents(remote, nil, false)
}

return remote
@@ -679,7 +680,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
}

if showFiles {
manager.DownloadSnapshotFileSequence(snapshot, nil)
manager.DownloadSnapshotFileSequence(snapshot, nil, false)
}

if showFiles {
@@ -799,7 +800,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
}

if checkFiles {
manager.DownloadSnapshotContents(snapshot, nil)
manager.DownloadSnapshotContents(snapshot, nil, false)
manager.VerifySnapshot(snapshot)
continue
}
@@ -1208,7 +1209,8 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
patterns = []string{path}
}

if !manager.DownloadSnapshotContents(snapshot, patterns) {
// If no path is specified, we're printing the snapshot so we need all attributes
if !manager.DownloadSnapshotContents(snapshot, patterns, path == "") {
return false
}

@@ -1268,9 +1270,9 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []

if len(filePath) > 0 {

manager.DownloadSnapshotContents(leftSnapshot, nil)
manager.DownloadSnapshotContents(leftSnapshot, nil, false)
if rightSnapshot != nil && rightSnapshot.Revision != 0 {
manager.DownloadSnapshotContents(rightSnapshot, nil)
manager.DownloadSnapshotContents(rightSnapshot, nil, false)
}

var leftFile []byte
@@ -1346,9 +1348,9 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
}

// We only need to decode the 'files' sequence, not 'chunkhashes' or 'chunklengthes'
manager.DownloadSnapshotFileSequence(leftSnapshot, nil)
manager.DownloadSnapshotFileSequence(leftSnapshot, nil, false)
if rightSnapshot != nil && rightSnapshot.Revision != 0 {
manager.DownloadSnapshotFileSequence(rightSnapshot, nil)
manager.DownloadSnapshotFileSequence(rightSnapshot, nil, false)
}

maxSize := int64(9)
@@ -1452,7 +1454,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
sort.Ints(revisions)
for _, revision := range revisions {
snapshot := manager.DownloadSnapshot(snapshotID, revision)
manager.DownloadSnapshotFileSequence(snapshot, nil)
manager.DownloadSnapshotFileSequence(snapshot, nil, false)
file := manager.FindFile(snapshot, filePath, true)

if file != nil {
@@ -1863,7 +1865,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
}

if len(tagMap) > 0 {
if _, found := tagMap[snapshot.Tag]; found {
if _, found := tagMap[snapshot.Tag]; !found {
continue
}
}
@@ -2292,6 +2294,10 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string)
return nil
}

if len(derivationKey) > 64 {
derivationKey = derivationKey[len(derivationKey) - 64:]
}

err = manager.fileChunk.Decrypt(manager.config.FileKey, derivationKey)
if err != nil {
LOG_ERROR("DOWNLOAD_DECRYPT", "Failed to decrypt the file %s: %v", path, err)
@@ -2322,6 +2328,10 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
}
}

if len(derivationKey) > 64 {
derivationKey = derivationKey[len(derivationKey) - 64:]
}

err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)
if err != nil {
LOG_ERROR("UPLOAD_File", "Failed to encrypt the file %s: %v", path, err)

@@ -107,6 +107,9 @@ func createTestSnapshotManager(testDir string) *SnapshotManager {
snapshotCache.CreateDirectory(0, "snapshots")

snapshotManager.snapshotCache = snapshotCache

SetDuplicacyPreferencePath(testDir + "/.duplicacy")

return snapshotManager
}

@@ -140,7 +143,7 @@ func uploadRandomChunk(manager *SnapshotManager, chunkSize int) string {
return uploadTestChunk(manager, content)
}

func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string) {
func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string, tag string) {

snapshot := &Snapshot{
ID: snapshotID,
@@ -148,6 +151,7 @@ func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision in
StartTime: startTime,
EndTime: endTime,
ChunkHashes: chunkHashes,
Tag: tag,
}

var chunkHashesInHex []string
@@ -239,12 +243,12 @@ func TestSingleRepositoryPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 1 snapshot")
createTestSnapshot(snapshotManager, "repository1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "repository1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
checkTestSnapshots(snapshotManager, 1, 2)

t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "repository1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "repository1", 3, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "repository1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "repository1", 3, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)

t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
@@ -257,7 +261,7 @@ func TestSingleRepositoryPrune(t *testing.T) {

t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "repository1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "repository1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 2, 2)

t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -282,9 +286,9 @@ func TestSingleHostPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)

t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
@@ -297,7 +301,7 @@ func TestSingleHostPrune(t *testing.T) {

t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)

t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -323,9 +327,9 @@ func TestMultipleHostPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshot")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)

t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
@@ -338,7 +342,7 @@ func TestMultipleHostPrune(t *testing.T) {

t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)

t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
@@ -347,7 +351,7 @@ func TestMultipleHostPrune(t *testing.T) {

t.Logf("Creating 1 snapshot")
chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6})
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6}, "tag")
checkTestSnapshots(snapshotManager, 4, 2)

t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -371,8 +375,8 @@ func TestPruneAndResurrect(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
checkTestSnapshots(snapshotManager, 2, 0)

t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
@@ -381,7 +385,7 @@ func TestPruneAndResurrect(t *testing.T) {

t.Logf("Creating 1 snapshot")
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1})
createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1}, "tag")
checkTestSnapshots(snapshotManager, 2, 2)

t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
@@ -406,10 +410,10 @@ func TestInactiveHostPrune(t *testing.T) {
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshot")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2}, "tag")
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3}, "tag")
// Host2 is inactive
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4})
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4}, "tag")
checkTestSnapshots(snapshotManager, 3, 0)

t.Logf("Removing snapshot vm1@host1 revision 1")
@@ -422,7 +426,7 @@ func TestInactiveHostPrune(t *testing.T) {

t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5}, "tag")
checkTestSnapshots(snapshotManager, 3, 2)

t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -448,7 +452,7 @@ func TestRetentionPolicy(t *testing.T) {
day := int64(24 * 3600)
t.Logf("Creating 30 snapshots")
for i := 0; i < 30; i++ {
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]})
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, "tag")
}

checkTestSnapshots(snapshotManager, 30, 0)
@@ -465,3 +469,35 @@
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 12, 0)
}

func TestRetentionPolicyAndTag(t *testing.T) {

setTestingT(t)

testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

snapshotManager := createTestSnapshotManager(testDir)

chunkSize := 1024
var chunkHashes []string
for i := 0; i < 30; i++ {
chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
}

now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 30 snapshots")
for i := 0; i < 30; i++ {
tag := "auto"
if i % 3 == 0 {
tag = "manual"
}
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, tag)
}

checkTestSnapshots(snapshotManager, 30, 0)

t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive and --tag manual")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{"manual"}, []string{"0:7"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 22, 0)
}
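
The new TestRetentionPolicyAndTag above combines a tag filter with a retention string. Reading the test's arithmetic, an "n:m" retention string appears to apply to snapshots older than m days, keeping one snapshot every n days, with n = 0 meaning such snapshots are deleted outright; restricted to the "manual" tag, "0:7" removes the eight manual snapshots older than seven days and leaves 22 of the 30. A rough sketch of that reading (not the actual PruneSnapshots implementation):

```go
// Sketch only: one reading of the "n:m" retention strings used in the tests above,
// not duplicacy's actual prune logic.
package retentiondemo

import (
	"fmt"
	"strconv"
	"strings"
)

// parseKeep splits a retention string "n:m": for snapshots older than m days,
// keep one snapshot every n days; n == 0 means delete all such snapshots.
func parseKeep(spec string) (n int, m int, err error) {
	parts := strings.SplitN(spec, ":", 2)
	if len(parts) != 2 {
		return 0, 0, fmt.Errorf("invalid retention string %q", spec)
	}
	if n, err = strconv.Atoi(parts[0]); err != nil {
		return 0, 0, err
	}
	if m, err = strconv.Atoi(parts[1]); err != nil {
		return 0, 0, err
	}
	return n, m, nil
}

// deleteOutright reports whether a snapshot of the given age (in days) is removed
// under an "0:m" policy, which is the case exercised by the test above: with "0:7",
// the manual snapshots aged 9 to 30 days are deleted and those aged 3 and 6 days remain.
func deleteOutright(ageDays int, m int) bool {
	return ageDays > m
}
```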

@@ -596,6 +596,16 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
}
SavePassword(preference, "hubic_token", tokenFile)
return hubicStorage
} else if matched[1] == "swift" {
prompt := fmt.Sprintf("Enter the OpenStack Swift key:")
key := GetPassword(preference, "swift_key", prompt, true, resetPassword)
swiftStorage, err := CreateSwiftStorage(storageURL[8:], key, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OpenStack Swift storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "swift_key", key)
return swiftStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil

@@ -142,6 +142,12 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
storage, err := CreateHubicStorage(config["token_file"], config["storage_path"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else if testStorageName == "memset" {
storage, err := CreateSwiftStorage(config["storage_url"], config["key"], threads)
storage.SetDefaultNestingLevels([]int{2, 3}, 2)
return storage, err
} else {
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}

return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)

src/duplicacy_swiftstorage.go (new file, 251 lines)
@@ -0,0 +1,251 @@
// Copyright (c) Acrosync LLC. All rights reserved.
// Free for personal use and commercial trial
// Commercial use requires per-user licenses available from https://duplicacy.com

package duplicacy

import (
"strconv"
"strings"
"time"

"github.com/ncw/swift"
)

type SwiftStorage struct {
StorageBase

connection *swift.Connection
container string
storageDir string
threads int
}

// CreateSwiftStorage creates an OpenStack Swift storage object. storageURL is in the form of
// `user@authURL/container/path?arg1=value1&arg2=value2``
func CreateSwiftStorage(storageURL string, key string, threads int) (storage *SwiftStorage, err error) {

// This is the map to store all arguments
arguments := make(map[string]string)

// Check if there are arguments provided as a query string
if strings.Contains(storageURL, "?") {
urlAndArguments := strings.SplitN(storageURL, "?", 2)
storageURL = urlAndArguments[0]
for _, pair := range strings.Split(urlAndArguments[1], "&") {
if strings.Contains(pair, "=") {
keyAndValue := strings.Split(pair, "=")
arguments[keyAndValue[0]] = keyAndValue[1]
}
}
}

// Take out the user name if there is one
if strings.Contains(storageURL, "@") {
userAndURL := strings.Split(storageURL, "@")
arguments["user"] = userAndURL[0]
storageURL = userAndURL[1]
}

// The version is used to split authURL and container/path
versions := []string{"/v1/", "/v1.0/", "/v2/", "/v2.0/", "/v3/", "/v3.0/", "/v4/", "/v4.0/"}
storageDir := ""
for _, version := range versions {
if strings.Contains(storageURL, version) {
urlAndStorageDir := strings.SplitN(storageURL, version, 2)
storageURL = urlAndStorageDir[0] + version[0:len(version)-1]
storageDir = urlAndStorageDir[1]
}
}

// If no container/path is specified, find them from the arguments
if storageDir == "" {
storageDir = arguments["storage_dir"]
}

// Now separate the container name from the storage path
container := ""
if strings.Contains(storageDir, "/") {
containerAndStorageDir := strings.SplitN(storageDir, "/", 2)
container = containerAndStorageDir[0]
storageDir = containerAndStorageDir[1]
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
storageDir += "/"
}
} else {
container = storageDir
storageDir = ""
}

// Number of retries on err
retries := 4
if value, ok := arguments["retries"]; ok {
retries, _ = strconv.Atoi(value)
}

// Connect channel timeout
connectionTimeout := 10
if value, ok := arguments["connection_timeout"]; ok {
connectionTimeout, _ = strconv.Atoi(value)
}

// Data channel timeout
timeout := 60
if value, ok := arguments["timeout"]; ok {
timeout, _ = strconv.Atoi(value)
}

// Auth version; default to auto-detect
authVersion := 0
if value, ok := arguments["auth_version"]; ok {
authVersion, _ = strconv.Atoi(value)
}

// Allow http to be used by setting "protocol=http" in arguments
if _, ok := arguments["protocol"]; !ok {
arguments["protocol"] = "https"
}

// Please refer to https://godoc.org/github.com/ncw/swift#Connection
connection := swift.Connection{
Domain: arguments["domain"],
DomainId: arguments["domain_id"],
UserName: arguments["user"],
UserId: arguments["user_id"],
ApiKey: key,
AuthUrl: arguments["protocol"] + "://" + storageURL,
Retries: retries,
UserAgent: arguments["user_agent"],
ConnectTimeout: time.Duration(connectionTimeout) * time.Second,
Timeout: time.Duration(timeout) * time.Second,
Region: arguments["region"],
AuthVersion: authVersion,
Internal: false,
Tenant: arguments["tenant"],
TenantId: arguments["tenant_id"],
EndpointType: swift.EndpointType(arguments["endpiont_type"]),
TenantDomain: arguments["tenant_domain"],
TenantDomainId: arguments["tenant_domain_id"],
TrustId: arguments["trust_id"],
}

_, _, err = connection.Container(container)
if err != nil {
return nil, err
}

storage = &SwiftStorage{
connection: &connection,
container: container,
storageDir: storageDir,
threads: threads,
}

storage.DerivedStorage = storage
storage.SetDefaultNestingLevels([]int{1}, 1)
return storage, nil
}

// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *SwiftStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
if len(dir) > 0 && dir[len(dir)-1] != '/' {
dir += "/"
}
isSnapshotDir := dir == "snapshots/"
dir = storage.storageDir + dir

options := swift.ObjectsOpts{
Prefix: dir,
Limit: 1000,
}

if isSnapshotDir {
options.Delimiter = '/'
}

objects, err := storage.connection.ObjectsAll(storage.container, &options)
if err != nil {
return nil, nil, err
}

for _, obj := range objects {
if isSnapshotDir {
if obj.SubDir != "" {
files = append(files, obj.SubDir[len(dir):])
sizes = append(sizes, 0)
}
} else {
files = append(files, obj.Name[len(dir):])
sizes = append(sizes, obj.Bytes)
}
}

return files, sizes, nil
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SwiftStorage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.connection.ObjectDelete(storage.container, storage.storageDir+filePath)
}

// MoveFile renames the file.
func (storage *SwiftStorage) MoveFile(threadIndex int, from string, to string) (err error) {
return storage.connection.ObjectMove(storage.container, storage.storageDir+from,
storage.container, storage.storageDir+to)
}

// CreateDirectory creates a new directory.
func (storage *SwiftStorage) CreateDirectory(threadIndex int, dir string) (err error) {
// Does nothing as directories do not exist in OpenStack Swift
return nil
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SwiftStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
object, _, err := storage.connection.Object(storage.container, storage.storageDir+filePath)

if err != nil {
if err == swift.ObjectNotFound {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}

return true, false, object.Bytes, nil
}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SwiftStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

file, _, err := storage.connection.ObjectOpen(storage.container, storage.storageDir+filePath, false, nil)
if err != nil {
return err
}
_, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.threads)
return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SwiftStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.threads)
_, err = storage.connection.ObjectPut(storage.container, storage.storageDir+filePath, reader, true, "", "application/duplicacy", nil)
return err
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SwiftStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *SwiftStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *SwiftStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of files names.
func (storage *SwiftStorage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *SwiftStorage) EnableTestMode() {
}
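
CreateSwiftStorage above documents the storage URL as `user@authURL/container/path?arg1=value1&arg2=value2` and parses it into the swift.Connection settings. A hypothetical usage sketch (the endpoint, container, and key below are made up), assumed to sit in the duplicacy package so it can call CreateSwiftStorage directly, mirroring how CreateStorage invokes it after stripping the scheme prefix:

```go
// Hypothetical usage sketch: the host, container, path, and key are made up.
package duplicacy

func exampleSwiftStorage() (*SwiftStorage, error) {
	// user@authURL/container/path?arg1=value1&arg2=value2 -- the form documented above.
	// Here "backups" is the container, "duplicacy" the path, and the query string
	// supplies extra connection arguments such as the region and auth version.
	storageURL := "demo@auth.cloud.example.com/v3/backups/duplicacy?region=RegionOne&auth_version=3"

	// In CreateStorage the key is prompted for via GetPassword and cached with SavePassword.
	key := "secret-api-key"

	// Parses the user, auth URL, container, path, and arguments, verifies the
	// container exists, and returns the configured storage.
	return CreateSwiftStorage(storageURL, key, 4)
}
```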
@@ -69,7 +69,7 @@ func (entry *Entry) SetAttributesToFile(fullPath string) {
newAttribute, found := entry.Attributes[name]
if found {
oldAttribute, _ := xattr.Getxattr(fullPath, name)
if bytes.Equal(oldAttribute, newAttribute) {
if !bytes.Equal(oldAttribute, newAttribute) {
xattr.Setxattr(fullPath, name, newAttribute)
}
delete(entry.Attributes, name)