Mirror of https://github.com/gilbertchen/duplicacy, synced 2025-12-06 00:03:38 +00:00.
Merge pull request #454 from mikecook/master
spelling fix, go fmt, go vet
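The changes below are the mechanical kind these tools produce: gofmt tightens spacing around high-precedence operators and string concatenation, drops redundant parentheses and trailing semicolons, and simplifies range clauses and composite literals, while go vet flags problems such as an unreachable return after an infinite retry loop. A minimal sketch of the recurring before/after patterns (illustrative code only, not taken from this repository):

package main

import "fmt"

type entry struct{ id, name string }

func main() {
	seen := map[string]bool{"a": true, "b": true}

	// gofmt -s: `for k, _ := range seen` becomes `for k := range seen`;
	// the blank identifier is redundant.
	for k := range seen {
		fmt.Println(k)
	}

	// gofmt -s: `[]*entry{&entry{"1", "x"}}` becomes `[]*entry{{"1", "x"}}`;
	// the element type is inferred from the slice type.
	entries := []*entry{{"1", "x"}}
	fmt.Println(entries[0].name)

	// gofmt spacing: `256 - dataLength % 256` is written `256 - dataLength%256`
	// to mirror operator precedence, and `if (x == "") {` loses its parentheses.
	dataLength := 1000
	fmt.Println(256 - dataLength%256) // prints 24
}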
@@ -7,6 +7,7 @@ package main
 import (
 "encoding/json"
 "fmt"
+"net/http"
 "os"
 "os/exec"
 "os/signal"
@@ -16,7 +17,6 @@ import (
 "runtime"
 "strconv"
 "strings"
-"net/http"
 
 _ "net/http/pprof"
 
@@ -159,8 +159,6 @@ func setGlobalOptions(context *cli.Context) {
 }()
 }
-
-
 
 duplicacy.RunInBackground = context.GlobalBool("background")
 }
 
@@ -309,11 +307,11 @@ func configRepository(context *cli.Context, init bool) {
 repositoryPath = context.String("repository")
 }
 preference := duplicacy.Preference{
 Name: storageName,
 SnapshotID: snapshotID,
 RepositoryPath: repositoryPath,
 StorageURL: storageURL,
 Encrypted: context.Bool("encrypt"),
 }
 
 storage := duplicacy.CreateStorage(preference, true, 1)
@@ -533,7 +531,7 @@ func setPreference(context *cli.Context) {
 if triBool.IsSet() {
 newPreference.DoNotSavePassword = triBool.IsTrue()
 }
 
 newPreference.NobackupFile = context.String("nobackup-file")
 
 key := context.String("key")
@@ -650,7 +648,7 @@ func changePassword(context *cli.Context) {
 duplicacy.LOG_INFO("CONFIG_CLEAN", "The local copy of the old config has been removed")
 }
 }
-} ()
+}()
 
 err = storage.DeleteFile(0, "config")
 if err != nil {
@@ -1262,7 +1260,7 @@ func infoStorage(context *cli.Context) {
 
 for _, dir := range dirs {
 if len(dir) > 0 && dir[len(dir)-1] == '/' {
-duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir) - 1])
+duplicacy.LOG_INFO("STORAGE_SNAPSHOT", "%s", dir[0:len(dir)-1])
 }
 }
 
@@ -1298,7 +1296,7 @@ func benchmark(context *cli.Context) {
 }
 
 threads := downloadThreads
-if (threads < uploadThreads) {
+if threads < uploadThreads {
 threads = uploadThreads
 }
 
@@ -1773,8 +1771,8 @@ func main() {
 Argument: "<storage name>",
 },
 cli.BoolFlag{
 Name: "bit-identical",
 Usage: "(when using -copy) make the new storage bit-identical to also allow rsync etc.",
 },
 cli.StringFlag{
 Name: "repository",
@@ -1815,10 +1813,10 @@ func main() {
 Arg: "true",
 },
 cli.StringFlag{
 Name: "nobackup-file",
 Usage: "Directories containing a file with this name will not be backed up",
 Argument: "<file name>",
 Value: "",
 },
 cli.StringFlag{
 Name: "key",
@@ -1984,8 +1982,8 @@ func main() {
 Argument: "<address:port>",
 },
 cli.StringFlag{
 Name: "comment",
 Usage: "add a comment to identify the process",
 },
 }
 
@@ -1999,7 +1997,7 @@ func main() {
 c := make(chan os.Signal, 1)
 signal.Notify(c, os.Interrupt)
 go func() {
-for _ = range c {
+for range c {
 duplicacy.RunAtError()
 os.Exit(1)
 }
@@ -104,7 +104,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
 
 if dir == "snapshots/" {
 
-for subDir, _ := range subDirs {
+for subDir := range subDirs {
 files = append(files, subDir)
 }
 
@@ -385,7 +385,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
 }
 fileUploadTimestamp, _ := strconv.ParseInt(responseHeader.Get("X-Bz-Upload-Timestamp"), 0, 64)
 
-return []*B2Entry{&B2Entry{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
+return []*B2Entry{{fileID, fileName, fileAction, fileSize, fileUploadTimestamp}}, nil
 }
 
 if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
@@ -71,7 +71,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
 subDirs[subDir+"/"] = true
 }
 
-for subDir, _ := range subDirs {
+for subDir := range subDirs {
 files = append(files, subDir)
 }
 } else if dir == "chunks" {
@@ -33,7 +33,7 @@ type BackupManager struct {
 snapshotCache *FileStorage // for copies of chunks needed by snapshots
 
 config *Config // contains a number of options
 
 nobackupFile string // don't backup directory when this file name is found
 }
 
@@ -65,7 +65,7 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
 SnapshotManager: snapshotManager,
 
 config: config,
 
 nobackupFile: nobackupFile,
 }
 
@@ -981,12 +981,12 @@ type fileEncoder struct {
 buffer *bytes.Buffer
 }
 
-// Read reads data from the embeded buffer
+// Read reads data from the embedded buffer
 func (encoder fileEncoder) Read(data []byte) (n int, err error) {
 return encoder.buffer.Read(data)
 }
 
-// NextFile switchs to the next file and generates its json description in the buffer. It also takes care of
+// NextFile switches to the next file and generates its json description in the buffer. It also takes care of
 // the ending ']' and the commas between files.
 func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
 if encoder.currentIndex == len(encoder.files) {
@@ -1126,7 +1126,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 }
 
 // Restore downloads a file from the storage. If 'inPlace' is false, the download file is saved first to a temporary
-// file under the .duplicacy directory and then replaces the existing one. Otherwise, the exising file will be
+// file under the .duplicacy directory and then replaces the existing one. Otherwise, the existing file will be
 // overwritten directly.
 func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chunkMaker *ChunkMaker, entry *Entry, top string, inPlace bool, overwrite bool,
 showStatistics bool, totalFileSize int64, downloadedFileSize int64, startTime int64) bool {
@@ -1324,7 +1324,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 }
 }
-
 
 for i := entry.StartChunk; i <= entry.EndChunk; i++ {
 if _, found := offsetMap[chunkDownloader.taskList[i].chunkHash]; !found {
 chunkDownloader.taskList[i].needed = true
@@ -1528,7 +1527,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 revisionsToBeCopied []int, threads int) bool {
 
 if !manager.config.IsCompatiableWith(otherManager.config) {
-LOG_ERROR("CONFIG_INCOMPATIABLE", "Two storages are not compatiable for the copy operation")
+LOG_ERROR("CONFIG_INCOMPATIBLE", "Two storages are not compatible for the copy operation")
 return false
 }
 
@@ -1674,7 +1673,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 chunksToCopy := 0
 chunksToSkip := 0
 
-for chunkHash, _ := range chunks {
+for chunkHash := range chunks {
 otherChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
 if _, found := otherChunks[otherChunkID]; found {
 chunksToSkip++
@@ -1704,7 +1703,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 totalSkipped := 0
 chunkIndex := 0
 
-for chunkHash, _ := range chunks {
+for chunkHash := range chunks {
 chunkIndex++
 chunkID := manager.config.GetChunkIDFromHash(chunkHash)
 newChunkID := otherManager.config.GetChunkIDFromHash(chunkHash)
@@ -246,8 +246,8 @@ func TestBackupManager(t *testing.T) {
 backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false, 0, false)
 time.Sleep(time.Duration(delay) * time.Second)
 SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-backupManager.Restore(testDir+"/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
-/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/ nil)
+backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
+/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
 
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
@@ -270,8 +270,8 @@ func TestBackupManager(t *testing.T) {
 backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false, 0, false)
 time.Sleep(time.Duration(delay) * time.Second)
 SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-backupManager.Restore(testDir+"/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
-/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
+/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
 
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -298,8 +298,8 @@ func TestBackupManager(t *testing.T) {
 createRandomFile(testDir+"/repository2/dir5/file5", 100)
 
 SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-backupManager.Restore(testDir+"/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-/*deleteMode=*/true, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+/*deleteMode=*/ true /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, nil)
 
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -325,8 +325,8 @@ func TestBackupManager(t *testing.T) {
 os.Remove(testDir + "/repository1/file2")
 os.Remove(testDir + "/repository1/dir1/file3")
 SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-backupManager.Restore(testDir+"/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
-/*deleteMode=*/false, /*setowner=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
+backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
+/*deleteMode=*/ false /*setowner=*/, false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
 
 for _, f := range []string{"file1", "file2", "dir1/file3"} {
 hash1 := getFileHash(testDir + "/repository1/" + f)
@@ -5,18 +5,18 @@
 package duplicacy
 
 import (
-"os"
 "bytes"
 "compress/zlib"
 "crypto/aes"
 "crypto/cipher"
-"crypto/rand"
 "crypto/hmac"
+"crypto/rand"
 "crypto/sha256"
 "encoding/hex"
 "fmt"
 "hash"
 "io"
+"os"
 "runtime"
 
 "github.com/bkaradzic/go-lz4"
@@ -250,7 +250,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 // PKCS7 is used. Compressed chunk sizes leaks information about the original chunks so we want the padding sizes
 // to be the maximum allowed by PKCS7
 dataLength := encryptedBuffer.Len() - offset
-paddingLength := 256 - dataLength % 256
+paddingLength := 256 - dataLength%256
 
 encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
 encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
@@ -267,7 +267,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
 
 }
 
-// This is to ensure compability with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
+// This is to ensure compatibility with Vertical Backup, which still uses HMAC-SHA256 (instead of HMAC-BLAKE2) to
 // derive the key used to encrypt/decrypt files and chunks.
 
 var DecryptWithHMACSHA256 = false
@@ -342,7 +342,6 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
 return err
 }
-
 
 paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
 if paddingLength == 0 {
 paddingLength = 256
@@ -48,7 +48,7 @@ func TestChunk(t *testing.T) {
 
 if remainderLength == -1 {
 remainderLength = len(encryptedData) % 256
-} else if len(encryptedData) % 256 != remainderLength {
+} else if len(encryptedData)%256 != remainderLength {
 t.Errorf("Incorrect padding size")
 }
 
@@ -71,7 +71,7 @@ func TestChunk(t *testing.T) {
 }
 
 if bytes.Compare(plainData, decryptedData) != 0 {
-t.Logf("orginal length: %d, decrypted length: %d", len(plainData), len(decryptedData))
+t.Logf("Original length: %d, decrypted length: %d", len(plainData), len(decryptedData))
 t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
 }
 
@@ -178,7 +178,7 @@ func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
 return
 }
 
-for i, _ := range downloader.completedTasks {
+for i := range downloader.completedTasks {
 if i < chunkIndex && downloader.taskList[i].chunk != nil {
 downloader.config.PutChunk(downloader.taskList[i].chunk)
 downloader.taskList[i].chunk = nil
@@ -253,7 +253,7 @@ func (downloader *ChunkDownloader) Stop() {
 downloader.numberOfDownloadingChunks--
 }
 
-for i, _ := range downloader.completedTasks {
+for i := range downloader.completedTasks {
 downloader.config.PutChunk(downloader.taskList[i].chunk)
 downloader.taskList[i].chunk = nil
 downloader.numberOfActiveChunks--
@@ -18,7 +18,7 @@ const (
 ChunkOperationResurrect = 3
 )
 
-// ChunkOperatorTask is used to pass paramaters for different kinds of chunk operations.
+// ChunkOperatorTask is used to pass parameters for different kinds of chunk operations.
 type ChunkOperatorTask struct {
 operation int // The type of operation
 chunkID string // The chunk id
@@ -272,7 +272,7 @@ func (entry *Entry) IsLink() bool {
 }
 
 func (entry *Entry) GetPermissions() os.FileMode {
-return os.FileMode(entry.Mode)&fileModeMask
+return os.FileMode(entry.Mode) & fileModeMask
 }
 
 func (entry *Entry) IsSameAs(other *Entry) bool {
@@ -308,7 +308,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo, setO
 }
 
 // Only set the permission if the file is not a symlink
-if !entry.IsLink() && (*fileInfo).Mode() & fileModeMask != entry.GetPermissions() {
+if !entry.IsLink() && (*fileInfo).Mode()&fileModeMask != entry.GetPermissions() {
 err := os.Chmod(fullPath, entry.GetPermissions())
 if err != nil {
 LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
@@ -456,10 +456,10 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns []string,
 if err != nil {
 return directoryList, nil, err
 }
 
 // This binary search works because ioutil.ReadDir returns files sorted by Name() by default
 if nobackupFile != "" {
-ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0})
+ii := sort.Search(len(files), func(ii int) bool { return strings.Compare(files[ii].Name(), nobackupFile) >= 0 })
 if ii < len(files) && files[ii].Name() == nobackupFile {
 LOG_DEBUG("LIST_NOBACKUP", "%s is excluded due to nobackup file", path)
 return directoryList, skippedFiles, nil
@@ -34,7 +34,7 @@ func CreateFileReader(top string, files []*Entry) *FileReader {
 return reader
 }
 
-// NextFile switchs to the next file in the file reader.
+// NextFile switches to the next file in the file reader.
 func (reader *FileReader) NextFile() bool {
 
 if reader.CurrentFile != nil {
@@ -165,7 +165,7 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
 }
 } else {
 if !stat.IsDir() {
-fmt.Errorf("The path %s is not a directory", dir)
+return fmt.Errorf("The path %s is not a directory", dir)
 }
 }
 }
@@ -25,7 +25,7 @@ import (
 )
 
 var (
 GCDFileMimeType = "application/octet-stream"
 GCDDirectoryMimeType = "application/vnd.google-apps.folder"
 )
 
@@ -33,7 +33,7 @@ type GCDStorage struct {
 StorageBase
 
 service *drive.Service
 idCache map[string]string // only directories are saved in this cache
 idCacheLock sync.Mutex
 backoffs []int // desired backoff time in seconds for each thread
 attempts []int // number of failed attempts since last success for each thread
@@ -291,7 +291,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, filePath string, creat
 } else if isDir {
 storage.savePathID(current, fileID)
 }
-if i != len(names) - 1 && !isDir {
+if i != len(names)-1 && !isDir {
 return "", fmt.Errorf("Path '%s' is not a directory", current)
 }
 }
@@ -386,8 +386,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 subDirs := []string{}
 
 for _, file := range files {
-storage.savePathID("snapshots/" + file.Name, file.Id)
-subDirs = append(subDirs, file.Name + "/")
+storage.savePathID("snapshots/"+file.Name, file.Id)
+subDirs = append(subDirs, file.Name+"/")
 }
 return subDirs, nil, nil
 } else if strings.HasPrefix(dir, "snapshots/") || strings.HasPrefix(dir, "benchmark") {
@@ -438,8 +438,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 files = append(files, name)
 sizes = append(sizes, entry.Size)
 } else {
-parents = append(parents, parent+ "/" + entry.Name)
-storage.savePathID(parent + "/" + entry.Name, entry.Id)
+parents = append(parents, parent+"/"+entry.Name)
+storage.savePathID(parent+"/"+entry.Name, entry.Id)
 }
 }
 }
@@ -113,7 +113,7 @@ func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, [
 
 for _, entry := range entries {
 if entry.Type == "application/directory" {
-files = append(files, entry.Name + "/")
+files = append(files, entry.Name+"/")
 sizes = append(sizes, 0)
 } else {
 files = append(files, entry.Name)
@@ -237,8 +237,6 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
 LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
 attempts += 1
 }
-
-return err
 }
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
@@ -207,7 +207,7 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
 if err != nil {
 // The error may be caused by a non-existent fullDir, or a broken connection. In either case,
 // we just assume it is the former because there isn't a way to tell which is the case.
-for i, _ := range dirs[1 : len(dirs)-1] {
+for i := range dirs[1 : len(dirs)-1] {
 subDir := path.Join(storage.storageDir, path.Join(dirs[0:i+2]...))
 // We don't check the error; just keep going blindly but always store the last err
 err = storage.client.Mkdir(subDir)
@@ -25,7 +25,7 @@ var snapshotDate string
 func CharsToString(ca []int8) string {
 
 len := len(ca)
 ba := make([]byte, len)
 
 for i, v := range ca {
 ba[i] = byte(v)
@@ -54,8 +54,8 @@ func GetPathDeviceId(path string) (deviceId int32, err error) {
 // Executes shell command with timeout and returns stdout
 func CommandWithTimeout(timeoutInSeconds int, name string, arg ...string) (output string, err error) {
 
-ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds) * time.Second)
+ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeoutInSeconds)*time.Second)
 defer cancel()
 
 cmd := exec.CommandContext(ctx, name, arg...)
 out, err := cmd.Output()
@@ -91,10 +91,10 @@ func DeleteShadowCopy() {
 LOG_ERROR("VSS_DELETE", "Error while deleting temporary mount directory")
 return
 }
 
 LOG_INFO("VSS_DELETE", "Shadow copy unmounted and deleted at %s", snapshotPath)
 
 snapshotPath = ""
 }
 
 func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadowTop string) {
@@ -128,7 +128,7 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 }
 if deviceIdLocal != deviceIdRepository {
 LOG_WARN("VSS_PATH", "VSS not supported for non-local repository path: ", top)
 return top
 }
 
 if timeoutInSeconds <= 60 {
@@ -157,8 +157,8 @@ func CreateShadowCopy(top string, shadowCopy bool, timeoutInSeconds int) (shadow
 snapshotDate = strings.TrimSpace(tmutilOutput[colonPos+1:])
 
 // Mount snapshot as readonly and hide from GUI i.e. Finder
 _, err = CommandWithTimeout(timeoutInSeconds,
-"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine." + snapshotDate, "/", snapshotPath)
+"/sbin/mount", "-t", "apfs", "-o", "nobrowse,-r,-s=com.apple.TimeMachine."+snapshotDate, "/", snapshotPath)
 if err != nil {
 LOG_ERROR("VSS_CREATE", "Error while mounting snapshot: ", err)
 return top
@@ -57,7 +57,7 @@ func CreateFossilCollection(allSnapshots map[string][]*Snapshot) *FossilCollecti
 }
 
 return &FossilCollection{
 LastRevisions: lastRevisions,
 DeletedRevisions: make(map[string][]int),
 }
 }
@@ -386,7 +386,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all
 
 if allSnapshots == nil {
 // If the 'fossils' directory exists then don't clean the cache as all snapshots will be needed later
-// during the fossil collection phase. The deletion procedure creates this direcotry.
+// during the fossil collection phase. The deletion procedure creates this directory.
 // We only check this condition when allSnapshots is nil because
 // in thise case it is the deletion procedure that is trying to clean the snapshot cache.
 exist, _, _, err := manager.snapshotCache.GetFileInfo(0, "fossils")
@@ -807,7 +807,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 
 snapshotIDIndex := 0
 totalMissingChunks := 0
-for snapshotID, _ = range snapshotMap {
+for snapshotID = range snapshotMap {
 
 revisions := revisionsToCheck
 if len(revisions) == 0 || showStatistics {
@@ -839,7 +839,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 }
 LOG_INFO("SNAPSHOT_CHECK", "Total chunk size is %s in %d chunks", PrettyNumber(totalChunkSize), len(chunkSizeMap))
 
-for snapshotID, _ = range snapshotMap {
+for snapshotID = range snapshotMap {
 
 for _, snapshot := range snapshotMap[snapshotID] {
 
@@ -855,7 +855,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 }
 
 missingChunks := 0
-for chunkID, _ := range chunks {
+for chunkID := range chunks {
 
 _, found := chunkSizeMap[chunkID]
 
@@ -953,7 +953,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
 var totalChunkSize int64
 var uniqueChunkSize int64
 
-for chunkID, _ := range chunks {
+for chunkID := range chunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 if chunkUniqueMap[chunkID] {
@@ -971,7 +971,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapsho
 
 var totalChunkSize int64
 var uniqueChunkSize int64
-for chunkID, _ := range snapshotChunks {
+for chunkID := range snapshotChunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 
@@ -1021,7 +1021,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
 var newChunkCount int64
 var newChunkSize int64
 
-for chunkID, _ := range chunks {
+for chunkID := range chunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 totalChunkCount += 1
@@ -1049,7 +1049,7 @@ func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*
 var uniqueChunkSize int64
 var totalChunkCount int64
 var uniqueChunkCount int64
-for chunkID, _ := range snapshotChunks {
+for chunkID := range snapshotChunks {
 chunkSize := chunkSizeMap[chunkID]
 totalChunkSize += chunkSize
 totalChunkCount += 1
@@ -1154,7 +1154,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
 }
 }
 
-// RetrieveFile retrieve the file in the specifed snapshot.
+// RetrieveFile retrieves the file in the specified snapshot.
 func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
 
 if file.Size == 0 {
@@ -2221,7 +2221,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
 continue
 }
 
-manager.chunkOperator.Resurrect(chunk, chunkDir + file)
+manager.chunkOperator.Resurrect(chunk, chunkDir+file)
 fmt.Fprintf(logFile, "Found referenced fossil %s\n", file)
 
 } else {
@@ -2232,7 +2232,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
 }
 
 if exclusive {
-manager.chunkOperator.Delete(chunk, chunkDir + file)
+manager.chunkOperator.Delete(chunk, chunkDir+file)
 } else {
 collection.AddFossil(chunkDir + file)
 LOG_DEBUG("FOSSIL_FIND", "Found unreferenced fossil %s", file)
@@ -2247,7 +2247,7 @@ func (manager *SnapshotManager) pruneSnapshotsExhaustive(referencedFossils map[s
 chunk := strings.Replace(file, "/", "", -1)
 
 if !chunkRegex.MatchString(chunk) {
-LOG_WARN("CHUNK_UNKONWN_FILE", "File %s is not a chunk", file)
+LOG_WARN("CHUNK_UNKNOWN_FILE", "File %s is not a chunk", file)
 continue
 }
 
@@ -2409,7 +2409,7 @@ func (manager *SnapshotManager) DownloadFile(path string, derivationKey string)
 }
 
 if len(derivationKey) > 64 {
-derivationKey = derivationKey[len(derivationKey) - 64:]
+derivationKey = derivationKey[len(derivationKey)-64:]
 }
 
 err = manager.fileChunk.Decrypt(manager.config.FileKey, derivationKey)
@@ -2443,7 +2443,7 @@ func (manager *SnapshotManager) UploadFile(path string, derivationKey string, co
 }
 
 if len(derivationKey) > 64 {
-derivationKey = derivationKey[len(derivationKey) - 64:]
+derivationKey = derivationKey[len(derivationKey)-64:]
 }
 
 err := manager.fileChunk.Encrypt(manager.config.FileKey, derivationKey)
@@ -9,12 +9,12 @@ import (
 "encoding/hex"
 "encoding/json"
 "fmt"
+"io/ioutil"
 "os"
 "path"
 "strings"
 "testing"
 "time"
-"io/ioutil"
 )
 
 func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
@@ -500,7 +500,7 @@ func TestPruneWithRetentionPolicyAndTag(t *testing.T) {
 t.Logf("Creating 30 snapshots")
 for i := 0; i < 30; i++ {
 tag := "auto"
-if i % 3 == 0 {
+if i%3 == 0 {
 tag = "manual"
 }
 createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]}, tag)
@@ -615,12 +615,12 @@ func TestPruneNewSnapshots(t *testing.T) {
 // Create another snapshot of vm1 that brings back chunkHash1
 createTestSnapshot(snapshotManager, "vm1@host1", 3, now-0*day-3600, now-0*day-60, []string{chunkHash1, chunkHash3}, "tag")
 // Create another snapshot of vm2 so the fossil collection will be processed by next prune
-createTestSnapshot(snapshotManager, "vm2@host1", 2, now + 3600, now + 3600 * 2, []string{chunkHash4, chunkHash5}, "tag")
+createTestSnapshot(snapshotManager, "vm2@host1", 2, now+3600, now+3600*2, []string{chunkHash4, chunkHash5}, "tag")
 
 // Now chunkHash1 wil be resurrected
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
 checkTestSnapshots(snapshotManager, 4, 0)
-snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false);
+snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3}, "", false, false, false, false, false)
 }
 
 // A fossil collection left by an aborted prune should be ignored if any supposedly deleted snapshot exists
@@ -664,12 +664,12 @@ func TestPruneGhostSnapshots(t *testing.T) {
 
 // Create another snapshot of vm1 so the fossil collection becomes eligible for processing.
 chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
-createTestSnapshot(snapshotManager, "vm1@host1", 3, now - day - 3600, now - day - 60, []string{chunkHash3, chunkHash4}, "tag")
+createTestSnapshot(snapshotManager, "vm1@host1", 3, now-day-3600, now-day-60, []string{chunkHash3, chunkHash4}, "tag")
 
 // Run the prune again but the fossil collection should be igored, since revision 1 still exists
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
 checkTestSnapshots(snapshotManager, 3, 2)
-snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false);
+snapshotManager.CheckSnapshots("vm1@host1", []int{1, 2, 3}, "", false, false, false, true /*searchFossils*/, false)
 
 // Prune snapshot 1 again
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
@@ -677,11 +677,11 @@ func TestPruneGhostSnapshots(t *testing.T) {
 
 // Create another snapshot
 chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
-createTestSnapshot(snapshotManager, "vm1@host1", 4, now + 3600, now + 3600 * 2, []string{chunkHash5, chunkHash5}, "tag")
+createTestSnapshot(snapshotManager, "vm1@host1", 4, now+3600, now+3600*2, []string{chunkHash5, chunkHash5}, "tag")
 checkTestSnapshots(snapshotManager, 3, 2)
 
 // Run the prune again and this time the fossil collection will be processed and the fossils removed
 snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false, 1)
 checkTestSnapshots(snapshotManager, 3, 0)
-snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false);
+snapshotManager.CheckSnapshots("vm1@host1", []int{2, 3, 4}, "", false, false, false, false, false)
 }
@@ -89,7 +89,7 @@ func (storage *StorageBase) SetRateLimits(downloadRateLimit int, uploadRateLimit
 }
 
 // SetDefaultNestingLevels sets the default read and write levels. This is usually called by
-// derived storages to set the levels with old values so that storages initialied by ealier versions
+// derived storages to set the levels with old values so that storages initialized by earlier versions
 // will continue to work.
 func (storage *StorageBase) SetDefaultNestingLevels(readLevels []int, writeLevel int) {
 storage.readLevels = readLevels
@@ -626,7 +626,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 } else if matched[1] == "webdav" || matched[1] == "webdav-http" {
 server := matched[3]
 username := matched[2]
-username = username[:len(username) - 1]
+username = username[:len(username)-1]
 storageDir := matched[5]
 port := 0
 useHTTP := matched[1] == "webdav-http"
@@ -80,12 +80,12 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
 return storage, err
 } else if testStorageName == "s3" {
 storage, err := CreateS3Storage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads, true, false)
-return storage, err
 storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+return storage, err
 } else if testStorageName == "wasabi" {
 storage, err := CreateWasabiStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
-return storage, err
 storage.SetDefaultNestingLevels([]int{2, 3}, 2)
+return storage, err
 } else if testStorageName == "s3c" {
 storage, err := CreateS3CStorage(config["region"], config["endpoint"], config["bucket"], config["directory"], config["access_key"], config["secret_key"], threads)
 storage.SetDefaultNestingLevels([]int{2, 3}, 2)
@@ -153,10 +153,7 @@ func loadStorage(localStoragePath string, threads int) (Storage, error) {
 }
 storage.SetDefaultNestingLevels([]int{2, 3}, 2)
 return storage, err
-} else {
-return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
 }
-
 return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
 }
 
@@ -21,7 +21,7 @@ type SwiftStorage struct {
 threads int
 }
 
 // CreateSwiftStorage creates an OpenStack Swift storage object. storageURL is in the form of
 // `user@authURL/container/path?arg1=value1&arg2=value2``
 func CreateSwiftStorage(storageURL string, key string, threads int) (storage *SwiftStorage, err error) {
 
@@ -7,10 +7,10 @@ package duplicacy
 import (
 "fmt"
 "os"
+"path/filepath"
 "strings"
 "syscall"
 "unsafe"
-"path/filepath"
 )
 
 type symbolicLinkReparseBuffer struct {
@@ -100,7 +100,7 @@ func (storage *WasabiStorage) MoveFile(
 var from_path string
 // The from path includes the bucket. Take care not to include an empty storageDir
 // string as Wasabi's backend will return 404 on URLs with double slashes.
-if (storage.storageDir == "") {
+if storage.storageDir == "" {
 from_path = fmt.Sprintf("/%s/%s", storage.bucket, from)
 } else {
 from_path = fmt.Sprintf("/%s/%s/%s", storage.bucket, storage.storageDir, from)
@@ -19,9 +19,9 @@ import (
 "net/http"
 //"net/http/httputil"
 "strconv"
+"strings"
 "sync"
 "time"
-"strings"
 )
 
 type WebDAVStorage struct {
@@ -42,10 +42,10 @@ type WebDAVStorage struct {
 
 var (
 errWebDAVAuthorizationFailure = errors.New("Authentication failed")
 errWebDAVMovedPermanently = errors.New("Moved permanently")
 errWebDAVNotExist = errors.New("Path does not exist")
 errWebDAVMaximumBackoff = errors.New("Maximum backoff reached")
 errWebDAVMethodNotAllowed = errors.New("Method not allowed")
 )
 
 func CreateWebDAVStorage(host string, port int, username string, password string, storageDir string, useHTTP bool, threads int) (storage *WebDAVStorage, err error) {
@@ -68,7 +68,7 @@ func CreateWebDAVStorage(host string, port int, username string, password string
 
 // Make sure it doesn't follow redirect
 storage.client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
 return http.ErrUseLastResponse
 }
 
 exist, isDir, _, err := storage.GetFileInfo(0, storageDir)
@@ -325,11 +325,11 @@ func (storage *WebDAVStorage) GetFileInfo(threadIndex int, filePath string) (exi
 return false, false, 0, err
 }
 
-if m, exist := properties["/" + storage.storageDir + filePath]; !exist {
+if m, exist := properties["/"+storage.storageDir+filePath]; !exist {
 return false, false, 0, nil
 } else if resourceType, exist := m["resourcetype"]; exist && strings.Contains(resourceType, "collection") {
 return true, true, 0, nil
-} else if length, exist := m["getcontentlength"]; exist && length != ""{
+} else if length, exist := m["getcontentlength"]; exist && length != "" {
 value, _ := strconv.Atoi(length)
 return true, false, int64(value), nil
 } else {