Mirror of https://github.com/gilbertchen/duplicacy (synced 2025-12-11 05:43:23 +00:00)

Commit: Run goimports on all source files
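goimports rewrites each Go file's import declaration into a single parenthesized block, sorts the entries alphabetically, and keeps standard-library packages grouped separately from third-party ones; it is typically run as goimports -w over the source tree. The sketch below is illustrative only — the file name and the values in it are invented, not taken from this commit — and shows the import layout goimports leaves behind:

// example.go — an illustrative file (not part of this commit) formatted the
// way goimports leaves it: one import block, entries sorted alphabetically.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

func main() {
	// The imports above exist only so the sorted block has several entries;
	// use each of them once by encoding a small value to stdout.
	name := strings.ToUpper("goimports")
	if err := json.NewEncoder(os.Stdout).Encode(name); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

Besides reordering imports, the commit applies matching formatting cleanups throughout the sources: redundant parentheses around single return types are dropped (func (storage *ACDStorage) IsCacheNeeded() (bool) becomes func (storage *ACDStorage) IsCacheNeeded() bool), superfluous parentheses around conditions are removed (if (len(chunk.hash) == 0) becomes if len(chunk.hash) == 0), and empty composite literals are collapsed onto one line (e := &B2Error{}).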
@@ -5,22 +5,23 @@
|
|||||||
package main
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"os/signal"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
|
||||||
"strconv"
|
|
||||||
"runtime"
|
"runtime"
|
||||||
"os/exec"
|
"strconv"
|
||||||
"os/signal"
|
"strings"
|
||||||
"encoding/json"
|
|
||||||
|
|
||||||
"github.com/gilbertchen/cli"
|
"github.com/gilbertchen/cli"
|
||||||
|
|
||||||
"github.com/gilbertchen/duplicacy/src"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
|
||||||
|
"github.com/gilbertchen/duplicacy/src"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@@ -239,7 +240,6 @@ func configRepository(context *cli.Context, init bool) {
|
|||||||
preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
|
preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
|
if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
|
||||||
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
|
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
|
||||||
return
|
return
|
||||||
@@ -890,7 +890,6 @@ func diff(context *cli.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
password := ""
|
password := ""
|
||||||
if preference.Encrypted {
|
if preference.Encrypted {
|
||||||
password = duplicacy.GetPassword(*preference, "password", "Enter storage password:", false, false)
|
password = duplicacy.GetPassword(*preference, "password", "Enter storage password:", false, false)
|
||||||
@@ -1058,7 +1057,6 @@ func copySnapshots(context *cli.Context) {
|
|||||||
sourceManager.SetupSnapshotCache(source.Name)
|
sourceManager.SetupSnapshotCache(source.Name)
|
||||||
duplicacy.SavePassword(*source, "password", sourcePassword)
|
duplicacy.SavePassword(*source, "password", sourcePassword)
|
||||||
|
|
||||||
|
|
||||||
_, destination := getRepositoryPreference(context, context.String("to"))
|
_, destination := getRepositoryPreference(context, context.String("to"))
|
||||||
|
|
||||||
if destination.Name == source.Name {
|
if destination.Name == source.Name {
|
||||||
@@ -1072,7 +1070,6 @@ func copySnapshots(context *cli.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
|
duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
|
||||||
destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
|
destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
|
||||||
if destinationStorage == nil {
|
if destinationStorage == nil {
|
||||||
@@ -1521,7 +1518,6 @@ func main() {
|
|||||||
Action: pruneSnapshots,
|
Action: pruneSnapshots,
|
||||||
},
|
},
|
||||||
|
|
||||||
|
|
||||||
{
|
{
|
||||||
Name: "password",
|
Name: "password",
|
||||||
Flags: []cli.Flag{
|
Flags: []cli.Flag{
|
||||||
@@ -1683,7 +1679,6 @@ func main() {
|
|||||||
ArgsUsage: "<storage url>",
|
ArgsUsage: "<storage url>",
|
||||||
Action: infoStorage,
|
Action: infoStorage,
|
||||||
},
|
},
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
app.Flags = []cli.Flag{
|
app.Flags = []cli.Flag{
|
||||||
|
|||||||
@@ -5,16 +5,16 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"sync"
|
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"io/ioutil"
|
||||||
"mime/multipart"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"mime/multipart"
|
||||||
|
"net/http"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
@@ -29,6 +29,7 @@ func (err ACDError) Error() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"
|
var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"
|
||||||
|
|
||||||
type ACDClient struct {
|
type ACDClient struct {
|
||||||
HTTPClient *http.Client
|
HTTPClient *http.Client
|
||||||
|
|
||||||
@@ -42,7 +43,6 @@ type ACDClient struct {
|
|||||||
TestMode bool
|
TestMode bool
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
func NewACDClient(tokenFile string) (*ACDClient, error) {
|
func NewACDClient(tokenFile string) (*ACDClient, error) {
|
||||||
|
|
||||||
description, err := ioutil.ReadFile(tokenFile)
|
description, err := ioutil.ReadFile(tokenFile)
|
||||||
|
|||||||
@@ -5,11 +5,11 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
|||||||
@@ -88,8 +88,7 @@ func (storage *ACDStorage) deletePathID(path string) {
|
|||||||
storage.idCacheLock.Unlock()
|
storage.idCacheLock.Unlock()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (storage *ACDStorage) convertFilePath(filePath string) string {
|
||||||
func (storage *ACDStorage) convertFilePath(filePath string) (string) {
|
|
||||||
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||||
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||||
}
|
}
|
||||||
@@ -183,7 +182,6 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
|
|||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
for _, entry := range entries {
|
for _, entry := range entries {
|
||||||
name := entry.Name
|
name := entry.Name
|
||||||
if parent == "fossils" {
|
if parent == "fossils" {
|
||||||
@@ -389,16 +387,16 @@ func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *ACDStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *ACDStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *ACDStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *ACDStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *ACDStorage) IsStrongConsistent() (bool) { return true }
|
func (storage *ACDStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *ACDStorage) IsFastListing() (bool) { return true }
|
func (storage *ACDStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *ACDStorage) EnableTestMode() {}
|
func (storage *ACDStorage) EnableTestMode() {}
|
||||||
|
|||||||
@@ -189,16 +189,16 @@ func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, conten
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *AzureStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *AzureStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *AzureStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *AzureStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *AzureStorage) IsStrongConsistent() (bool) { return true }
|
func (storage *AzureStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *AzureStorage) IsFastListing() (bool) { return true }
|
func (storage *AzureStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *AzureStorage) EnableTestMode() {}
|
func (storage *AzureStorage) EnableTestMode() {}
|
||||||
|
|||||||
@@ -5,19 +5,19 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"strconv"
|
"crypto/sha1"
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"io/ioutil"
|
||||||
"strings"
|
|
||||||
"crypto/sha1"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type B2Error struct {
|
type B2Error struct {
|
||||||
@@ -51,7 +51,6 @@ type B2Client struct {
|
|||||||
UploadToken string
|
UploadToken string
|
||||||
|
|
||||||
TestMode bool
|
TestMode bool
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewB2Client(accountID string, applicationKey string) *B2Client {
|
func NewB2Client(accountID string, applicationKey string) *B2Client {
|
||||||
@@ -168,8 +167,7 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
|
|||||||
|
|
||||||
defer response.Body.Close()
|
defer response.Body.Close()
|
||||||
|
|
||||||
e := &B2Error {
|
e := &B2Error{}
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
|
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
|
||||||
return nil, 0, err
|
return nil, 0, err
|
||||||
@@ -299,8 +297,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
|
|||||||
|
|
||||||
defer readCloser.Close()
|
defer readCloser.Close()
|
||||||
|
|
||||||
output := B2ListFileNamesOutput {
|
output := B2ListFileNamesOutput{}
|
||||||
}
|
|
||||||
|
|
||||||
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@@ -405,7 +402,7 @@ type B2GetUploadArgumentOutput struct {
|
|||||||
AuthorizationToken string
|
AuthorizationToken string
|
||||||
}
|
}
|
||||||
|
|
||||||
func (client *B2Client) getUploadURL() (error) {
|
func (client *B2Client) getUploadURL() error {
|
||||||
input := make(map[string]string)
|
input := make(map[string]string)
|
||||||
input["bucketId"] = client.BucketID
|
input["bucketId"] = client.BucketID
|
||||||
|
|
||||||
@@ -431,7 +428,6 @@ func (client *B2Client) getUploadURL() (error) {
|
|||||||
|
|
||||||
func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
|
func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
|
||||||
|
|
||||||
|
|
||||||
hasher := sha1.New()
|
hasher := sha1.New()
|
||||||
hasher.Write(content)
|
hasher.Write(content)
|
||||||
hash := hex.EncodeToString(hasher.Sum(nil))
|
hash := hex.EncodeToString(hasher.Sum(nil))
|
||||||
@@ -517,4 +513,3 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
|
|||||||
|
|
||||||
return fmt.Errorf("Maximum backoff reached")
|
return fmt.Errorf("Maximum backoff reached")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,15 +5,15 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"testing"
|
||||||
|
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
)
|
)
|
||||||
|
|
||||||
func createB2ClientForTest(t *testing.T) (*B2Client, string) {
|
func createB2ClientForTest(t *testing.T) (*B2Client, string) {
|
||||||
|
|||||||
@@ -236,16 +236,16 @@ func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content [
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *B2Storage) IsCacheNeeded() (bool) { return true }
|
func (storage *B2Storage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *B2Storage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *B2Storage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *B2Storage) IsStrongConsistent() (bool) { return true }
|
func (storage *B2Storage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *B2Storage) IsFastListing() (bool) { return true }
|
func (storage *B2Storage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *B2Storage) EnableTestMode() {
|
func (storage *B2Storage) EnableTestMode() {
|
||||||
|
|||||||
@@ -5,21 +5,21 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"os"
|
"encoding/hex"
|
||||||
"io"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"time"
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"strings"
|
"time"
|
||||||
"strconv"
|
|
||||||
"runtime"
|
|
||||||
"encoding/hex"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
|
// BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
|
||||||
@@ -35,12 +35,10 @@ type BackupManager struct {
|
|||||||
config *Config // contains a number of options
|
config *Config // contains a number of options
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
func (manager *BackupManager) SetDryRun(dryRun bool) {
|
func (manager *BackupManager) SetDryRun(dryRun bool) {
|
||||||
manager.config.dryRun = dryRun
|
manager.config.dryRun = dryRun
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
|
// CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
|
||||||
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
|
// identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
|
||||||
// master key which can be nil if encryption is not enabled.
|
// master key which can be nil if encryption is not enabled.
|
||||||
@@ -230,7 +228,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if incompleteSnapshot != nil {
|
if incompleteSnapshot != nil {
|
||||||
|
|
||||||
// This is the last chunk from the incomplete snapshot that can be found in the cache
|
// This is the last chunk from the incomplete snapshot that can be found in the cache
|
||||||
@@ -398,7 +395,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
|
|||||||
LOG_INFO("SNAPSHOT_FAIL", "Will abort the backup on chunk %d", chunkToFail)
|
LOG_INFO("SNAPSHOT_FAIL", "Will abort the backup on chunk %d", chunkToFail)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
chunkMaker := CreateChunkMaker(manager.config, false)
|
chunkMaker := CreateChunkMaker(manager.config, false)
|
||||||
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
|
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
|
||||||
|
|
||||||
@@ -924,7 +920,6 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
|
|||||||
file.RestoreMetadata(fullPath, nil)
|
file.RestoreMetadata(fullPath, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if deleteMode && len(patterns) == 0 {
|
if deleteMode && len(patterns) == 0 {
|
||||||
// Reverse the order to make sure directories are empty before being deleted
|
// Reverse the order to make sure directories are empty before being deleted
|
||||||
for i := range extraFiles {
|
for i := range extraFiles {
|
||||||
@@ -1019,7 +1014,7 @@ func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
|
|||||||
// UploadSnapshot uploads the specified snapshot to the storage. It turns Files, ChunkHashes, and ChunkLengths into
|
// UploadSnapshot uploads the specified snapshot to the storage. It turns Files, ChunkHashes, and ChunkLengths into
|
||||||
// sequences of chunks, and uploads these chunks, and finally the snapshot file.
|
// sequences of chunks, and uploads these chunks, and finally the snapshot file.
|
||||||
func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *ChunkUploader, top string, snapshot *Snapshot,
|
func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *ChunkUploader, top string, snapshot *Snapshot,
|
||||||
chunkCache map[string]bool, ) ( totalSnapshotChunkSize int64,
|
chunkCache map[string]bool) (totalSnapshotChunkSize int64,
|
||||||
numberOfNewSnapshotChunks int, totalUploadedSnapshotChunkSize int64,
|
numberOfNewSnapshotChunks int, totalUploadedSnapshotChunkSize int64,
|
||||||
totalUploadedSnapshotChunkBytes int64) {
|
totalUploadedSnapshotChunkBytes int64) {
|
||||||
|
|
||||||
@@ -1455,7 +1450,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if existingFile != nil {
|
if existingFile != nil {
|
||||||
existingFile.Close()
|
existingFile.Close()
|
||||||
existingFile = nil
|
existingFile = nil
|
||||||
|
|||||||
@@ -5,15 +5,15 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
crypto_rand "crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
"io"
|
"io"
|
||||||
|
"math/rand"
|
||||||
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"testing"
|
"testing"
|
||||||
"math/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"time"
|
"time"
|
||||||
"crypto/sha256"
|
|
||||||
crypto_rand "crypto/rand"
|
|
||||||
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
)
|
)
|
||||||
@@ -236,7 +236,6 @@ func TestBackupManager(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
@@ -244,11 +243,11 @@ func TestBackupManager(t *testing.T) {
|
|||||||
backupManager.SetupSnapshotCache("default")
|
backupManager.SetupSnapshotCache("default")
|
||||||
|
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)
|
||||||
|
|
||||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
|
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
|
||||||
@@ -268,11 +267,11 @@ func TestBackupManager(t *testing.T) {
|
|||||||
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
|
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
|
||||||
|
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)
|
||||||
|
|
||||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
@@ -288,7 +287,7 @@ func TestBackupManager(t *testing.T) {
|
|||||||
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
|
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
|
||||||
os.Mkdir(testDir+"/repository1/dir4", 0700)
|
os.Mkdir(testDir+"/repository1/dir4", 0700)
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
// Create some directories and files under repository2 that will be deleted during restore
|
// Create some directories and files under repository2 that will be deleted during restore
|
||||||
@@ -299,8 +298,8 @@ func TestBackupManager(t *testing.T) {
|
|||||||
createRandomFile(testDir+"/repository2/dir5/file5", 100)
|
createRandomFile(testDir+"/repository2/dir5/file5", 100)
|
||||||
|
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
/*deleteMode=*/true, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/ true /*showStatistics=*/, false /*patterns=*/, nil)
|
||||||
|
|
||||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
@@ -311,23 +310,23 @@ func TestBackupManager(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// These files/dirs should not exist because deleteMode == true
|
// These files/dirs should not exist because deleteMode == true
|
||||||
checkExistence(t, testDir + "/repository2/dir5", false, false);
|
checkExistence(t, testDir+"/repository2/dir5", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/dir5/dir6", false, false);
|
checkExistence(t, testDir+"/repository2/dir5/dir6", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/dir7", false, false);
|
checkExistence(t, testDir+"/repository2/dir7", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/file4", false, false);
|
checkExistence(t, testDir+"/repository2/file4", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/dir5/file5", false, false);
|
checkExistence(t, testDir+"/repository2/dir5/file5", false, false)
|
||||||
|
|
||||||
// These empty dirs should exist
|
// These empty dirs should exist
|
||||||
checkExistence(t, testDir + "/repository2/dir2", true, true);
|
checkExistence(t, testDir+"/repository2/dir2", true, true)
|
||||||
checkExistence(t, testDir + "/repository2/dir2/dir3", true, true);
|
checkExistence(t, testDir+"/repository2/dir2/dir3", true, true)
|
||||||
checkExistence(t, testDir + "/repository2/dir4", true, true);
|
checkExistence(t, testDir+"/repository2/dir4", true, true)
|
||||||
|
|
||||||
// Remove file2 and dir1/file3 and restore them from revision 3
|
// Remove file2 and dir1/file3 and restore them from revision 3
|
||||||
os.Remove(testDir + "/repository1/file2")
|
os.Remove(testDir + "/repository1/file2")
|
||||||
os.Remove(testDir + "/repository1/dir1/file3")
|
os.Remove(testDir + "/repository1/dir1/file3")
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
|
/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
|
||||||
|
|
||||||
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
|
|||||||
@@ -5,19 +5,18 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"runtime"
|
"compress/zlib"
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/aes"
|
"crypto/aes"
|
||||||
|
"crypto/cipher"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"compress/zlib"
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
"github.com/bkaradzic/go-lz4"
|
"github.com/bkaradzic/go-lz4"
|
||||||
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
|
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
|
||||||
@@ -132,7 +131,7 @@ func (chunk *Chunk) Write(p []byte) (int, error){
|
|||||||
|
|
||||||
// GetHash returns the chunk hash.
|
// GetHash returns the chunk hash.
|
||||||
func (chunk *Chunk) GetHash() string {
|
func (chunk *Chunk) GetHash() string {
|
||||||
if (len(chunk.hash) == 0) {
|
if len(chunk.hash) == 0 {
|
||||||
chunk.hash = chunk.hasher.Sum(nil)
|
chunk.hash = chunk.hasher.Sum(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -379,4 +378,3 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestChunk(t *testing.T) {
|
func TestChunk(t *testing.T) {
|
||||||
@@ -67,7 +67,6 @@ func TestChunk(t *testing.T) {
|
|||||||
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
|
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -330,7 +330,6 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only use a circular buffer
|
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only use a circular buffer
|
||||||
@@ -229,9 +229,8 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
|
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
|
||||||
if (!minimumReached) {
|
if !minimumReached {
|
||||||
|
|
||||||
bytes := maker.minimumChunkSize
|
bytes := maker.minimumChunkSize
|
||||||
|
|
||||||
|
|||||||
@@ -5,12 +5,12 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
|
||||||
"io"
|
"io"
|
||||||
|
"math/rand"
|
||||||
"sort"
|
"sort"
|
||||||
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
|
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
|
||||||
@@ -76,7 +76,7 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
|||||||
return buffers[i], true
|
return buffers[i], true
|
||||||
})
|
})
|
||||||
|
|
||||||
if (totalFileSize != int64(totalChunkSize)) {
|
if totalFileSize != int64(totalChunkSize) {
|
||||||
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
|
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
|
||||||
}
|
}
|
||||||
return chunks, totalChunkSize
|
return chunks, totalChunkSize
|
||||||
@@ -84,7 +84,6 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
|||||||
|
|
||||||
func TestChunkMaker(t *testing.T) {
|
func TestChunkMaker(t *testing.T) {
|
||||||
|
|
||||||
|
|
||||||
//sizes := [...] int { 64 }
|
//sizes := [...] int { 64 }
|
||||||
sizes := [...]int{64, 256, 1024, 1024 * 10}
|
sizes := [...]int{64, 256, 1024, 1024 * 10}
|
||||||
|
|
||||||
@@ -101,7 +100,7 @@ func TestChunkMaker(t *testing.T) {
|
|||||||
|
|
||||||
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
|
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
|
||||||
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
|
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
|
||||||
32, 48, 64, 128, 256, 512, 1024, 2048, }
|
32, 48, 64, 128, 256, 512, 1024, 2048}
|
||||||
|
|
||||||
//capacities := [...]int { 32 }
|
//capacities := [...]int { 32 }
|
||||||
|
|
||||||
|
|||||||
@@ -6,10 +6,10 @@ package duplicacy
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"time"
|
|
||||||
"path"
|
"path"
|
||||||
"testing"
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@@ -56,7 +56,6 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
numberOfChunks := 100
|
numberOfChunks := 100
|
||||||
maxChunkSize := 64 * 1024
|
maxChunkSize := 64 * 1024
|
||||||
|
|
||||||
@@ -102,7 +101,6 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
|
|
||||||
chunkUploader.Stop()
|
chunkUploader.Stop()
|
||||||
|
|
||||||
|
|
||||||
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
||||||
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
||||||
|
|
||||||
|
|||||||
@@ -5,18 +5,18 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"os"
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"crypto/rand"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
|
|
||||||
blake2 "github.com/minio/blake2b-simd"
|
blake2 "github.com/minio/blake2b-simd"
|
||||||
)
|
)
|
||||||
@@ -344,7 +344,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func UploadConfig(storage Storage, config *Config, password string) (bool) {
|
func UploadConfig(storage Storage, config *Config, password string) bool {
|
||||||
|
|
||||||
// This is the key to encrypt the config file.
|
// This is the key to encrypt the config file.
|
||||||
var masterKey []byte
|
var masterKey []byte
|
||||||
@@ -417,7 +417,6 @@ func ConfigStorage(storage Storage, compressionLevel int, averageChunkSize int,
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
|
config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
|
||||||
copyFrom)
|
copyFrom)
|
||||||
if config == nil {
|
if config == nil {
|
||||||
|
|||||||
@@ -8,6 +8,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"path"
|
"path"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/gilbertchen/go-dropbox"
|
"github.com/gilbertchen/go-dropbox"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -85,7 +86,7 @@ func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []s
|
|||||||
|
|
||||||
if output.HasMore {
|
if output.HasMore {
|
||||||
output, err = storage.clients[threadIndex].ListFolderContinue(
|
output, err = storage.clients[threadIndex].ListFolderContinue(
|
||||||
&dropbox.ListFolderContinueInput { Cursor: output.Cursor, })
|
&dropbox.ListFolderContinueInput{Cursor: output.Cursor})
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
break
|
break
|
||||||
@@ -281,16 +282,16 @@ func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, cont
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *DropboxStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *DropboxStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *DropboxStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *DropboxStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *DropboxStorage) IsStrongConsistent() (bool) { return false }
|
func (storage *DropboxStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *DropboxStorage) IsFastListing() (bool) { return false }
|
func (storage *DropboxStorage) IsFastListing() bool { return false }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *DropboxStorage) EnableTestMode() {}
|
func (storage *DropboxStorage) EnableTestMode() {}
|
||||||
|
|||||||
@@ -4,22 +4,20 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"fmt"
|
|
||||||
"path/filepath"
|
|
||||||
"io/ioutil"
|
|
||||||
"sort"
|
|
||||||
"regexp"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
"encoding/json"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"strings"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"regexp"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
// This is the hidden directory in the repository for storing various files.
|
// This is the hidden directory in the repository for storing various files.
|
||||||
var DUPLICACY_DIRECTORY = ".duplicacy"
|
var DUPLICACY_DIRECTORY = ".duplicacy"
|
||||||
var DUPLICACY_FILE = ".duplicacy"
|
var DUPLICACY_FILE = ".duplicacy"
|
||||||
@@ -323,7 +321,6 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool
|
|||||||
return SetOwner(fullPath, entry, fileInfo)
|
return SetOwner(fullPath, entry, fileInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
|
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
|
||||||
// Files are always arranged before subdirectories under the same parent directory.
|
// Files are always arranged before subdirectories under the same parent directory.
|
||||||
func (left *Entry) Compare(right *Entry) int {
|
func (left *Entry) Compare(right *Entry) int {
|
||||||
|
|||||||
@@ -5,12 +5,12 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"math/rand"
|
|
||||||
"sort"
|
"sort"
|
||||||
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestEntrySort(t *testing.T) {
|
func TestEntrySort(t *testing.T) {
|
||||||
@@ -107,7 +107,6 @@ func TestEntryList(t *testing.T) {
|
|||||||
"ab3/c",
|
"ab3/c",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
var entry1, entry2 *Entry
|
var entry1, entry2 *Entry
|
||||||
|
|
||||||
for i, p1 := range DATA {
|
for i, p1 := range DATA {
|
||||||
@@ -217,4 +216,3 @@ func TestEntryList(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ type FileReader struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// CreateFileReader creates a file reader.
|
// CreateFileReader creates a file reader.
|
||||||
func CreateFileReader(top string, files[] *Entry) (*FileReader) {
|
func CreateFileReader(top string, files []*Entry) *FileReader {
|
||||||
|
|
||||||
reader := &FileReader{
|
reader := &FileReader{
|
||||||
top: top,
|
top: top,
|
||||||
@@ -68,7 +68,3 @@ func (reader *FileReader) NextFile() bool{
|
|||||||
reader.CurrentFile = nil
|
reader.CurrentFile = nil
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -5,13 +5,13 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"path"
|
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"time"
|
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"os"
|
||||||
|
"path"
|
||||||
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FileStorage is a local on-disk file storage implementing the Storage interface.
|
// FileStorage is a local on-disk file storage implementing the Storage interface.
|
||||||
@@ -248,16 +248,16 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *FileStorage) IsCacheNeeded () (bool) { return storage.isCacheNeeded }
|
func (storage *FileStorage) IsCacheNeeded() bool { return storage.isCacheNeeded }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *FileStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *FileStorage) IsStrongConsistent() (bool) { return true }
|
func (storage *FileStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *FileStorage) IsFastListing() (bool) { return false }
|
func (storage *FileStorage) IsFastListing() bool { return false }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *FileStorage) EnableTestMode() {}
|
func (storage *FileStorage) EnableTestMode() {}
|
||||||
|
|||||||
@@ -5,18 +5,18 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"io"
|
||||||
"path"
|
|
||||||
"time"
|
|
||||||
"sync"
|
|
||||||
"strings"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"encoding/json"
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"path"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
@@ -35,7 +35,6 @@ type GCDStorage struct {
|
|||||||
isConnected bool
|
isConnected bool
|
||||||
numberOfThreads int
|
numberOfThreads int
|
||||||
TestMode bool
|
TestMode bool
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type GCDConfig struct {
|
type GCDConfig struct {
|
||||||
@@ -96,7 +95,7 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (storage *GCDStorage) convertFilePath(filePath string) (string) {
|
func (storage *GCDStorage) convertFilePath(filePath string) string {
|
||||||
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
|
||||||
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
|
||||||
}
|
}
|
||||||
@@ -174,7 +173,6 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
return files, nil
|
return files, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -614,16 +612,16 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *GCDStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *GCDStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *GCDStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *GCDStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *GCDStorage) IsStrongConsistent() (bool) { return false }
|
func (storage *GCDStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *GCDStorage) IsFastListing() (bool) { return true }
|
func (storage *GCDStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *GCDStorage) EnableTestMode() { storage.TestMode = true }
|
func (storage *GCDStorage) EnableTestMode() { storage.TestMode = true }
|
||||||
|
|||||||
@@ -5,22 +5,22 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
"net/url"
|
|
||||||
"math/rand"
|
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gcs "cloud.google.com/go/storage"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
"golang.org/x/oauth2/google"
|
"golang.org/x/oauth2/google"
|
||||||
gcs "cloud.google.com/go/storage"
|
"google.golang.org/api/googleapi"
|
||||||
"google.golang.org/api/iterator"
|
"google.golang.org/api/iterator"
|
||||||
"google.golang.org/api/option"
|
"google.golang.org/api/option"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type GCSStorage struct {
|
type GCSStorage struct {
|
||||||
@@ -31,7 +31,6 @@ type GCSStorage struct {
|
|||||||
|
|
||||||
numberOfThreads int
|
numberOfThreads int
|
||||||
TestMode bool
|
TestMode bool
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type GCSConfig struct {
|
type GCSConfig struct {
|
||||||
@@ -149,7 +148,6 @@ func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {
|
|||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
|
func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
|
||||||
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
@@ -288,16 +286,16 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *GCSStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *GCSStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *GCSStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *GCSStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *GCSStorage) IsStrongConsistent() (bool) { return true }
|
func (storage *GCSStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *GCSStorage) IsFastListing() (bool) { return true }
|
func (storage *GCSStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }
|
func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }
|
||||||
|
|||||||
@@ -5,18 +5,18 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
"sync"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"strings"
|
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
net_url "net/url"
|
net_url "net/url"
|
||||||
"math/rand"
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
@@ -436,7 +436,7 @@ func (client *HubicClient) MoveFile(from string, to string) error {
|
|||||||
return client.DeleteFile(from)
|
return client.DeleteFile(from)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (client *HubicClient) CreateDirectory(path string) (error) {
|
func (client *HubicClient) CreateDirectory(path string) error {
|
||||||
|
|
||||||
for len(path) > 0 && path[len(path)-1] == '/' {
|
for len(path) > 0 && path[len(path)-1] == '/' {
|
||||||
path = path[:len(path)-1]
|
path = path[:len(path)-1]
|
||||||
|
|||||||
@@ -5,11 +5,11 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"fmt"
|
|
||||||
"testing"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"testing"
|
||||||
|
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
|||||||
@@ -190,16 +190,16 @@ func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, conten
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *HubicStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *HubicStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *HubicStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *HubicStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *HubicStorage) IsStrongConsistent() (bool) { return false }
|
func (storage *HubicStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *HubicStorage) IsFastListing() (bool) { return true }
|
func (storage *HubicStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *HubicStorage) EnableTestMode() {
|
func (storage *HubicStorage) EnableTestMode() {
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"io/ioutil"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var keyringFile string
|
var keyringFile string
|
||||||
|
|||||||
@@ -5,12 +5,12 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"os"
|
||||||
|
"runtime/debug"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"runtime/debug"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
|||||||
@@ -5,16 +5,16 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
"time"
|
|
||||||
"sync"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"strings"
|
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net/http"
|
"io/ioutil"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
)
|
)
|
||||||
@@ -167,7 +167,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
 continue
 } else {
 if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
-return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
+return nil, 0, OneDriveError{Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response")}
 }
 
 errorResponse.Error.Status = response.StatusCode
@@ -340,7 +340,7 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
 return nil
 }
 
-func (client *OneDriveClient) CreateDirectory(path string, name string) (error) {
+func (client *OneDriveClient) CreateDirectory(path string, name string) error {
 
 url := OneDriveAPIURL + "/root/children"
 
@@ -5,11 +5,11 @@
 package duplicacy
 
 import (
-"io"
-"fmt"
-"testing"
 "crypto/sha256"
 "encoding/hex"
+"fmt"
+"io"
+"testing"
 
 crypto_rand "crypto/rand"
 "math/rand"
@@ -30,7 +30,6 @@ func TestOneDriveClient(t *testing.T) {
 fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
 }
 
-
 testID, _, _, err := oneDriveClient.GetFileInfo("test")
 if err != nil {
 t.Errorf("Failed to list the test directory: %v", err)
@@ -225,16 +225,16 @@ func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, con
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *OneDriveStorage) IsCacheNeeded() (bool) { return true }
+func (storage *OneDriveStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *OneDriveStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *OneDriveStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *OneDriveStorage) IsStrongConsistent() (bool) { return false }
+func (storage *OneDriveStorage) IsStrongConsistent() bool { return false }
 
 // If the storage supports fast listing of files names.
-func (storage *OneDriveStorage) IsFastListing() (bool) { return true }
+func (storage *OneDriveStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *OneDriveStorage) EnableTestMode() {
@@ -5,12 +5,12 @@
 package duplicacy
 
 import (
-"strings"
 "encoding/json"
-"path"
 "io/ioutil"
-"reflect"
 "os"
+"path"
+"reflect"
+"strings"
 )
 
 // Preference stores options for each storage.
@@ -91,7 +91,7 @@ func SetDuplicacyPreferencePath(p string) {
 preferencePath = p
 }
 
-func SavePreferences() (bool) {
+func SavePreferences() bool {
 description, err := json.MarshalIndent(Preferences, "", " ")
 if err != nil {
 LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
@@ -108,7 +108,7 @@ func SavePreferences() (bool) {
 return true
 }
 
-func FindPreference(name string) (*Preference) {
+func FindPreference(name string) *Preference {
 for i, preference := range Preferences {
 if preference.Name == name || preference.StorageURL == name {
 return &Preferences[i]
@@ -6,6 +6,7 @@ package duplicacy
 
 import (
 "time"
+
 "github.com/gilbertchen/goamz/aws"
 "github.com/gilbertchen/goamz/s3"
 )
@@ -197,16 +198,16 @@ func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *S3CStorage) IsCacheNeeded () (bool) { return true }
+func (storage *S3CStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *S3CStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *S3CStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *S3CStorage) IsStrongConsistent() (bool) { return false }
+func (storage *S3CStorage) IsStrongConsistent() bool { return false }
 
 // If the storage supports fast listing of files names.
-func (storage *S3CStorage) IsFastListing() (bool) { return true }
+func (storage *S3CStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *S3CStorage) EnableTestMode() {}
@@ -5,8 +5,8 @@
 package duplicacy
 
 import (
-"strings"
 "reflect"
+"strings"
 
 "github.com/aws/aws-sdk-go/aws"
 "github.com/aws/aws-sdk-go/aws/awserr"
@@ -255,16 +255,16 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *S3Storage) IsCacheNeeded () (bool) { return true }
+func (storage *S3Storage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *S3Storage) IsMoveFileImplemented() (bool) { return true }
+func (storage *S3Storage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *S3Storage) IsStrongConsistent() (bool) { return false }
+func (storage *S3Storage) IsStrongConsistent() bool { return false }
 
 // If the storage supports fast listing of files names.
-func (storage *S3Storage) IsFastListing() (bool) { return true }
+func (storage *S3Storage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *S3Storage) EnableTestMode() {}
@@ -7,15 +7,15 @@ package duplicacy
 import (
 "fmt"
 "io"
-"os"
-"net"
-"path"
-"time"
-"runtime"
 "math/rand"
+"net"
+"os"
+"path"
+"runtime"
+"time"
 
-"golang.org/x/crypto/ssh"
 "github.com/pkg/sftp"
+"golang.org/x/crypto/ssh"
 )
 
 type SFTPStorage struct {
@@ -31,7 +31,6 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
 
 authMethods := []ssh.AuthMethod{ssh.Password(password)}
 
-
 hostKeyCallback := func(hostname string, remote net.Addr,
 key ssh.PublicKey) error {
 return nil
@@ -294,16 +293,16 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *SFTPStorage) IsCacheNeeded () (bool) { return true }
+func (storage *SFTPStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *SFTPStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *SFTPStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *SFTPStorage) IsStrongConsistent() (bool) { return true }
+func (storage *SFTPStorage) IsStrongConsistent() bool { return true }
 
 // If the storage supports fast listing of files names.
-func (storage *SFTPStorage) IsFastListing() (bool) { return false }
+func (storage *SFTPStorage) IsFastListing() bool { return false }
 
 // Enable the test mode.
 func (storage *SFTPStorage) EnableTestMode() {}
@@ -5,11 +5,11 @@
 package duplicacy
 
 import (
-"syscall"
-"unsafe"
-"time"
 "os"
 "runtime"
+"syscall"
+"time"
+"unsafe"
 
 ole "github.com/gilbertchen/go-ole"
 )
@@ -77,7 +77,6 @@ func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
 return
 }
 
-
 //665c1d5f-c218-414d-a05d-7fef5f9d5c86
 var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}
 
@@ -238,7 +237,7 @@ type SnapshotProperties struct {
 Status int
 }
 
-func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) (int) {
+func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
 var ret uintptr
 if runtime.GOARCH == "386" {
 address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
@@ -292,8 +291,7 @@ func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {
 return int(ret), int(deleted), deletedGUID
 }
 
-
-func uint16ArrayToString(p *uint16) (string) {
+func uint16ArrayToString(p *uint16) string {
 if p == nil {
 return ""
 }
@@ -481,9 +479,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
 }
 async.Release()
 
-
-properties := SnapshotProperties {
-}
+properties := SnapshotProperties{}
 
 ret = vssBackupComponent.GetSnapshotProperties(snapshotID, &properties)
 if ret != 0 {
@@ -521,5 +517,3 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
 return shadowLink + "\\" + top[2:]
 
 }
-
-
@@ -5,15 +5,15 @@
 package duplicacy
 
 import (
-"os"
-"fmt"
-"time"
-"path"
-"strings"
-"strconv"
-"io/ioutil"
-"encoding/json"
 "encoding/hex"
+"encoding/json"
+"fmt"
+"io/ioutil"
+"os"
+"path"
+"strconv"
+"strings"
+"time"
 )
 
 // Snapshot represents a backup of the repository.
@@ -409,7 +409,7 @@ func (snapshot *Snapshot) SetSequence(sequenceType string, sequence [] string) {
 }
 
 // encodeSequence turns a sequence of binary hashes into a sequence of hex hashes.
-func encodeSequence(sequence[] string) ([] string) {
+func encodeSequence(sequence []string) []string {
 
 sequenceInHex := make([]string, len(sequence))
 
@@ -419,5 +419,3 @@ func encodeSequence(sequence[] string) ([] string) {
 
 return sequenceInHex
 }
-
-
@@ -5,21 +5,21 @@
 package duplicacy
 
 import (
-"io"
-"os"
-"fmt"
-"text/tabwriter"
-"sort"
 "bytes"
+"encoding/hex"
+"encoding/json"
+"fmt"
+"io"
+"io/ioutil"
+"math"
+"os"
+"path"
 "regexp"
+"sort"
 "strconv"
 "strings"
+"text/tabwriter"
 "time"
-"math"
-"path"
-"io/ioutil"
-"encoding/json"
-"encoding/hex"
 
 "github.com/aryann/difflib"
 )
@@ -174,7 +174,6 @@ type SnapshotManager struct {
 snapshotCache *FileStorage
 
 chunkDownloader *ChunkDownloader
-
 }
 
 // CreateSnapshotManager creates a snapshot manager
@@ -234,7 +233,7 @@ type sequenceReader struct {
 sequence []string
 buffer *bytes.Buffer
 index int
-refillFunc func(hash string) ([]byte)
+refillFunc func(hash string) []byte
 }
 
 // Read reads a new chunk using the refill function when there is no more data in the buffer
@@ -277,7 +276,7 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
 reader := sequenceReader{
 sequence: snapshot.FileSequence,
 buffer: new(bytes.Buffer),
-refillFunc: func (chunkHash string) ([]byte) {
+refillFunc: func(chunkHash string) []byte {
 i := manager.chunkDownloader.AddChunk(chunkHash)
 chunk := manager.chunkDownloader.WaitForChunk(i)
 return chunk.GetBytes()
@@ -315,7 +314,6 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
 return true
 }
 
-
 // DownloadSnapshotSequence downloads the content represented by a sequence of chunks, and then unmarshal the content
 // using the specified 'loadFunction'. It purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
 // in a snapshot.
@@ -331,7 +329,6 @@ func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, seq
 
 content := manager.DownloadSequence(sequence)
 
-
 if len(content) == 0 {
 LOG_ERROR("SNAPSHOT_PARSE", "Failed to load %s specified in the snapshot %s at revision %d",
 sequenceType, snapshot.ID, snapshot.Revision)
@@ -799,7 +796,6 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
 snapshotMap[snapshotID] = nil
 }
 
-
 snapshotIDIndex := 0
 for snapshotID, _ = range snapshotMap {
 
@@ -1127,7 +1123,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
 }
 
 // RetrieveFile retrieve the file in the specifed snapshot.
-func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)()) bool {
+func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
 
 if file.Size == 0 {
 return true
@@ -1181,7 +1177,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
 }
 
 // FindFile returns the file entry that has the given file name.
-func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) (*Entry) {
+func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry {
 for _, entry := range snapshot.Files {
 if entry.Path == filePath {
 return entry
@@ -1280,7 +1276,6 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
 leftSnapshot = manager.DownloadSnapshot(snapshotID, revisions[0])
 }
 
-
 if len(filePath) > 0 {
 
 manager.DownloadSnapshotContents(leftSnapshot, nil)
@@ -1486,7 +1481,6 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
 LOG_INFO("SNAPSHOT_HISTORY", "%7d:", revision)
 }
 
-
 }
 
 stat, err := os.Stat(joinPath(top, filePath))
@@ -1512,7 +1506,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
 
 // fossilizeChunk turns the chunk into a fossil.
 func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string,
-exclusive bool, collection *FossilCollection) (bool) {
+exclusive bool, collection *FossilCollection) bool {
 if exclusive {
 err := manager.storage.DeleteFile(0, filePath)
 if err != nil {
@@ -1548,7 +1542,7 @@ func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string,
 }
 
 // resurrectChunk turns the fossil back into a chunk
-func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) (bool) {
+func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) bool {
 chunkPath, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
 if err != nil {
 LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", chunkID, err)
@@ -1571,8 +1565,6 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
 return true
 }
 
-
-
 // PruneSnapshots deletes snapshots by revisions, tags, or a retention policy. The main idea is two-step
 // fossil collection.
 // 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
@@ -2113,7 +2105,6 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
 }
 }
 
-
 }
 
 // Save the fossil collection if it is not empty.
@@ -2181,7 +2172,6 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
 return true
 }
 
-
 // CheckSnapshot performs sanity checks on the given snapshot.
 func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
 
@@ -5,15 +5,15 @@
 package duplicacy
 
 import (
-"testing"
-"os"
+"crypto/rand"
+"encoding/hex"
+"encoding/json"
 "fmt"
-"time"
+"os"
 "path"
 "strings"
-"crypto/rand"
-"encoding/json"
-"encoding/hex"
+"testing"
+"time"
 )
 
 func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
@@ -6,14 +6,14 @@ package duplicacy
 
 import (
 "fmt"
-"regexp"
-"strings"
-"strconv"
-"os"
-"net"
-"path"
 "io/ioutil"
+"net"
+"os"
+"path"
+"regexp"
 "runtime"
+"strconv"
+"strings"
 
 "golang.org/x/crypto/ssh"
 "golang.org/x/crypto/ssh/agent"
@@ -47,16 +47,16 @@ type Storage interface {
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-IsCacheNeeded() (bool)
+IsCacheNeeded() bool
 
 // If the 'MoveFile' method is implemented.
-IsMoveFileImplemented() (bool)
+IsMoveFileImplemented() bool
 
 // If the storage can guarantee strong consistency.
-IsStrongConsistent() (bool)
+IsStrongConsistent() bool
 
 // If the storage supports fast listing of files names.
-IsFastListing() (bool)
+IsFastListing() bool
 
 // Enable the test mode.
 EnableTestMode()
@@ -274,8 +274,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 
 }
 
-authMethods := [] ssh.AuthMethod {
-}
+authMethods := []ssh.AuthMethod{}
 passwordAuthMethods := []ssh.AuthMethod{
 ssh.PasswordCallback(passwordCallback),
 ssh.KeyboardInteractive(keyboardInteractive),
@@ -5,19 +5,19 @@
 package duplicacy
 
 import (
-"os"
-"fmt"
-"time"
-"flag"
-"path"
-"testing"
-"strings"
-"strconv"
-"io/ioutil"
 "crypto/sha256"
 "encoding/hex"
 "encoding/json"
+"flag"
+"fmt"
+"io/ioutil"
+"os"
+"path"
 "runtime/debug"
+"strconv"
+"strings"
+"testing"
+"time"
 
 crypto_rand "crypto/rand"
 "math/rand"
@@ -396,7 +396,6 @@ func TestStorage(t *testing.T) {
 
 chunk := CreateChunk(config, true)
 
-
 for _, chunkID := range chunks {
 
 chunk.Reset(false)
@@ -5,21 +5,21 @@
 package duplicacy
 
 import (
-"fmt"
-"os"
 "bufio"
+"crypto/sha256"
+"fmt"
 "io"
-"time"
+"os"
 "path"
 "path/filepath"
 "regexp"
-"strings"
-"strconv"
 "runtime"
-"crypto/sha256"
+"strconv"
+"strings"
+"time"
 
-"golang.org/x/crypto/pbkdf2"
 "github.com/gilbertchen/gopass"
+"golang.org/x/crypto/pbkdf2"
 )
 
 var RunInBackground bool = false
@@ -41,7 +41,7 @@ func init() {
 
 }
 
-func CreateRateLimitedReader(content []byte, rate int) (*RateLimitedReader) {
+func CreateRateLimitedReader(content []byte, rate int) *RateLimitedReader {
 return &RateLimitedReader{
 Content: content,
 Rate: float64(rate * 1024),
@@ -84,7 +84,7 @@ func IsValidRegex(pattern string) (valid bool, err error) {
 }
 }
 
-func (reader *RateLimitedReader) Length() (int64) {
+func (reader *RateLimitedReader) Length() int64 {
 return int64(len(reader.Content))
 }
 
@@ -164,7 +164,7 @@ func GenerateKeyFromPassword(password string) []byte {
 }
 
 // Get password from preference, env, but don't start any keyring request
-func GetPasswordFromPreference(preference Preference, passwordType string) (string) {
+func GetPasswordFromPreference(preference Preference, passwordType string) string {
 passwordID := passwordType
 if preference.Name != "default" {
 passwordID = preference.Name + "_" + passwordID
@@ -196,7 +196,7 @@ func GetPasswordFromPreference(preference Preference, passwordType string) (stri
 
 // GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
 func GetPassword(preference Preference, passwordType string, prompt string,
-showPassword bool, resetPassword bool) (string) {
+showPassword bool, resetPassword bool) string {
 passwordID := passwordType
 password := GetPasswordFromPreference(preference, passwordType)
 if password != "" {
@@ -386,7 +386,7 @@ func joinPath(components ...string) string {
 return combinedPath
 }
 
-func PrettyNumber(number int64) (string) {
+func PrettyNumber(number int64) string {
 
 G := int64(1024 * 1024 * 1024)
 M := int64(1024 * 1024)
@@ -405,7 +405,7 @@ func PrettyNumber(number int64) (string) {
 }
 }
 
-func PrettySize(size int64) (string) {
+func PrettySize(size int64) string {
 if size > 1024*1024 {
 return fmt.Sprintf("%.2fM", float64(size)/(1024.0*1024.0))
 } else if size > 1024 {
@@ -415,7 +415,7 @@ func PrettySize(size int64) (string) {
 }
 }
 
-func PrettyTime(seconds int64) (string) {
+func PrettyTime(seconds int64) string {
 
 day := int64(3600 * 24)
 
@@ -431,7 +431,7 @@ func PrettyTime(seconds int64) (string) {
 }
 }
 
-func AtoSize(sizeString string) (int) {
+func AtoSize(sizeString string) int {
 sizeString = strings.ToLower(sizeString)
 
 sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
@@ -451,7 +451,7 @@ func AtoSize(sizeString string) (int) {
 return size
 }
 
-func MinInt(x, y int) (int) {
+func MinInt(x, y int) int {
 if x < y {
 return x
 }
@@ -7,10 +7,10 @@
 package duplicacy
 
 import (
-"os"
 "bytes"
-"syscall"
+"os"
 "path/filepath"
+"syscall"
 
 "github.com/gilbertchen/xattr"
 )
@@ -31,7 +31,7 @@ func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
 }
 }
 
-func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
+func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
 stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
 if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
 if entry.UID != -1 && entry.GID != -1 {
@@ -5,15 +5,14 @@
 package duplicacy
 
 import (
+"bytes"
 "io"
 "io/ioutil"
 "time"
-"bytes"
 
 crypto_rand "crypto/rand"
 
 "testing"
-
 )
 
 func TestMatchPattern(t *testing.T) {
@@ -5,8 +5,8 @@
 package duplicacy
 
 import (
-"os"
 "fmt"
+"os"
 "syscall"
 "unsafe"
 )
@@ -36,6 +36,7 @@ type reparseDataBuffer struct {
 // GenericReparseBuffer
 reparseBuffer byte
 }
+
 const (
 FSCTL_GET_REPARSE_POINT = 0x900A8
 MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
@@ -103,7 +104,7 @@ func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
 entry.GID = -1
 }
 
-func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
+func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
 return true
 }
 
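The hunks above are mechanical formatting changes rather than behavioral ones: each import block is regrouped and sorted alphabetically, redundant parentheses around single unnamed return types are dropped, and stray blank lines are normalized. As an illustration only, on a hypothetical file that is not part of this commit, the two main rewrites look like this after formatting (the pre-formatting shapes are described in the comments):

package main

import (
	"fmt"
	"strings"
)

// Before formatting this was written as: func IsCacheNeeded() (bool) { return true }
// gofmt drops the parentheses around the single unnamed return type.
func IsCacheNeeded() bool { return true }

// Before formatting the import block listed "strings" before "fmt";
// goimports sorts each import group alphabetically.
func main() {
	fmt.Println(strings.ToUpper("formatted"), IsCacheNeeded())
}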