mirror of
https://github.com/gilbertchen/duplicacy
synced 2025-12-14 23:33:18 +00:00
Run goimports on all source files
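For context, goimports rewrites each file's import block (sorting, grouping, adding missing and dropping unused entries) and also applies standard gofmt formatting, which is why the hunks below touch operator spacing and composite-literal braces as well as imports. The commit is equivalent to running `goimports -w .` at the repository root. A minimal sketch of the same transformation driven from Go, using the golang.org/x/tools/imports package that backs the goimports command (the file name and input snippet are illustrative, not taken from this commit):

package main

import (
    "fmt"
    "log"

    "golang.org/x/tools/imports"
)

func main() {
    // An unsorted import block, like the "before" side of the hunks below.
    src := []byte(`package main

import (
	"strings"
	"fmt"
)

func main() { fmt.Println(strings.ToUpper("ok")) }
`)
    // Process sorts and groups the imports and applies gofmt formatting;
    // a nil Options value selects the same defaults the goimports CLI uses.
    out, err := imports.Process("main.go", src, nil)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s", out)
}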
@@ -5,22 +5,23 @@
 package main
 
 import (
-    "os"
+    "encoding/json"
     "fmt"
+    "os"
+    "os/exec"
+    "os/signal"
     "path"
     "path/filepath"
     "regexp"
-    "strings"
-    "strconv"
     "runtime"
-    "os/exec"
-    "os/signal"
-    "encoding/json"
+    "strconv"
+    "strings"
 
     "github.com/gilbertchen/cli"
 
-    "github.com/gilbertchen/duplicacy/src"
     "io/ioutil"
 
+    "github.com/gilbertchen/duplicacy/src"
 )
 
 const (
@@ -79,7 +80,7 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
     return repository, preference
 }
 
-func getRevisions(context *cli.Context) (revisions[] int) {
+func getRevisions(context *cli.Context) (revisions []int) {
 
     flags := context.StringSlice("r")
 
@@ -148,8 +149,8 @@ func runScript(context *cli.Context, storageName string, phase string) bool {
 
     preferencePath := duplicacy.GetDuplicacyPreferencePath()
     scriptDir, _ := filepath.Abs(path.Join(preferencePath, "scripts"))
-    scriptNames := []string { phase + "-" + context.Command.Name,
-        storageName + "-" + phase + "-" + context.Command.Name }
+    scriptNames := []string{phase + "-" + context.Command.Name,
+        storageName + "-" + phase + "-" + context.Command.Name}
 
     script := ""
     for _, scriptName := range scriptNames {
@@ -239,7 +240,6 @@ func configRepository(context *cli.Context, init bool) {
         preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
     }
 
-
     if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
         duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
         return
@@ -273,10 +273,10 @@ func configRepository(context *cli.Context, init bool) {
         }
     }
 
-    preference := duplicacy.Preference {
+    preference := duplicacy.Preference{
         Name: storageName,
-        SnapshotID : snapshotID,
-        StorageURL : storageURL,
+        SnapshotID: snapshotID,
+        StorageURL: storageURL,
         Encrypted: context.Bool("encrypt"),
     }
 
@@ -318,7 +318,7 @@ func configRepository(context *cli.Context, init bool) {
     }
 
     size := 1
-    for size * 2 <= averageChunkSize {
+    for size*2 <= averageChunkSize {
         size *= 2
     }
 
@@ -678,7 +678,7 @@ func restoreRepository(context *cli.Context) {
     deleteMode := context.Bool("delete")
     showStatistics := context.Bool("stats")
 
-    var patterns [] string
+    var patterns []string
     for _, pattern := range context.Args() {
 
         pattern = strings.TrimSpace(pattern)
@@ -890,7 +890,6 @@ func diff(context *cli.Context) {
         return
     }
 
-
     password := ""
     if preference.Encrypted {
         password = duplicacy.GetPassword(*preference, "password", "Enter storage password:", false, false)
@@ -1058,7 +1057,6 @@ func copySnapshots(context *cli.Context) {
     sourceManager.SetupSnapshotCache(source.Name)
     duplicacy.SavePassword(*source, "password", sourcePassword)
 
-
     _, destination := getRepositoryPreference(context, context.String("to"))
 
     if destination.Name == source.Name {
@@ -1072,7 +1070,6 @@ func copySnapshots(context *cli.Context) {
         return
     }
 
-
     duplicacy.LOG_INFO("STORAGE_SET", "Destination storage set to %s", destination.StorageURL)
     destinationStorage := duplicacy.CreateStorage(*destination, false, threads)
     if destinationStorage == nil {
@@ -1082,7 +1079,7 @@ func copySnapshots(context *cli.Context) {
     destinationPassword := ""
     if destination.Encrypted {
         destinationPassword = duplicacy.GetPassword(*destination, "password",
-            "Enter destination storage password:",false, false)
+            "Enter destination storage password:", false, false)
     }
 
     sourceStorage.SetRateLimits(context.Int("download-limit-rate"), 0)
@@ -1121,7 +1118,7 @@ func infoStorage(context *cli.Context) {
     }
 
     isEncrypted := context.Bool("e")
-    preference := duplicacy.Preference {
+    preference := duplicacy.Preference{
         Name: "default",
         SnapshotID: "default",
         StorageURL: context.Args()[0],
@@ -1154,26 +1151,26 @@ func main() {
 
     app := cli.NewApp()
 
-    app.Commands = []cli.Command {
+    app.Commands = []cli.Command{
         {
             Name: "init",
-            Flags: []cli.Flag {
-                cli.BoolFlag {
+            Flags: []cli.Flag{
+                cli.BoolFlag{
                     Name: "encrypt, e",
                     Usage: "encrypt the storage with a password",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "chunk-size, c",
                     Value: "4M",
                     Usage: "the average size of chunks",
                     Argument: "4M",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "max-chunk-size, max",
                     Usage: "the maximum size of chunks (defaults to chunk-size * 4)",
                     Argument: "16M",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "min-chunk-size, min",
                     Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
                     Argument: "1M",
@@ -1190,41 +1187,41 @@ func main() {
         },
         {
             Name: "backup",
-            Flags: []cli.Flag {
-                cli.BoolFlag {
+            Flags: []cli.Flag{
+                cli.BoolFlag{
                     Name: "hash",
                     Usage: "detect file differences by hash (rather than size and timestamp)",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "t",
                     Usage: "assign a tag to the backup",
                     Argument: "<tag>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "stats",
                     Usage: "show statistics during and after backup",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "threads",
                     Value: 1,
                     Usage: "number of uploading threads",
                     Argument: "<n>",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "limit-rate",
                     Value: 0,
                     Usage: "the maximum upload rate (in kilobytes/sec)",
                     Argument: "<kB/s>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "dry-run",
                     Usage: "Dry run for testing, don't backup anything. Use with -stats and -d",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "vss",
                     Usage: "enable the Volume Shadow Copy service (Windows only)",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "backup to the specified storage instead of the default one",
                     Argument: "<storage name>",
@@ -1237,41 +1234,41 @@ func main() {
 
         {
             Name: "restore",
-            Flags: []cli.Flag {
-                cli.IntFlag {
+            Flags: []cli.Flag{
+                cli.IntFlag{
                     Name: "r",
                     Usage: "the revision number of the snapshot (required)",
                     Argument: "<revision>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "hash",
                     Usage: "detect file differences by hash (rather than size and timestamp)",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "overwrite",
                     Usage: "overwrite existing files in the repository",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "delete",
                     Usage: "delete files not in the snapshot",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "stats",
                     Usage: "show statistics during and after restore",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "threads",
                     Value: 1,
                     Usage: "number of downloading threads",
                     Argument: "<n>",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "limit-rate",
                     Value: 0,
                     Usage: "the maximum download rate (in kilobytes/sec)",
                     Argument: "<kB/s>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "restore from the specified storage instead of the default one",
                     Argument: "<storage name>",
@@ -1284,39 +1281,39 @@ func main() {
 
         {
             Name: "list",
-            Flags: []cli.Flag {
-                cli.BoolFlag {
+            Flags: []cli.Flag{
+                cli.BoolFlag{
                     Name: "all, a",
                     Usage: "list snapshots with any id",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "id",
                     Usage: "list snapshots with the specified id rather than the default one",
                     Argument: "<snapshot id>",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "r",
                     Usage: "the revision number of the snapshot",
                     Argument: "<revision>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "t",
                     Usage: "list snapshots with the specified tag",
                     Argument: "<tag>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "files",
                     Usage: "print the file list in each snapshot",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "chunks",
                     Usage: "print chunks in each snapshot or all chunks if no snapshot specified",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "reset-passwords",
                     Usage: "take passwords from input rather than keychain/keyring",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "retrieve snapshots from the specified storage",
                     Argument: "<storage name>",
@@ -1328,47 +1325,47 @@ func main() {
         },
         {
             Name: "check",
-            Flags: []cli.Flag {
-                cli.BoolFlag {
+            Flags: []cli.Flag{
+                cli.BoolFlag{
                     Name: "all, a",
                     Usage: "check snapshots with any id",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "id",
                     Usage: "check snapshots with the specified id rather than the default one",
                     Argument: "<snapshot id>",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "r",
                     Usage: "the revision number of the snapshot",
                     Argument: "<revision>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "t",
                     Usage: "check snapshots with the specified tag",
                     Argument: "<tag>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "fossils",
                     Usage: "search fossils if a chunk can't be found",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "resurrect",
                     Usage: "turn referenced fossils back into chunks",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "files",
                     Usage: "verify the integrity of every file",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "stats",
                     Usage: "show deduplication statistics (imply -all and all revisions)",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "tabular",
                     Usage: "show tabular usage and deduplication statistics (imply -stats, -all, and all revisions)",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "retrieve snapshots from the specified storage",
                     Argument: "<storage name>",
@@ -1380,18 +1377,18 @@ func main() {
         },
         {
             Name: "cat",
-            Flags: []cli.Flag {
-                cli.StringFlag {
+            Flags: []cli.Flag{
+                cli.StringFlag{
                     Name: "id",
                     Usage: "retrieve from the snapshot with the specified id",
                     Argument: "<snapshot id>",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "r",
                     Usage: "the revision number of the snapshot",
                     Argument: "<revision>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "retrieve the file from the specified storage",
                     Argument: "<storage name>",
@@ -1404,22 +1401,22 @@ func main() {
 
         {
             Name: "diff",
-            Flags: []cli.Flag {
-                cli.StringFlag {
+            Flags: []cli.Flag{
+                cli.StringFlag{
                     Name: "id",
                     Usage: "diff snapshots with the specified id",
                     Argument: "<snapshot id>",
                 },
-                cli.IntSliceFlag {
+                cli.IntSliceFlag{
                     Name: "r",
                     Usage: "the revision number of the snapshot",
                     Argument: "<revision>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "hash",
                     Usage: "compute the hashes of on-disk files",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "retrieve files from the specified storage",
                     Argument: "<storage name>",
@@ -1432,22 +1429,22 @@ func main() {
 
         {
             Name: "history",
-            Flags: []cli.Flag {
-                cli.StringFlag {
+            Flags: []cli.Flag{
+                cli.StringFlag{
                     Name: "id",
                     Usage: "find the file in the snapshot with the specified id",
                     Argument: "<snapshot id>",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "r",
                     Usage: "show history of the specified revisions",
                     Argument: "<revision>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "hash",
                     Usage: "show the hash of the on-disk file",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "retrieve files from the specified storage",
                     Argument: "<storage name>",
@@ -1460,57 +1457,57 @@ func main() {
 
         {
             Name: "prune",
-            Flags: []cli.Flag {
-                cli.StringFlag {
+            Flags: []cli.Flag{
+                cli.StringFlag{
                     Name: "id",
                     Usage: "delete snapshots with the specified id instead of the default one",
                     Argument: "<snapshot id>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "all, a",
                     Usage: "match against all snapshot IDs",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "r",
                     Usage: "delete snapshots with the specified revisions",
                     Argument: "<revision>",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "t",
                     Usage: "delete snapshots with the specifed tags",
                     Argument: "<tag>",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "keep",
                     Usage: "keep 1 snapshot every n days for snapshots older than m days",
                     Argument: "<n:m>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "exhaustive",
                     Usage: "remove all unreferenced chunks (not just those referenced by deleted snapshots)",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "exclusive",
                     Usage: "assume exclusive acess to the storage (disable two-step fossil collection)",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "dry-run, d",
                     Usage: "show what would have been deleted",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "delete-only",
                     Usage: "delete fossils previously collected (if deletable) and don't collect fossils",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "collect-only",
                     Usage: "identify and collect fossils, but don't delete fossils previously collected",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "ignore",
                     Usage: "ignore snapshots with the specified id when deciding if fossils can be deleted",
                     Argument: "<id>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "prune snapshots from the specified storage",
                     Argument: "<storage name>",
@@ -1521,11 +1518,10 @@ func main() {
             Action: pruneSnapshots,
         },
 
-
         {
             Name: "password",
-            Flags: []cli.Flag {
-                cli.StringFlag {
+            Flags: []cli.Flag{
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "change the password used to access the specified storage",
                     Argument: "<storage name>",
@@ -1538,28 +1534,28 @@ func main() {
 
         {
             Name: "add",
-            Flags: []cli.Flag {
-                cli.BoolFlag {
+            Flags: []cli.Flag{
+                cli.BoolFlag{
                     Name: "encrypt, e",
                     Usage: "Encrypt the storage with a password",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "chunk-size, c",
                     Value: "4M",
                     Usage: "the average size of chunks",
                     Argument: "4M",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "max-chunk-size, max",
                     Usage: "the maximum size of chunks (defaults to chunk-size * 4)",
                     Argument: "16M",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "min-chunk-size, min",
                     Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
                     Argument: "1M",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "copy",
                     Usage: "make the new storage compatible with an existing one to allow for copy operations",
                     Argument: "<storage name>",
@@ -1572,40 +1568,40 @@ func main() {
 
         {
             Name: "set",
-            Flags: []cli.Flag {
-                cli.GenericFlag {
+            Flags: []cli.Flag{
+                cli.GenericFlag{
                     Name: "encrypt, e",
                     Usage: "encrypt the storage with a password",
                     Value: &TriBool{},
                     Arg: "true",
                 },
-                cli.GenericFlag {
+                cli.GenericFlag{
                     Name: "no-backup",
                     Usage: "backup to this storage is prohibited",
                     Value: &TriBool{},
                     Arg: "true",
                 },
-                cli.GenericFlag {
+                cli.GenericFlag{
                     Name: "no-restore",
                     Usage: "restore from this storage is prohibited",
                     Value: &TriBool{},
                     Arg: "true",
                 },
-                cli.GenericFlag {
+                cli.GenericFlag{
                     Name: "no-save-password",
                     Usage: "don't save password or access keys to keychain/keyring",
                     Value: &TriBool{},
                     Arg: "true",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "key",
                     Usage: "add a key/password whose value is supplied by the -value option",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "value",
                     Usage: "the value of the key/password",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "storage",
                     Usage: "use the specified storage instead of the default one",
                     Argument: "<storage name>",
@@ -1617,40 +1613,40 @@ func main() {
         },
         {
             Name: "copy",
-            Flags: []cli.Flag {
-                cli.StringFlag {
+            Flags: []cli.Flag{
+                cli.StringFlag{
                     Name: "id",
                     Usage: "copy snapshots with the specified id instead of all snapshot ids",
                     Argument: "<snapshot id>",
                 },
-                cli.StringSliceFlag {
+                cli.StringSliceFlag{
                     Name: "r",
                     Usage: "copy snapshots with the specified revisions",
                     Argument: "<revision>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "from",
                     Usage: "copy snapshots from the specified storage",
                     Argument: "<storage name>",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "to",
                     Usage: "copy snapshots to the specified storage",
                     Argument: "<storage name>",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "download-limit-rate",
                     Value: 0,
                     Usage: "the maximum download rate (in kilobytes/sec)",
                     Argument: "<kB/s>",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "upload-limit-rate",
                     Value: 0,
                     Usage: "the maximum upload rate (in kilobytes/sec)",
                     Argument: "<kB/s>",
                 },
-                cli.IntFlag {
+                cli.IntFlag{
                     Name: "threads",
                     Value: 1,
                     Usage: "number of downloading threads",
@@ -1664,17 +1660,17 @@ func main() {
 
         {
             Name: "info",
-            Flags: []cli.Flag {
-                cli.BoolFlag {
+            Flags: []cli.Flag{
+                cli.BoolFlag{
                     Name: "encrypt, e",
                     Usage: "The storage is encrypted with a password",
                 },
-                cli.StringFlag {
+                cli.StringFlag{
                     Name: "repository",
                     Usage: "retrieve saved passwords from the specified repository",
                     Argument: "<repository directory>",
                 },
-                cli.BoolFlag {
+                cli.BoolFlag{
                     Name: "reset-passwords",
                     Usage: "take passwords from input rather than keychain/keyring",
                 },
@@ -1683,31 +1679,30 @@ func main() {
             ArgsUsage: "<storage url>",
             Action: infoStorage,
         },
-
     }
 
-    app.Flags = []cli.Flag {
-        cli.BoolFlag {
+    app.Flags = []cli.Flag{
+        cli.BoolFlag{
            Name: "verbose, v",
            Usage: "show more detailed information",
        },
-        cli.BoolFlag {
+        cli.BoolFlag{
            Name: "debug, d",
            Usage: "show even more detailed information, useful for debugging",
        },
-        cli.BoolFlag {
+        cli.BoolFlag{
            Name: "log",
            Usage: "enable log-style output",
        },
-        cli.BoolFlag {
+        cli.BoolFlag{
            Name: "stack",
            Usage: "print the stack trace when an error occurs",
        },
-        cli.BoolFlag {
+        cli.BoolFlag{
            Name: "no-script",
            Usage: "do not run script before or after command execution",
        },
-        cli.BoolFlag {
+        cli.BoolFlag{
            Name: "background",
            Usage: "read passwords, tokens, or keys only from keychain/keyring or env",
        },
@@ -5,16 +5,16 @@
 package duplicacy
 
 import (
-    "fmt"
-    "time"
     "bytes"
-    "sync"
-    "io/ioutil"
     "encoding/json"
+    "fmt"
     "io"
-    "net/http"
-    "mime/multipart"
+    "io/ioutil"
     "math/rand"
+    "mime/multipart"
+    "net/http"
+    "sync"
+    "time"
 
     "golang.org/x/oauth2"
 )
@@ -29,6 +29,7 @@ func (err ACDError) Error() string {
 }
 
 var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"
 
 type ACDClient struct {
     HTTPClient *http.Client
+
@@ -42,7 +43,6 @@ type ACDClient struct {
     TestMode bool
 }
 
-
 func NewACDClient(tokenFile string) (*ACDClient, error) {
 
     description, err := ioutil.ReadFile(tokenFile)
@@ -106,7 +106,7 @@ func (client *ACDClient) call(url string, method string, input interface{}, cont
 
     if url != ACDRefreshTokenURL {
         client.TokenLock.Lock()
-        request.Header.Set("Authorization", "Bearer " + client.Token.AccessToken)
+        request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
         client.TokenLock.Unlock()
     }
     if contentType != "" {
@@ -126,20 +126,20 @@ func (client *ACDClient) call(url string, method string, input interface{}, cont
         buffer := new(bytes.Buffer)
         buffer.ReadFrom(response.Body)
         response.Body.Close()
-        return nil, 0, ACDError { Status: response.StatusCode, Message: buffer.String()}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: buffer.String()}
     }
 
     if response.StatusCode == 400 {
         defer response.Body.Close()
 
-        e := &ACDError {
+        e := &ACDError{
             Status: response.StatusCode,
         }
 
         if err := json.NewDecoder(response.Body).Decode(e); err == nil {
             return nil, 0, e
         } else {
-            return nil, 0, ACDError { Status: response.StatusCode, Message: "Bad input parameter"}
+            return nil, 0, ACDError{Status: response.StatusCode, Message: "Bad input parameter"}
         }
     }
 
@@ -148,7 +148,7 @@ func (client *ACDClient) call(url string, method string, input interface{}, cont
     if response.StatusCode == 401 {
 
         if url == ACDRefreshTokenURL {
-            return nil, 0, ACDError { Status: response.StatusCode, Message: "Unauthorized"}
+            return nil, 0, ACDError{Status: response.StatusCode, Message: "Unauthorized"}
         }
 
         err = client.RefreshToken()
@@ -158,15 +158,15 @@ func (client *ACDClient) call(url string, method string, input interface{}, cont
 
         continue
     } else if response.StatusCode == 403 {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Forbidden"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Forbidden"}
     } else if response.StatusCode == 404 {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Resource not found"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Resource not found"}
     } else if response.StatusCode == 409 {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Conflict"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Conflict"}
     } else if response.StatusCode == 411 {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Length required"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Length required"}
     } else if response.StatusCode == 412 {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Precondition failed"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Precondition failed"}
     } else if response.StatusCode == 429 || response.StatusCode == 500 {
         reason := "Too many requests"
         if response.StatusCode == 500 {
@@ -178,9 +178,9 @@ func (client *ACDClient) call(url string, method string, input interface{}, cont
         backoff *= 2
         continue
     } else if response.StatusCode == 503 {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Service unavailable"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Service unavailable"}
     } else {
-        return nil, 0, ACDError { Status: response.StatusCode, Message: "Unknown error"}
+        return nil, 0, ACDError{Status: response.StatusCode, Message: "Unknown error"}
     }
 }
 
@@ -231,7 +231,7 @@ func (client *ACDClient) GetEndpoint() (err error) {
 
     defer readCloser.Close()
 
-    output := &ACDGetEndpointOutput {}
+    output := &ACDGetEndpointOutput{}
 
     if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
         return err
@@ -287,7 +287,7 @@ func (client *ACDClient) ListEntries(parentID string, listFiles bool) ([]ACDEntr
 
     defer readCloser.Close()
 
-    output := &ACDListEntriesOutput {}
+    output := &ACDListEntriesOutput{}
 
     if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
         return nil, err
@@ -321,7 +321,7 @@ func (client *ACDClient) ListByName(parentID string, name string) (string, bool,
 
     defer readCloser.Close()
 
-    output := &ACDListEntriesOutput {}
+    output := &ACDListEntriesOutput{}
 
     if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
         return "", false, 0, err
@@ -351,7 +351,7 @@ func (client *ACDClient) UploadFile(parentID string, name string, content []byte
     metadata := make(map[string]interface{})
     metadata["name"] = name
     metadata["kind"] = "FILE"
-    metadata["parents"] = []string{ parentID }
+    metadata["parents"] = []string{parentID}
 
     metadataJSON, err := json.Marshal(metadata)
     if err != nil {
@@ -389,7 +389,7 @@ func (client *ACDClient) UploadFile(parentID string, name string, content []byte
 
     defer readCloser.Close()
 
-    entry := ACDEntry {}
+    entry := ACDEntry{}
     if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
         return "", err
     }
@@ -434,7 +434,7 @@ func (client *ACDClient) CreateDirectory(parentID string, name string) (string,
     parameters := make(map[string]interface{})
     parameters["name"] = name
     parameters["kind"] = "FOLDER"
-    parameters["parents"] = []string {parentID}
+    parameters["parents"] = []string{parentID}
 
     readCloser, _, err := client.call(url, "POST", parameters, "")
     if err != nil {
@@ -443,7 +443,7 @@ func (client *ACDClient) CreateDirectory(parentID string, name string) (string,
 
     defer readCloser.Close()
 
-    entry := ACDEntry {}
+    entry := ACDEntry{}
     if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
         return "", err
     }
@@ -5,11 +5,11 @@
 package duplicacy
 
 import (
-    "io"
-    "fmt"
-    "testing"
     "crypto/sha256"
     "encoding/hex"
+    "fmt"
+    "io"
+    "testing"
 
     crypto_rand "crypto/rand"
     "math/rand"
@@ -81,7 +81,7 @@ func TestACDClient(t *testing.T) {
     maxFileSize := 64 * 1024
 
     for i := 0; i < numberOfFiles; i++ {
-        content := make([]byte, rand.Int() % maxFileSize + 1)
+        content := make([]byte, rand.Int()%maxFileSize+1)
         _, err = crypto_rand.Read(content)
         if err != nil {
             t.Errorf("Error generating random content: %v", err)
@@ -28,7 +28,7 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
        return nil, err
    }
 
-    storage = &ACDStorage {
+    storage = &ACDStorage{
        client: client,
        idCache: make(map[string]string),
        idCacheLock: &sync.Mutex{},
@@ -42,7 +42,7 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
 
    storage.idCache[""] = storagePathID
 
-    for _, dir := range []string { "chunks", "fossils", "snapshots" } {
+    for _, dir := range []string{"chunks", "fossils", "snapshots"} {
        dirID, isDir, _, err := client.ListByName(storagePathID, dir)
        if err != nil {
            return nil, err
@@ -53,7 +53,7 @@ func CreateACDStorage(tokenFile string, storagePath string, threads int) (storag
                return nil, err
            }
        } else if !isDir {
-            return nil, fmt.Errorf("%s/%s is not a directory", storagePath + "/" + dir)
+            return nil, fmt.Errorf("%s/%s is not a directory", storagePath+"/"+dir)
        }
        storage.idCache[dir] = dirID
    }
@@ -88,10 +88,9 @@ func (storage *ACDStorage) deletePathID(path string) {
    storage.idCacheLock.Unlock()
 }
 
-
-func (storage *ACDStorage) convertFilePath(filePath string) (string) {
+func (storage *ACDStorage) convertFilePath(filePath string) string {
    if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
-        return "fossils/" + filePath[len("chunks/"):len(filePath) - len(".fsl")]
+        return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
    }
    return filePath
 }
@@ -113,13 +112,13 @@ func (storage *ACDStorage) getIDFromPath(threadIndex int, path string) (fileID s
            return "", false, 0, err
        }
        if parentID == "" {
-            if i == len(names) - 1 {
+            if i == len(names)-1 {
                return "", false, 0, nil
            } else {
                return "", false, 0, fmt.Errorf("File path '%s' does not exist", path)
            }
        }
-        if i != len(names) - 1 && !isDir {
+        if i != len(names)-1 && !isDir {
            return "", false, 0, fmt.Errorf("Invalid path %s", path)
        }
    }
@@ -131,8 +130,8 @@ func (storage *ACDStorage) getIDFromPath(threadIndex int, path string) (fileID s
 func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
    var err error
 
-    for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-        dir = dir[:len(dir) - 1]
+    for len(dir) > 0 && dir[len(dir)-1] == '/' {
+        dir = dir[:len(dir)-1]
    }
 
    if dir == "snapshots" {
@@ -146,7 +145,7 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 
        for _, entry := range entries {
            storage.savePathID(entry.Name, entry.ID)
-            subDirs = append(subDirs, entry.Name + "/")
+            subDirs = append(subDirs, entry.Name+"/")
        }
        return subDirs, nil, nil
    } else if strings.HasPrefix(dir, "snapshots/") {
@@ -170,27 +169,26 @@ func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
        files := []string{}
 
        for _, entry := range entries {
-            storage.savePathID(dir + "/" + entry.Name, entry.ID)
+            storage.savePathID(dir+"/"+entry.Name, entry.ID)
            files = append(files, entry.Name)
        }
        return files, nil, nil
    } else {
        files := []string{}
        sizes := []int64{}
-        for _, parent := range []string {"chunks", "fossils" } {
+        for _, parent := range []string{"chunks", "fossils"} {
            entries, err := storage.client.ListEntries(storage.getPathID(parent), true)
            if err != nil {
                return nil, nil, err
            }
 
-
            for _, entry := range entries {
                name := entry.Name
                if parent == "fossils" {
                    name += ".fsl"
                }
 
-                storage.savePathID(parent + "/" + entry.Name, entry.ID)
+                storage.savePathID(parent+"/"+entry.Name, entry.ID)
                files = append(files, name)
                sizes = append(sizes, entry.Size)
            }
@@ -259,8 +257,8 @@ func (storage *ACDStorage) MoveFile(threadIndex int, from string, to string) (er
 // CreateDirectory creates a new directory.
 func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err error) {
 
-    for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-        dir = dir[:len(dir) - 1]
+    for len(dir) > 0 && dir[len(dir)-1] == '/' {
+        dir = dir[:len(dir)-1]
    }
 
    if dir == "chunks" || dir == "snapshots" {
@@ -288,8 +286,8 @@ func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err err
 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *ACDStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
 
-    for len(filePath) > 0 && filePath[len(filePath) - 1] == '/' {
-        filePath = filePath[:len(filePath) - 1]
+    for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
+        filePath = filePath[:len(filePath)-1]
    }
 
    filePath = storage.convertFilePath(filePath)
@@ -348,7 +346,7 @@ func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk
 
    defer readCloser.Close()
 
-    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / storage.numberOfThreads)
+    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
    return err
 }
 
@@ -373,7 +371,7 @@ func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content
        storage.savePathID(parent, parentID)
    }
 
-    fileID, err := storage.client.UploadFile(parentID, path.Base(filePath), content, storage.UploadRateLimit / storage.numberOfThreads)
+    fileID, err := storage.client.UploadFile(parentID, path.Base(filePath), content, storage.UploadRateLimit/storage.numberOfThreads)
    if err == nil {
        storage.savePathID(filePath, fileID)
        return nil
@@ -389,16 +387,16 @@ func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *ACDStorage) IsCacheNeeded() (bool) { return true }
+func (storage *ACDStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *ACDStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *ACDStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *ACDStorage) IsStrongConsistent() (bool) { return true }
+func (storage *ACDStorage) IsStrongConsistent() bool { return true }
 
 // If the storage supports fast listing of files names.
-func (storage *ACDStorage) IsFastListing() (bool) { return true }
+func (storage *ACDStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *ACDStorage) EnableTestMode() {}
@@ -43,7 +43,7 @@ func CreateAzureStorage(accountName string, accountKey string,
|
|||||||
return nil, fmt.Errorf("container %s does not exist", containerName)
|
return nil, fmt.Errorf("container %s does not exist", containerName)
|
||||||
}
|
}
|
||||||
|
|
||||||
azureStorage = &AzureStorage {
|
azureStorage = &AzureStorage{
|
||||||
containers: containers,
|
containers: containers,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -62,12 +62,12 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
|
|||||||
Timeout uint
|
Timeout uint
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(dir) > 0 && dir[len(dir) - 1] != '/' {
|
if len(dir) > 0 && dir[len(dir)-1] != '/' {
|
||||||
dir += "/"
|
dir += "/"
|
||||||
}
|
}
|
||||||
dirLength := len(dir)
|
dirLength := len(dir)
|
||||||
|
|
||||||
parameters := storage.ListBlobsParameters {
|
parameters := storage.ListBlobsParameters{
|
||||||
Prefix: dir,
|
Prefix: dir,
|
||||||
Delimiter: "",
|
Delimiter: "",
|
||||||
}
|
}
|
||||||
@@ -84,7 +84,7 @@ func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files
|
|||||||
if dir == "snapshots/" {
|
if dir == "snapshots/" {
|
||||||
for _, blob := range results.Blobs {
|
for _, blob := range results.Blobs {
|
||||||
name := strings.Split(blob.Name[dirLength:], "/")[0]
|
name := strings.Split(blob.Name[dirLength:], "/")[0]
|
||||||
subDirs[name + "/"] = true
|
subDirs[name+"/"] = true
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
for _, blob := range results.Blobs {
|
for _, blob := range results.Blobs {
|
||||||
@@ -175,13 +175,13 @@ func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chun
|
|||||||
|
|
||||||
defer readCloser.Close()
|
defer readCloser.Close()
|
||||||
|
|
||||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.containers))
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.containers))
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadFile writes 'content' to the file at 'filePath'.
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.containers))
|
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
|
||||||
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
blob := storage.containers[threadIndex].GetBlobReference(filePath)
|
||||||
return blob.CreateBlockBlobFromReader(reader, nil)
|
return blob.CreateBlockBlobFromReader(reader, nil)
|
||||||
|
|
||||||
@@ -189,16 +189,16 @@ func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, conten
|
|||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *AzureStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *AzureStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *AzureStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *AzureStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *AzureStorage) IsStrongConsistent() (bool) { return true }
|
func (storage *AzureStorage) IsStrongConsistent() bool { return true }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *AzureStorage) IsFastListing() (bool) { return true }
|
func (storage *AzureStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *AzureStorage) EnableTestMode() {}
|
func (storage *AzureStorage) EnableTestMode() {}
|
||||||
@@ -5,19 +5,19 @@
 package duplicacy
 
 import (
-	"fmt"
-	"time"
 	"bytes"
-	"strconv"
-	"io/ioutil"
-	"encoding/json"
+	"crypto/sha1"
 	"encoding/base64"
 	"encoding/hex"
+	"encoding/json"
+	"fmt"
 	"io"
-	"net/http"
-	"strings"
-	"crypto/sha1"
+	"io/ioutil"
 	"math/rand"
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
 )
 
 type B2Error struct {
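
The reshuffled import block above is the heart of this commit: goimports sorts each import group alphabetically and fixes the block to match actual usage. A minimal sketch of invoking the same machinery programmatically, assuming golang.org/x/tools/imports is available on the module path; the file name and sample source are hypothetical:

	package main

	import (
		"fmt"

		"golang.org/x/tools/imports"
	)

	func main() {
		// A tiny file whose imports are out of order.
		src := []byte("package p\n\nimport (\n\t\"strings\"\n\t\"fmt\"\n)\n\nvar _ = fmt.Sprint(strings.TrimSpace(\" x \"))\n")
		// Process formats the file and fixes the import block; nil selects default options.
		out, err := imports.Process("p.go", src, nil)
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out)) // "fmt" now sorts before "strings"
	}

On the command line the equivalent is goimports -w ., which is presumably how this commit was produced.
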
@@ -51,7 +51,6 @@ type B2Client struct {
 	UploadToken string
 
 	TestMode bool
-
 }
 
 func NewB2Client(accountID string, applicationKey string) *B2Client {
@@ -111,7 +110,7 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
 	}
 
 	if url == B2AuthorizationURL {
-		request.Header.Set("Authorization", "Basic " + base64.StdEncoding.EncodeToString([]byte(client.AccountID + ":" + client.ApplicationKey)))
+		request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.AccountID+":"+client.ApplicationKey)))
 	} else {
 		request.Header.Set("Authorization", client.AuthorizationToken)
 	}
@@ -168,8 +167,7 @@ func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int6
 
 	defer response.Body.Close()
 
-	e := &B2Error {
-	}
+	e := &B2Error{}
 
 	if err := json.NewDecoder(response.Body).Decode(e); err != nil {
 		return nil, 0, err
@@ -197,7 +195,7 @@ func (client *B2Client) AuthorizeAccount() (err error) {
 
 	defer readCloser.Close()
 
-	output := &B2AuthorizeAccountOutput {}
+	output := &B2AuthorizeAccountOutput{}
 
 	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
 		return err
@@ -299,8 +297,7 @@ func (client *B2Client) ListFileNames(startFileName string, singleFile bool, inc
 
 	defer readCloser.Close()
 
-	output := B2ListFileNamesOutput {
-	}
+	output := B2ListFileNamesOutput{}
 
 	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
 		return nil, err
@@ -382,7 +379,7 @@ func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
 
 	defer readCloser.Close()
 
-	output := & B2HideFileOutput {}
+	output := &B2HideFileOutput{}
 
 	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
 		return "", err
@@ -405,7 +402,7 @@ type B2GetUploadArgumentOutput struct {
 	AuthorizationToken string
 }
 
-func (client *B2Client) getUploadURL() (error) {
+func (client *B2Client) getUploadURL() error {
 	input := make(map[string]string)
 	input["bucketId"] = client.BucketID
 
@@ -417,7 +414,7 @@ func (client *B2Client) getUploadURL() (error) {
 
 	defer readCloser.Close()
 
-	output := & B2GetUploadArgumentOutput {}
+	output := &B2GetUploadArgumentOutput{}
 
 	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
 		return err
@@ -431,7 +428,6 @@ func (client *B2Client) getUploadURL() (error) {
 
 func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
 
-
 	hasher := sha1.New()
 	hasher.Write(content)
 	hash := hex.EncodeToString(hasher.Sum(nil))
@@ -517,4 +513,3 @@ func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit in
 
 	return fmt.Errorf("Maximum backoff reached")
 }
-
@@ -5,15 +5,15 @@
 package duplicacy
 
 import (
-	"testing"
 	"crypto/sha256"
 	"encoding/hex"
 	"encoding/json"
+	"testing"
 
 	crypto_rand "crypto/rand"
-	"math/rand"
 	"io"
 	"io/ioutil"
+	"math/rand"
 )
 
 func createB2ClientForTest(t *testing.T) (*B2Client, string) {
@@ -79,7 +79,7 @@ func TestB2Client(t *testing.T) {
 
 	maxSize := 10000
 	for i := 0; i < 20; i++ {
-		size := rand.Int() % maxSize + 1
+		size := rand.Int()%maxSize + 1
 		content := make([]byte, size)
 		_, err := crypto_rand.Read(content)
 		if err != nil {
@@ -90,7 +90,7 @@ func TestB2Client(t *testing.T) {
 		hash := sha256.Sum256(content)
 		name := hex.EncodeToString(hash[:])
 
-		err = b2Client.UploadFile(testDirectory + name, content, 100)
+		err = b2Client.UploadFile(testDirectory+name, content, 100)
 		if err != nil {
 			t.Errorf("Error uploading file '%s': %v", name, err)
 			return
@@ -118,7 +118,7 @@ func TestB2Client(t *testing.T) {
 
 		hash := hex.EncodeToString(hasher.Sum(nil))
 
-		if testDirectory + hash != file.FileName {
+		if testDirectory+hash != file.FileName {
 			t.Errorf("File %s has hash %s", file.FileName, hash)
 		}
 
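
Note the asymmetry in size := rand.Int()%maxSize + 1 above: gofmt uses spacing to mirror operator precedence, tightening the higher-precedence % while leaving the lower-precedence + spaced. The same rule drives most of the a / b to a/b rewrites in this commit. A small sketch, with a hypothetical function f:

	package main

	import (
		"fmt"
		"go/format"
	)

	func main() {
		src := []byte("package p\n\nfunc f(a, b int) int { return a % b + 1 }\n")
		out, err := format.Source(src)
		if err != nil {
			panic(err)
		}
		// The % binds tighter than +, so the printer emits: return a%b + 1
		fmt.Print(string(out))
	}
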
@@ -35,7 +35,7 @@ func CreateB2Storage(accountID string, applicationKey string, bucket string, thr
 		clients = append(clients, client)
 	}
 
-	storage = &B2Storage {
+	storage = &B2Storage{
 		clients: clients,
 	}
 	return storage, nil
@@ -43,8 +43,8 @@ func CreateB2Storage(accountID string, applicationKey string, bucket string, thr
 
 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
-	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-		dir = dir[:len(dir) - 1]
+	for len(dir) > 0 && dir[len(dir)-1] == '/' {
+		dir = dir[:len(dir)-1]
 	}
 	length := len(dir) + 1
 
@@ -65,7 +65,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
 		for _, entry := range entries {
 			name := entry.FileName[length:]
 			subDir := strings.Split(name, "/")[0]
-			subDirs[subDir + "/"] = true
+			subDirs[subDir+"/"] = true
 		}
 
 		for subDir, _ := range subDirs {
@@ -79,7 +79,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
 			}
 			lastFile = entry.FileName
 			if entry.Action == "hide" {
-				files = append(files, entry.FileName[length:] + ".fsl")
+				files = append(files, entry.FileName[length:]+".fsl")
 			} else {
 				files = append(files, entry.FileName[length:])
 			}
@@ -98,7 +98,7 @@ func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string
 func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err error) {
 
 	if strings.HasSuffix(filePath, ".fsl") {
-		filePath = filePath[:len(filePath) - len(".fsl")]
+		filePath = filePath[:len(filePath)-len(".fsl")]
 		entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
 		if err != nil {
 			return err
@@ -107,7 +107,7 @@ func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err erro
 		toBeDeleted := false
 
 		for _, entry := range entries {
-			if entry.FileName != filePath || (!toBeDeleted && entry.Action != "hide" ) {
+			if entry.FileName != filePath || (!toBeDeleted && entry.Action != "hide") {
 				continue
 			}
 
@@ -141,12 +141,12 @@ func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err
 
 	if strings.HasSuffix(from, ".fsl") {
 		filePath = to
-		if from != to + ".fsl" {
+		if from != to+".fsl" {
 			filePath = ""
 		}
 	} else if strings.HasSuffix(to, ".fsl") {
 		filePath = from
-		if to != from + ".fsl" {
+		if to != from+".fsl" {
 			filePath = ""
 		}
 	}
@@ -182,7 +182,7 @@ func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist b
 	isFossil := false
 	if strings.HasSuffix(filePath, ".fsl") {
 		isFossil = true
-		filePath = filePath[:len(filePath) - len(".fsl")]
+		filePath = filePath[:len(filePath)-len(".fsl")]
 	}
 
 	entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
@@ -225,27 +225,27 @@ func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *
 
 	defer readCloser.Close()
 
-	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.clients))
+	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
 	return err
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
-	return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit / len(storage.clients))
+	return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
 }
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *B2Storage) IsCacheNeeded() (bool) { return true }
+func (storage *B2Storage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *B2Storage) IsMoveFileImplemented() (bool) { return true }
+func (storage *B2Storage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *B2Storage) IsStrongConsistent() (bool) { return true }
+func (storage *B2Storage) IsStrongConsistent() bool { return true }
 
 // If the storage supports fast listing of files names.
-func (storage *B2Storage) IsFastListing() (bool) { return true }
+func (storage *B2Storage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *B2Storage) EnableTestMode() {
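
The &B2Storage { to &B2Storage{ rewrites here and in the other backends follow gofmt's rule that no space separates a composite literal's type from its opening brace. A quick sketch with a made-up type T:

	package main

	import (
		"fmt"
		"go/format"
	)

	func main() {
		src := []byte("package p\n\ntype T struct{ n int }\n\nvar v = T {n: 1}\n")
		out, err := format.Source(src)
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out)) // expected: var v = T{n: 1}
	}
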
@@ -5,21 +5,21 @@
 package duplicacy
 
 import (
-	"encoding/json"
 	"bytes"
-	"os"
-	"io"
+	"encoding/hex"
+	"encoding/json"
 	"fmt"
+	"io"
+	"os"
 	"path"
-	"time"
+	"path/filepath"
+	"runtime"
 	"sort"
+	"strconv"
+	"strings"
 	"sync"
 	"sync/atomic"
-	"strings"
-	"strconv"
-	"runtime"
-	"encoding/hex"
-	"path/filepath"
+	"time"
 )
 
 // BackupManager performs the two major operations, backup and restore, and passes other operations, mostly related to
@@ -35,12 +35,10 @@ type BackupManager struct {
 	config *Config // contains a number of options
 }
 
-
 func (manager *BackupManager) SetDryRun(dryRun bool) {
 	manager.config.dryRun = dryRun
 }
 
-
 // CreateBackupManager creates a backup manager using the specified 'storage'. 'snapshotID' is a unique id to
 // identify snapshots created for this repository. 'top' is the top directory of the repository. 'password' is the
 // master key which can be nil if encryption is not enabled.
@@ -58,7 +56,7 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
 
 	snapshotManager := CreateSnapshotManager(config, storage)
 
-	backupManager := &BackupManager {
+	backupManager := &BackupManager{
 		snapshotID: snapshotID,
 		storage: storage,
 
@@ -87,7 +85,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
 		return false
 	}
 
-	for _, subdir := range [] string { "chunks", "snapshots" } {
+	for _, subdir := range []string{"chunks", "snapshots"} {
 		err := os.Mkdir(path.Join(cacheDir, subdir), 0744)
 		if err != nil && !os.IsExist(err) {
 			LOG_ERROR("BACKUP_CACHE", "Failed to create the snapshot cache subdir: %v", err)
@@ -106,7 +104,7 @@ func (manager *BackupManager) SetupSnapshotCache(storageName string) bool {
 //
 // This function assumes the Size field of each entry is equal to the length of the chunk content that belong
 // to the file.
-func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
+func setEntryContent(entries []*Entry, chunkLengths []int, offset int) {
 	if len(entries) == 0 {
 		return
 	}
@@ -120,7 +118,7 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
 	entries[i].StartOffset = 0
 	for j, length := range chunkLengths {
 
-		for totalChunkSize + int64(length) >= totalFileSize {
+		for totalChunkSize+int64(length) >= totalFileSize {
 			entries[i].EndChunk = j + offset
 			entries[i].EndOffset = int(totalFileSize - totalChunkSize)
 
@@ -131,7 +129,7 @@ func setEntryContent(entries[] *Entry, chunkLengths[]int, offset int) {
 
 			// If the current file ends at the end of the current chunk, the next file will
 			// start at the next chunk
-			if totalChunkSize + int64(length) == totalFileSize {
+			if totalChunkSize+int64(length) == totalFileSize {
 				entries[i].StartChunk = j + 1 + offset
 				entries[i].StartOffset = 0
 			} else {
@@ -217,7 +215,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		allChunks, _ := manager.SnapshotManager.ListAllFiles(manager.storage, "chunks/")
 
 		for _, chunk := range allChunks {
-			if len(chunk) == 0 || chunk[len(chunk) - 1] == '/' {
+			if len(chunk) == 0 || chunk[len(chunk)-1] == '/' {
 				continue
 			}
 
@@ -230,7 +228,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		}
 	}
 
-
 	if incompleteSnapshot != nil {
 
 		// This is the last chunk from the incomplete snapshot that can be found in the cache
@@ -256,8 +253,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		incompleteSnapshot.Files = files
 
 		// Remove incomplete chunks (they may not have been uploaded)
-		incompleteSnapshot.ChunkHashes = incompleteSnapshot.ChunkHashes[:lastCompleteChunk + 1]
-		incompleteSnapshot.ChunkLengths = incompleteSnapshot.ChunkLengths[:lastCompleteChunk + 1]
+		incompleteSnapshot.ChunkHashes = incompleteSnapshot.ChunkHashes[:lastCompleteChunk+1]
+		incompleteSnapshot.ChunkLengths = incompleteSnapshot.ChunkLengths[:lastCompleteChunk+1]
 		remoteSnapshot = incompleteSnapshot
 		LOG_INFO("FILE_SKIP", "Skipped %d files from previous incomplete backup", len(files))
 	}
@@ -276,8 +273,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 	var totalModifiedFileSize int64 // total size of modified files
 	var uploadedModifiedFileSize int64 // portions that have been uploaded (including cache hits)
 
-	var modifiedEntries [] *Entry // Files that has been modified or newly created
-	var preservedEntries [] *Entry // Files unchanges
+	var modifiedEntries []*Entry // Files that has been modified or newly created
+	var preservedEntries []*Entry // Files unchanges
 
 	// If the quick mode is disable and there isn't an incomplete snapshot from last (failed) backup,
 	// we simply treat all files as if they were new, and break them into chunks.
@@ -361,7 +358,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		entry.EndChunk -= deletedChunks
 	}
 
-	var uploadedEntries [] *Entry
+	var uploadedEntries []*Entry
 	var uploadedChunkHashes []string
 	var uploadedChunkLengths []int
 	var uploadedChunkLock = &sync.Mutex{}
@@ -398,7 +395,6 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		LOG_INFO("SNAPSHOT_FAIL", "Will abort the backup on chunk %d", chunkToFail)
 	}
 
-
 	chunkMaker := CreateChunkMaker(manager.config, false)
 	chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, threads, nil)
 
@@ -472,11 +468,11 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 			speed := uploadedModifiedFileSize / (now - startUploadingTime)
 			remainingTime := int64(0)
 			if speed > 0 {
-				remainingTime = (totalModifiedFileSize - uploadedModifiedFileSize) / speed + 1
+				remainingTime = (totalModifiedFileSize-uploadedModifiedFileSize)/speed + 1
 			}
 			percentage := float32(uploadedModifiedFileSize * 1000 / totalModifiedFileSize)
 			LOG_INFO("UPLOAD_PROGRESS", "%s chunk %d size %d, %sB/s %s %.1f%%", action, chunkIndex,
-				chunkSize, PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
+				chunkSize, PrettySize(speed), PrettyTime(remainingTime), percentage/10)
 		}
 
 		atomic.AddInt64(&numberOfCollectedChunks, 1)
@@ -488,7 +484,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 		// Break files into chunks
 		chunkMaker.ForEachChunk(
 			fileReader.CurrentFile,
-			func (chunk *Chunk, final bool) {
+			func(chunk *Chunk, final bool) {
 
 				hash := chunk.GetHash()
 				chunkID := chunk.GetID()
@@ -498,7 +494,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 
 				_, found := chunkCache[chunkID]
 				if found {
-					if time.Now().Unix() - lastUploadingTime > keepUploadAlive {
+					if time.Now().Unix()-lastUploadingTime > keepUploadAlive {
 						LOG_INFO("UPLOAD_KEEPALIVE", "Skip chunk cache to keep connection alive")
 						found = false
 					}
@@ -524,7 +520,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 				}
 
 			},
-			func (fileSize int64, hash string) (io.Reader, bool) {
+			func(fileSize int64, hash string) (io.Reader, bool) {
 
 				// Must lock here because the RunAtError function called by other threads may access uploadedEntries
 				uploadedChunkLock.Lock()
@@ -647,8 +643,8 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 	if showStatistics {
 
 		LOG_INFO("BACKUP_STATS", "Files: %d total, %s bytes; %d new, %s bytes",
-			len(preservedEntries) + len(uploadedEntries),
-			PrettyNumber(preservedFileSize + uploadedFileSize),
+			len(preservedEntries)+len(uploadedEntries),
+			PrettyNumber(preservedFileSize+uploadedFileSize),
 			len(uploadedEntries), PrettyNumber(uploadedFileSize))
 
 		LOG_INFO("BACKUP_STATS", "File chunks: %d total, %s bytes; %d new, %s bytes, %s bytes uploaded",
@@ -662,17 +658,17 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 			PrettyNumber(totalUploadedSnapshotChunkBytes))
 
 		LOG_INFO("BACKUP_STATS", "All chunks: %d total, %s bytes; %d new, %s bytes, %s bytes uploaded",
-			len(localSnapshot.ChunkHashes) + totalSnapshotChunks,
-			PrettyNumber(totalFileChunkLength + totalSnapshotChunkLength),
-			int(numberOfNewFileChunks) + numberOfNewSnapshotChunks,
-			PrettyNumber(totalUploadedFileChunkLength + totalUploadedSnapshotChunkLength),
-			PrettyNumber(totalUploadedFileChunkBytes + totalUploadedSnapshotChunkBytes))
+			len(localSnapshot.ChunkHashes)+totalSnapshotChunks,
+			PrettyNumber(totalFileChunkLength+totalSnapshotChunkLength),
+			int(numberOfNewFileChunks)+numberOfNewSnapshotChunks,
+			PrettyNumber(totalUploadedFileChunkLength+totalUploadedSnapshotChunkLength),
+			PrettyNumber(totalUploadedFileChunkBytes+totalUploadedSnapshotChunkBytes))
 
 		now := time.Now().Unix()
 		if now == startTime {
 			now = startTime + 1
 		}
-		LOG_INFO("BACKUP_STATS", "Total running time: %s", PrettyTime(now - startTime))
+		LOG_INFO("BACKUP_STATS", "Total running time: %s", PrettyTime(now-startTime))
 	}
 
 	skipped := ""
@@ -696,7 +692,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 	}
 
 	if len(skipped) > 0 {
-		if len(skippedDirectories) + len(skippedFiles) == 1 {
+		if len(skippedDirectories)+len(skippedFiles) == 1 {
 			skipped += " was"
 		} else {
 			skipped += " were"
@@ -715,7 +711,7 @@ func (manager *BackupManager) Backup(top string, quickMode bool, threads int, ta
 // the same as 'top'. 'quickMode' will bypass files with unchanged sizes and timestamps. 'deleteMode' will
 // remove local files that don't exist in the snapshot. 'patterns' is used to include/exclude certain files.
 func (manager *BackupManager) Restore(top string, revision int, inPlace bool, quickMode bool, threads int, overwrite bool,
-	deleteMode bool, showStatistics bool, patterns [] string) bool {
+	deleteMode bool, showStatistics bool, patterns []string) bool {
 
 	startTime := time.Now().Unix()
 
@@ -736,7 +732,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	_, err := os.Stat(top)
 	if os.IsNotExist(err) {
 		err = os.Mkdir(top, 0744)
-		if err != nil{
+		if err != nil {
 			LOG_ERROR("RESTORE_MKDIR", "Can't create the directory to be restored: %v", err)
 			return false
 		}
@@ -760,7 +756,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 
 	LOG_INFO("RESTORE_START", "Restoring %s to revision %d", top, revision)
 
-	var includedFiles [] *Entry
+	var includedFiles []*Entry
 
 	// Include/exclude some files if needed
 	if len(patterns) > 0 {
@@ -781,7 +777,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 	var extraFiles []string
 
 	// These will store files to be downloaded.
-	fileEntries := make([]*Entry, 0, len(remoteSnapshot.Files) / 2)
+	fileEntries := make([]*Entry, 0, len(remoteSnapshot.Files)/2)
 
 	var totalFileSize int64
 	var downloadedFileSize int64
@@ -817,7 +813,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		if entry.IsLink() {
 			stat, err := os.Lstat(fullPath)
 			if stat != nil {
-				if stat.Mode() & os.ModeSymlink != 0 {
+				if stat.Mode()&os.ModeSymlink != 0 {
 					isRegular, link, err := Readlink(fullPath)
 					if err == nil && link == entry.Link && !isRegular {
 						continue
@@ -900,7 +896,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 
 		// Handle zero size files.
 		if file.Size == 0 {
-			newFile, err := os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, file.GetPermissions())
+			newFile, err := os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.GetPermissions())
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_OPEN", "Failed to create empty file: %v", err)
 				return false
@@ -924,11 +920,10 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
 		file.RestoreMetadata(fullPath, nil)
 	}
 
-
 	if deleteMode && len(patterns) == 0 {
 		// Reverse the order to make sure directories are empty before being deleted
 		for i := range extraFiles {
-			file := extraFiles[len(extraFiles) - 1 - i]
+			file := extraFiles[len(extraFiles)-1-i]
 			fullPath := joinPath(top, file)
 			os.Remove(fullPath)
 			LOG_INFO("RESTORE_DELETE", "Deleted %s", file)
@@ -988,7 +983,7 @@ func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
 	if encoder.currentIndex == len(encoder.files) {
 		return nil, false
 	}
-	if encoder.currentIndex == len(encoder.files) - 1 {
+	if encoder.currentIndex == len(encoder.files)-1 {
 		encoder.buffer.Write([]byte("]"))
 		encoder.currentIndex++
 		return encoder, true
@@ -1019,7 +1014,7 @@ func (encoder *fileEncoder) NextFile() (io.Reader, bool) {
 // UploadSnapshot uploads the specified snapshot to the storage. It turns Files, ChunkHashes, and ChunkLengths into
 // sequences of chunks, and uploads these chunks, and finally the snapshot file.
 func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *ChunkUploader, top string, snapshot *Snapshot,
-	chunkCache map[string]bool, ) ( totalSnapshotChunkSize int64,
+	chunkCache map[string]bool) (totalSnapshotChunkSize int64,
 	numberOfNewSnapshotChunks int, totalUploadedSnapshotChunkSize int64,
 	totalUploadedSnapshotChunkBytes int64) {
 
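
A subtler rewrite in these hunks turns func (chunk *Chunk, final bool) into func(chunk *Chunk, final bool): in a function literal gofmt puts no space after the func keyword, since the spaced form is the method-declaration style where a receiver follows. A sketch:

	package main

	import (
		"fmt"
		"go/format"
	)

	func main() {
		src := []byte("package p\n\nvar f = func (x int) int { return x }\n")
		out, err := format.Source(src)
		if err != nil {
			panic(err)
		}
		fmt.Print(string(out)) // expected: var f = func(x int) int { return x }
	}
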
@@ -1046,10 +1041,10 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 
 	// uploadSequenceFunc uploads chunks read from 'reader'.
 	uploadSequenceFunc := func(reader io.Reader,
-		nextReader func(size int64, hash string)(io.Reader, bool)) (sequence[]string) {
+		nextReader func(size int64, hash string) (io.Reader, bool)) (sequence []string) {
 
 		chunkMaker.ForEachChunk(reader,
-			func (chunk *Chunk, final bool) {
+			func(chunk *Chunk, final bool) {
 				totalSnapshotChunkSize += int64(chunk.GetLength())
 				chunkID := chunk.GetID()
 				if _, found := chunkCache[chunkID]; found {
@@ -1064,7 +1059,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 		return sequence
 	}
 
-	sequences := []string { "chunks", "lengths" }
+	sequences := []string{"chunks", "lengths"}
 	// The file list is assumed not to be too large when fixed-size chunking is used
 	if chunkMaker.minimumChunkSize == chunkMaker.maximumChunkSize {
 		sequences = append(sequences, "files")
@@ -1081,7 +1076,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 	}
 
 		sequence := uploadSequenceFunc(bytes.NewReader(contents),
-			func (fileSize int64, hash string) (io.Reader, bool) {
+			func(fileSize int64, hash string) (io.Reader, bool) {
 				return nil, false
 			})
 		snapshot.SetSequence(sequenceType, sequence)
@@ -1090,7 +1085,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 	// File sequence may be too big to fit into the memory. So we encode files one by one and take advantages of
 	// the multi-reader capability of the chunk maker.
 	if chunkMaker.minimumChunkSize != chunkMaker.maximumChunkSize {
-		encoder := fileEncoder {
+		encoder := fileEncoder{
 			top: top,
 			readAttributes: snapshot.discardAttributes,
 			files: snapshot.Files,
@@ -1100,7 +1095,7 @@ func (manager *BackupManager) UploadSnapshot(chunkMaker *ChunkMaker, uploader *C
 
 		encoder.buffer.Write([]byte("["))
 		sequence := uploadSequenceFunc(encoder,
-			func (fileSize int64, hash string) (io.Reader, bool) {
+			func(fileSize int64, hash string) (io.Reader, bool) {
 				return encoder.NextFile()
 			})
 		snapshot.SetSequence("files", sequence)
@@ -1147,24 +1142,24 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		if temporaryPath != fullPath {
 			os.Remove(temporaryPath)
 		}
-	} ()
+	}()
 
 	// These are used to break the existing file into chunks.
-	var existingChunks [] string
-	var existingLengths [] int
+	var existingChunks []string
+	var existingLengths []int
 
 	// These are to enable fast lookup of what chunks are available in the existing file.
-	offsetMap := make(map[string] int64)
-	lengthMap := make(map[string] int)
+	offsetMap := make(map[string]int64)
+	lengthMap := make(map[string]int)
 	var offset int64
 
 	existingFile, err = os.Open(fullPath)
 	if err != nil {
 		if os.IsNotExist(err) {
 			// macOS has no sparse file support
-			if inPlace && entry.Size > 100 * 1024 * 1024 && runtime.GOOS != "darwin" {
+			if inPlace && entry.Size > 100*1024*1024 && runtime.GOOS != "darwin" {
 				// Create an empty sparse file
-				existingFile, err = os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
+				existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 				if err != nil {
 					LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing: %v", fullPath, err)
 					return false
@@ -1173,10 +1168,10 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 				n := int64(1)
 				// There is a go bug on Windows (https://github.com/golang/go/issues/21681) that causes Seek to fail
 				// if the lower 32 bit of the offset argument is 0xffffffff. Therefore we need to avoid that value by increasing n.
-				if uint32(entry.Size) == 0 && (entry.Size >> 32) > 0 {
+				if uint32(entry.Size) == 0 && (entry.Size>>32) > 0 {
 					n = int64(2)
 				}
-				_, err = existingFile.Seek(entry.Size - n, 0)
+				_, err = existingFile.Seek(entry.Size-n, 0)
 				if err != nil {
 					LOG_ERROR("DOWNLOAD_CREATE", "Failed to resize the initial file %s for in-place writing: %v", fullPath, err)
 					return false
@@ -1211,11 +1206,11 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			// In inplace mode, we only consider chunks in the existing file with the same offsets, so we
 			// break the original file at offsets retrieved from the backup
 			fileHasher := manager.config.NewFileHasher()
-			buffer := make([]byte, 64 * 1024)
+			buffer := make([]byte, 64*1024)
 			err = nil
 			// We set to read one more byte so the file hash will be different if the file to be restored is a
 			// truncated portion of the existing file
-			for i := entry.StartChunk; i <= entry.EndChunk + 1; i++ {
+			for i := entry.StartChunk; i <= entry.EndChunk+1; i++ {
 				hasher := manager.config.NewKeyedHasher(manager.config.HashKey)
 				chunkSize := 1 // the size of extra chunk beyond EndChunk
 				if i == entry.StartChunk {
@@ -1264,7 +1259,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			// we run the chunk maker to split the original file.
 			chunkMaker.ForEachChunk(
 				existingFile,
-				func (chunk *Chunk, final bool) {
+				func(chunk *Chunk, final bool) {
 					hash := chunk.GetHash()
 					chunkSize := chunk.GetLength()
 					existingChunks = append(existingChunks, hash)
@@ -1273,7 +1268,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 					lengthMap[hash] = chunkSize
 					offset += int64(chunkSize)
 				},
-				func (fileSize int64, hash string) (io.Reader, bool) {
+				func(fileSize int64, hash string) (io.Reader, bool) {
 					fileHash = hash
 					return nil, false
 				})
@@ -1298,7 +1293,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 
 	if existingFile == nil {
 		// Create an empty file
-		existingFile, err = os.OpenFile(fullPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
+		existingFile, err = os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 		if err != nil {
 			LOG_ERROR("DOWNLOAD_CREATE", "Failed to create the file %s for in-place writing", fullPath)
 		}
@@ -1357,12 +1352,12 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			}
 		} else {
 			chunk := chunkDownloader.WaitForChunk(i)
-			_, err = existingFile.Write(chunk.GetBytes()[start: end])
+			_, err = existingFile.Write(chunk.GetBytes()[start:end])
 			if err != nil {
 				LOG_ERROR("DOWNLOAD_WRITE", "Failed to write to the file: %v", err)
 				return false
 			}
-			hasher.Write(chunk.GetBytes()[start: end])
+			hasher.Write(chunk.GetBytes()[start:end])
 		}
 
 		offset += int64(end - start)
@@ -1385,7 +1380,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 	} else {
 
 		// Create the temporary file.
-		newFile, err = os.OpenFile(temporaryPath, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
+		newFile, err = os.OpenFile(temporaryPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 		if err != nil {
 			LOG_ERROR("DOWNLOAD_OPEN", "Failed to open file for writing: %v", err)
 			return false
@@ -1435,7 +1430,7 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 			if i == entry.EndChunk {
 				end = entry.EndOffset
 			}
-			data = chunk.GetBytes()[start: end]
+			data = chunk.GetBytes()[start:end]
 		}
 
 		_, err = newFile.Write(data)
@@ -1455,7 +1450,6 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
 		return false
 	}
 
-
 	if existingFile != nil {
 		existingFile.Close()
 		existingFile = nil
@@ -1508,8 +1502,8 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 		revisionMap[snapshotID][revision] = true
 	}
 
-	var snapshots [] *Snapshot
-	var snapshotIDs [] string
+	var snapshots []*Snapshot
+	var snapshotIDs []string
 	var err error
 
 	if snapshotID == "" {
@@ -1519,7 +1513,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 			return false
 		}
 	} else {
-		snapshotIDs = [] string { snapshotID }
+		snapshotIDs = []string{snapshotID}
 	}
 
 	for _, id := range snapshotIDs {
@@ -1643,7 +1637,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 		}
 	}
 
-	LOG_DEBUG("SNAPSHOT_COPY", "Chunks to copy = %d, to skip = %d, total = %d", chunksToCopy, chunksToSkip, chunksToCopy + chunksToSkip)
+	LOG_DEBUG("SNAPSHOT_COPY", "Chunks to copy = %d, to skip = %d, total = %d", chunksToCopy, chunksToSkip, chunksToCopy+chunksToSkip)
 	LOG_DEBUG("SNAPSHOT_COPY", "Total chunks in source snapshot revisions = %d\n", len(chunks))
 
 	chunkDownloader := CreateChunkDownloader(manager.config, manager.storage, nil, false, threads)
@@ -1686,7 +1680,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
 	chunkDownloader.Stop()
 	chunkUploader.Stop()
 
-	LOG_INFO("SNAPSHOT_COPY", "Copy complete, %d total chunks, %d chunks copied, %d skipped", totalCopied + totalSkipped, totalCopied, totalSkipped)
+	LOG_INFO("SNAPSHOT_COPY", "Copy complete, %d total chunks, %d chunks copied, %d skipped", totalCopied+totalSkipped, totalCopied, totalSkipped)
 
 	for _, snapshot := range snapshots {
 		if revisionMap[snapshot.ID][snapshot.Revision] == false {
@@ -5,21 +5,21 @@
 package duplicacy
 
 import (
-	"os"
+	crypto_rand "crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
 	"io"
+	"math/rand"
+	"os"
 	"path"
 	"testing"
-	"math/rand"
-	"encoding/hex"
 	"time"
-	"crypto/sha256"
-	crypto_rand "crypto/rand"
 
 	"runtime/debug"
 )
 
 func createRandomFile(path string, maxSize int) {
-	file, err := os.OpenFile(path, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0644)
+	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 	if err != nil {
 		LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
 		return
@@ -27,9 +27,9 @@ func createRandomFile(path string, maxSize int) {
 
 	defer file.Close()
 
-	size := maxSize / 2 + rand.Int() % (maxSize / 2)
+	size := maxSize/2 + rand.Int()%(maxSize/2)
 
-	buffer := make([]byte, 32 * 1024)
+	buffer := make([]byte, 32*1024)
 	for size > 0 {
 		bytes := size
 		if bytes > cap(buffer) {
@@ -65,7 +65,7 @@ func modifyFile(path string, portion float32) {
 		if file != nil {
 			file.Close()
 		}
-	} ()
+	}()
 
 	size, err := file.Seek(0, 2)
 	if err != nil {
@@ -73,7 +73,7 @@ func modifyFile(path string, portion float32) {
 		return
 	}
 
-	length := int (float32(size) * portion)
+	length := int(float32(size) * portion)
 	start := rand.Int() % (int(size) - length)
 
 	_, err = file.Seek(int64(start), 0)
@@ -186,28 +186,28 @@ func TestBackupManager(t *testing.T) {
 				debug.PrintStack()
 			}
 		}
-	} ()
+	}()
 
 	testDir := path.Join(os.TempDir(), "duplicacy_test")
 	os.RemoveAll(testDir)
 	os.MkdirAll(testDir, 0700)
 
-	os.Mkdir(testDir + "/repository1", 0700)
-	os.Mkdir(testDir + "/repository1/dir1", 0700)
-	os.Mkdir(testDir + "/repository1/.duplicacy", 0700)
-	os.Mkdir(testDir + "/repository2", 0700)
-	os.Mkdir(testDir + "/repository2/.duplicacy", 0700)
+	os.Mkdir(testDir+"/repository1", 0700)
+	os.Mkdir(testDir+"/repository1/dir1", 0700)
+	os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
+	os.Mkdir(testDir+"/repository2", 0700)
+	os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
 
 	maxFileSize := 1000000
 	//maxFileSize := 200000
 
-	createRandomFile(testDir + "/repository1/file1", maxFileSize)
-	createRandomFile(testDir + "/repository1/file2", maxFileSize)
-	createRandomFile(testDir + "/repository1/dir1/file3", maxFileSize)
+	createRandomFile(testDir+"/repository1/file1", maxFileSize)
+	createRandomFile(testDir+"/repository1/file2", maxFileSize)
+	createRandomFile(testDir+"/repository1/dir1/file3", maxFileSize)
 
 	threads := 1
 
-	storage, err := loadStorage(testDir + "/storage", threads)
+	storage, err := loadStorage(testDir+"/storage", threads)
 	if err != nil {
 		t.Errorf("Failed to create storage: %v", err)
 		return
@@ -227,16 +227,15 @@ func TestBackupManager(t *testing.T) {
 
 	time.Sleep(time.Duration(delay) * time.Second)
 	if testFixedChunkSize {
-		if !ConfigStorage(storage, 100, 64 * 1024, 64 * 1024, 64 * 1024, password, nil) {
+		if !ConfigStorage(storage, 100, 64*1024, 64*1024, 64*1024, password, nil) {
			t.Errorf("Failed to initialize the storage")
 		}
 	} else {
-		if !ConfigStorage(storage, 100, 64 * 1024, 256 * 1024, 16 * 1024, password, nil) {
+		if !ConfigStorage(storage, 100, 64*1024, 256*1024, 16*1024, password, nil) {
 			t.Errorf("Failed to initialize the storage")
 		}
 	}
 
-
 	time.Sleep(time.Duration(delay) * time.Second)
 
 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
@@ -244,13 +243,13 @@ func TestBackupManager(t *testing.T) {
 	backupManager.SetupSnapshotCache("default")
 
 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-	backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
+	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false)
 	time.Sleep(time.Duration(delay) * time.Second)
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
-		/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+	backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
+		/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)
 
-	for _, f := range []string{ "file1", "file2", "dir1/file3" } {
+	for _, f := range []string{"file1", "file2", "dir1/file3"} {
 		if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
 			t.Errorf("File %s does not exist", f)
 			continue
@@ -263,18 +262,18 @@ func TestBackupManager(t *testing.T) {
 		}
 	}
 
-	modifyFile(testDir + "/repository1/file1", 0.1)
-	modifyFile(testDir + "/repository1/file2", 0.2)
-	modifyFile(testDir + "/repository1/dir1/file3", 0.3)
+	modifyFile(testDir+"/repository1/file1", 0.1)
+	modifyFile(testDir+"/repository1/file2", 0.2)
+	modifyFile(testDir+"/repository1/dir1/file3", 0.3)
 
 	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
-	backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
+	backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false)
 	time.Sleep(time.Duration(delay) * time.Second)
 	SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
-	backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
-		/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
+	backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
+		/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)
 
-	for _, f := range []string{ "file1", "file2", "dir1/file3" } {
+	for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
hash2 := getFileHash(testDir + "/repository2/" + f)
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
if hash1 != hash2 {
|
if hash1 != hash2 {
|
||||||
@@ -284,25 +283,25 @@ func TestBackupManager(t *testing.T) {
|
|||||||
|
|
||||||
// Truncate file2 and add a few empty directories
|
// Truncate file2 and add a few empty directories
|
||||||
truncateFile(testDir + "/repository1/file2")
|
truncateFile(testDir + "/repository1/file2")
|
||||||
os.Mkdir(testDir + "/repository1/dir2", 0700)
|
os.Mkdir(testDir+"/repository1/dir2", 0700)
|
||||||
os.Mkdir(testDir + "/repository1/dir2/dir3", 0700)
|
os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
|
||||||
os.Mkdir(testDir + "/repository1/dir4", 0700)
|
os.Mkdir(testDir+"/repository1/dir4", 0700)
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Backup(testDir + "/repository1", /*quickMode=*/false, threads, "third", false, false)
|
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false)
|
||||||
time.Sleep(time.Duration(delay) * time.Second)
|
time.Sleep(time.Duration(delay) * time.Second)
|
||||||
|
|
||||||
// Create some directories and files under repository2 that will be deleted during restore
|
// Create some directories and files under repository2 that will be deleted during restore
|
||||||
os.Mkdir(testDir + "/repository2/dir5", 0700)
|
os.Mkdir(testDir+"/repository2/dir5", 0700)
|
||||||
os.Mkdir(testDir + "/repository2/dir5/dir6", 0700)
|
os.Mkdir(testDir+"/repository2/dir5/dir6", 0700)
|
||||||
os.Mkdir(testDir + "/repository2/dir7", 0700)
|
os.Mkdir(testDir+"/repository2/dir7", 0700)
|
||||||
createRandomFile(testDir + "/repository2/file4", 100)
|
createRandomFile(testDir+"/repository2/file4", 100)
|
||||||
createRandomFile(testDir + "/repository2/dir5/file5", 100)
|
createRandomFile(testDir+"/repository2/dir5/file5", 100)
|
||||||
|
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
/*deleteMode=*/true, /*showStatistics=*/false, /*patterns=*/nil)
|
/*deleteMode=*/ true /*showStatistics=*/, false /*patterns=*/, nil)
|
||||||
|
|
||||||
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
hash2 := getFileHash(testDir + "/repository2/" + f)
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
if hash1 != hash2 {
|
if hash1 != hash2 {
|
||||||
@@ -311,25 +310,25 @@ func TestBackupManager(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// These files/dirs should not exist because deleteMode == true
|
// These files/dirs should not exist because deleteMode == true
|
||||||
checkExistence(t, testDir + "/repository2/dir5", false, false);
|
checkExistence(t, testDir+"/repository2/dir5", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/dir5/dir6", false, false);
|
checkExistence(t, testDir+"/repository2/dir5/dir6", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/dir7", false, false);
|
checkExistence(t, testDir+"/repository2/dir7", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/file4", false, false);
|
checkExistence(t, testDir+"/repository2/file4", false, false)
|
||||||
checkExistence(t, testDir + "/repository2/dir5/file5", false, false);
|
checkExistence(t, testDir+"/repository2/dir5/file5", false, false)
|
||||||
|
|
||||||
// These empty dirs should exist
|
// These empty dirs should exist
|
||||||
checkExistence(t, testDir + "/repository2/dir2", true, true);
|
checkExistence(t, testDir+"/repository2/dir2", true, true)
|
||||||
checkExistence(t, testDir + "/repository2/dir2/dir3", true, true);
|
checkExistence(t, testDir+"/repository2/dir2/dir3", true, true)
|
||||||
checkExistence(t, testDir + "/repository2/dir4", true, true);
|
checkExistence(t, testDir+"/repository2/dir4", true, true)
|
||||||
|
|
||||||
// Remove file2 and dir1/file3 and restore them from revision 3
|
// Remove file2 and dir1/file3 and restore them from revision 3
|
||||||
os.Remove(testDir + "/repository1/file2")
|
os.Remove(testDir + "/repository1/file2")
|
||||||
os.Remove(testDir + "/repository1/dir1/file3")
|
os.Remove(testDir + "/repository1/dir1/file3")
|
||||||
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
|
||||||
backupManager.Restore(testDir + "/repository1", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
|
backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
|
||||||
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/[]string{"+file2", "+dir1/file3", "-*"})
|
/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})
|
||||||
|
|
||||||
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
|
for _, f := range []string{"file1", "file2", "dir1/file3"} {
|
||||||
hash1 := getFileHash(testDir + "/repository1/" + f)
|
hash1 := getFileHash(testDir + "/repository1/" + f)
|
||||||
hash2 := getFileHash(testDir + "/repository2/" + f)
|
hash2 := getFileHash(testDir + "/repository2/" + f)
|
||||||
if hash1 != hash2 {
|
if hash1 != hash2 {
|
||||||
|
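An aside on the patterns argument in the last Restore call above: it is an include/exclude filter in which the first matching pattern decides a path's fate, so "+file2", "+dir1/file3", "-*" restores exactly those two files. A toy first-match-wins matcher in Go (illustrative only; the real pattern syntax is richer, so treat the matching rules here as assumptions):

package main

import "fmt"

// match applies "+include"/"-exclude" patterns in order; the first pattern
// that matches decides. Only exact paths and a bare "*" are supported here.
func match(patterns []string, path string) bool {
	for _, p := range patterns {
		include, body := p[0] == '+', p[1:]
		if body == "*" || body == path {
			return include
		}
	}
	return false // unmatched paths are skipped in this sketch
}

func main() {
	patterns := []string{"+file2", "+dir1/file3", "-*"}
	for _, f := range []string{"file1", "file2", "dir1/file3"} {
		fmt.Printf("%-10s restore=%v\n", f, match(patterns, f))
	}
}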
|||||||
@@ -5,28 +5,27 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"fmt"
|
|
||||||
"hash"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"runtime"
|
"compress/zlib"
|
||||||
"crypto/cipher"
|
|
||||||
"crypto/aes"
|
"crypto/aes"
|
||||||
|
"crypto/cipher"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"compress/zlib"
|
"fmt"
|
||||||
|
"hash"
|
||||||
|
"io"
|
||||||
|
"runtime"
|
||||||
|
|
||||||
"github.com/bkaradzic/go-lz4"
|
"github.com/bkaradzic/go-lz4"
|
||||||
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation; therefore
|
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation; therefore
|
||||||
// we maintain a pool of previously used buffers.
|
// we maintain a pool of previously used buffers.
|
||||||
var chunkBufferPool chan *bytes.Buffer = make(chan *bytes.Buffer, runtime.NumCPU() * 16)
|
var chunkBufferPool chan *bytes.Buffer = make(chan *bytes.Buffer, runtime.NumCPU()*16)
|
||||||
|
|
||||||
func AllocateChunkBuffer() (buffer *bytes.Buffer) {
|
func AllocateChunkBuffer() (buffer *bytes.Buffer) {
|
||||||
select {
|
select {
|
||||||
case buffer = <- chunkBufferPool:
|
case buffer = <-chunkBufferPool:
|
||||||
default:
|
default:
|
||||||
buffer = new(bytes.Buffer)
|
buffer = new(bytes.Buffer)
|
||||||
}
|
}
|
||||||
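The hunk above reformats Duplicacy's chunk buffer pool, which is a buffered channel used as a non-blocking free list. A minimal self-contained sketch of the idiom (names are illustrative, and the release side is assumed from the ReleaseChunkBuffer calls seen later in this diff):

package main

import (
	"bytes"
	"fmt"
)

var pool = make(chan *bytes.Buffer, 4)

func allocate() *bytes.Buffer {
	select {
	case b := <-pool: // reuse a previously released buffer
		return b
	default: // pool empty: allocate a fresh one
		return new(bytes.Buffer)
	}
}

func release(b *bytes.Buffer) {
	b.Reset()
	select {
	case pool <- b: // return the buffer if the pool has room
	default: // pool full: drop it and let the GC reclaim it
	}
}

func main() {
	b := allocate()
	b.WriteString("chunk data")
	fmt.Println(b.Len())
	release(b)
}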
@@ -76,9 +75,9 @@ func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &Chunk {
|
return &Chunk{
|
||||||
buffer : buffer,
|
buffer: buffer,
|
||||||
config : config,
|
config: config,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -92,7 +91,7 @@ func (chunk *Chunk) GetLength() int {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetBytes returns data available in this chunk
|
// GetBytes returns data available in this chunk
|
||||||
func (chunk *Chunk) GetBytes() [] byte {
|
func (chunk *Chunk) GetBytes() []byte {
|
||||||
return chunk.buffer.Bytes()
|
return chunk.buffer.Bytes()
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -114,7 +113,7 @@ func (chunk *Chunk) Reset(hashNeeded bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Write implements the Writer interface.
|
// Write implements the Writer interface.
|
||||||
func (chunk *Chunk) Write(p []byte) (int, error){
|
func (chunk *Chunk) Write(p []byte) (int, error) {
|
||||||
|
|
||||||
// buffer may be nil when the chunk is used for computing the hash only.
|
// buffer may be nil when the chunk is used for computing the hash only.
|
||||||
if chunk.buffer == nil {
|
if chunk.buffer == nil {
|
||||||
@@ -132,7 +131,7 @@ func (chunk *Chunk) Write(p []byte) (int, error){
|
|||||||
|
|
||||||
// GetHash returns the chunk hash.
|
// GetHash returns the chunk hash.
|
||||||
func (chunk *Chunk) GetHash() string {
|
func (chunk *Chunk) GetHash() string {
|
||||||
if (len(chunk.hash) == 0) {
|
if len(chunk.hash) == 0 {
|
||||||
chunk.hash = chunk.hasher.Sum(nil)
|
chunk.hash = chunk.hasher.Sum(nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -179,7 +178,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
encryptedBuffer.Reset()
|
encryptedBuffer.Reset()
|
||||||
defer func() {
|
defer func() {
|
||||||
ReleaseChunkBuffer(encryptedBuffer)
|
ReleaseChunkBuffer(encryptedBuffer)
|
||||||
} ()
|
}()
|
||||||
|
|
||||||
if len(encryptionKey) > 0 {
|
if len(encryptionKey) > 0 {
|
||||||
|
|
||||||
@@ -229,7 +228,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
if availableLength < maximumLength {
|
if availableLength < maximumLength {
|
||||||
encryptedBuffer.Grow(maximumLength - availableLength)
|
encryptedBuffer.Grow(maximumLength - availableLength)
|
||||||
}
|
}
|
||||||
written, err := lz4.Encode(encryptedBuffer.Bytes()[offset + 4:], chunk.buffer.Bytes())
|
written, err := lz4.Encode(encryptedBuffer.Bytes()[offset+4:], chunk.buffer.Bytes())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("LZ4 compression error: %v", err)
|
return fmt.Errorf("LZ4 compression error: %v", err)
|
||||||
}
|
}
|
||||||
@@ -258,7 +257,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
|
|
||||||
// The encrypted data will be appended to the duplicacy header and the nonce.
|
// The encrypted data will be appended to the duplicacy header and the nonce.
|
||||||
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
|
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
|
||||||
encryptedBuffer.Bytes()[offset: offset + dataLength + paddingLength], nil)
|
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
|
||||||
|
|
||||||
encryptedBuffer.Truncate(len(encryptedBytes))
|
encryptedBuffer.Truncate(len(encryptedBytes))
|
||||||
|
|
||||||
@@ -278,7 +277,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
encryptedBuffer.Reset()
|
encryptedBuffer.Reset()
|
||||||
defer func() {
|
defer func() {
|
||||||
ReleaseChunkBuffer(encryptedBuffer)
|
ReleaseChunkBuffer(encryptedBuffer)
|
||||||
} ()
|
}()
|
||||||
|
|
||||||
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
|
||||||
|
|
||||||
@@ -309,15 +308,15 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
return fmt.Errorf("Not enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
return fmt.Errorf("Not enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
|
||||||
}
|
}
|
||||||
|
|
||||||
if string(encryptedBuffer.Bytes()[:headerLength - 1]) != ENCRYPTION_HEADER[:headerLength - 1] {
|
if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
|
||||||
return fmt.Errorf("The storage doesn't seem to be encrypted")
|
return fmt.Errorf("The storage doesn't seem to be encrypted")
|
||||||
}
|
}
|
||||||
|
|
||||||
if encryptedBuffer.Bytes()[headerLength - 1] != 0 {
|
if encryptedBuffer.Bytes()[headerLength-1] != 0 {
|
||||||
return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength - 1])
|
return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
|
||||||
}
|
}
|
||||||
|
|
||||||
nonce := encryptedBuffer.Bytes()[headerLength: offset]
|
nonce := encryptedBuffer.Bytes()[headerLength:offset]
|
||||||
|
|
||||||
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
|
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
|
||||||
encryptedBuffer.Bytes()[offset:], nil)
|
encryptedBuffer.Bytes()[offset:], nil)
|
||||||
@@ -326,7 +325,7 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
paddingLength := int(decryptedBytes[len(decryptedBytes) - 1])
|
paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
|
||||||
if paddingLength == 0 {
|
if paddingLength == 0 {
|
||||||
paddingLength = 256
|
paddingLength = 256
|
||||||
}
|
}
|
||||||
@@ -335,10 +334,10 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < paddingLength; i++ {
|
for i := 0; i < paddingLength; i++ {
|
||||||
padding := decryptedBytes[len(decryptedBytes) - 1 - i]
|
padding := decryptedBytes[len(decryptedBytes)-1-i]
|
||||||
if padding != byte(paddingLength) {
|
if padding != byte(paddingLength) {
|
||||||
return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
|
return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
|
||||||
decryptedBytes[len(decryptedBytes) - paddingLength:])
|
decryptedBytes[len(decryptedBytes)-paddingLength:])
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
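The padding loop above behaves like PKCS#7 validation with one twist: a length byte of 0 encodes 256 bytes of padding, and the comparison against byte(paddingLength) wraps 256 back to 0. A standalone sketch of the same check, assuming that is the intended scheme:

package main

import "fmt"

func stripPadding(decrypted []byte) ([]byte, error) {
	if len(decrypted) == 0 {
		return nil, fmt.Errorf("empty buffer")
	}
	padding := int(decrypted[len(decrypted)-1])
	if padding == 0 {
		padding = 256 // a zero length byte encodes the maximum padding
	}
	if padding > len(decrypted) {
		return nil, fmt.Errorf("padding length %d exceeds data length %d",
			padding, len(decrypted))
	}
	for i := 0; i < padding; i++ {
		// byte(256) wraps to 0, matching the zero length byte case
		if decrypted[len(decrypted)-1-i] != byte(padding) {
			return nil, fmt.Errorf("incorrect padding of length %d", padding)
		}
	}
	return decrypted[:len(decrypted)-padding], nil
}

func main() {
	data := append([]byte("chunk"), 3, 3, 3) // 3 bytes of padding
	payload, err := stripPadding(data)
	fmt.Printf("%q %v\n", payload, err)
}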
@@ -379,4 +378,3 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
|
|||||||
return nil
|
return nil
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestChunk(t *testing.T) {
|
func TestChunk(t *testing.T) {
|
||||||
@@ -67,7 +67,6 @@ func TestChunk(t *testing.T) {
|
|||||||
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
|
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ type ChunkDownloader struct {
|
|||||||
showStatistics bool // Show a stats log for each chunk if true
|
showStatistics bool // Show a stats log for each chunk if true
|
||||||
threads int // Number of threads
|
threads int // Number of threads
|
||||||
|
|
||||||
taskList [] ChunkDownloadTask // The list of chunks to be downloaded
|
taskList []ChunkDownloadTask // The list of chunks to be downloaded
|
||||||
completedTasks map[int]bool // Store downloaded chunks
|
completedTasks map[int]bool // Store downloaded chunks
|
||||||
lastChunkIndex int // a monotonically increasing number indicating the last chunk to be downloaded
|
lastChunkIndex int // a monotonically increasing number indicating the last chunk to be downloaded
|
||||||
|
|
||||||
@@ -53,7 +53,7 @@ type ChunkDownloader struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
|
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
|
||||||
downloader := &ChunkDownloader {
|
downloader := &ChunkDownloader{
|
||||||
config: config,
|
config: config,
|
||||||
storage: storage,
|
storage: storage,
|
||||||
snapshotCache: snapshotCache,
|
snapshotCache: snapshotCache,
|
||||||
@@ -77,20 +77,20 @@ func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileS
|
|||||||
defer CatchLogException()
|
defer CatchLogException()
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case task := <- downloader.taskQueue:
|
case task := <-downloader.taskQueue:
|
||||||
downloader.Download(threadIndex, task)
|
downloader.Download(threadIndex, task)
|
||||||
case <- downloader.stopChannel:
|
case <-downloader.stopChannel:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} (i)
|
}(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
return downloader
|
return downloader
|
||||||
}
|
}
|
||||||
|
|
||||||
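The goroutines launched in the hunk above follow the standard Go worker shape: each worker selects between a task queue and a stop channel until told to quit. A minimal sketch of the pattern (names illustrative; whether the real code stops workers with one send each or with a close is not visible in this hunk):

package main

import (
	"fmt"
	"sync"
)

type task struct{ index int }

func main() {
	taskQueue := make(chan task)
	stop := make(chan struct{})
	var wg sync.WaitGroup

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(threadIndex int) {
			defer wg.Done()
			for {
				select {
				case t := <-taskQueue:
					fmt.Printf("thread %d: chunk %d\n", threadIndex, t.index)
				case <-stop:
					return
				}
			}
		}(i)
	}

	for i := 0; i < 8; i++ {
		taskQueue <- task{index: i} // unbuffered: send completes on receipt
	}
	close(stop) // a close reaches all workers; a per-worker send also works
	wg.Wait()
}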
// AddFiles adds chunks needed by the specified files to the download list.
|
// AddFiles adds chunks needed by the specified files to the download list.
|
||||||
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry) {
|
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry) {
|
||||||
|
|
||||||
downloader.taskList = nil
|
downloader.taskList = nil
|
||||||
lastChunkIndex := -1
|
lastChunkIndex := -1
|
||||||
@@ -102,7 +102,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
|||||||
}
|
}
|
||||||
for i := file.StartChunk; i <= file.EndChunk; i++ {
|
for i := file.StartChunk; i <= file.EndChunk; i++ {
|
||||||
if lastChunkIndex != i {
|
if lastChunkIndex != i {
|
||||||
task := ChunkDownloadTask {
|
task := ChunkDownloadTask{
|
||||||
chunkIndex: len(downloader.taskList),
|
chunkIndex: len(downloader.taskList),
|
||||||
chunkHash: snapshot.ChunkHashes[i],
|
chunkHash: snapshot.ChunkHashes[i],
|
||||||
chunkLength: snapshot.ChunkLengths[i],
|
chunkLength: snapshot.ChunkLengths[i],
|
||||||
@@ -111,13 +111,13 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
|||||||
downloader.taskList = append(downloader.taskList, task)
|
downloader.taskList = append(downloader.taskList, task)
|
||||||
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
|
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
|
||||||
} else {
|
} else {
|
||||||
downloader.taskList[len(downloader.taskList) - 1].needed = true
|
downloader.taskList[len(downloader.taskList)-1].needed = true
|
||||||
}
|
}
|
||||||
lastChunkIndex = i
|
lastChunkIndex = i
|
||||||
}
|
}
|
||||||
file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
|
file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
|
||||||
file.EndChunk = len(downloader.taskList) - 1
|
file.EndChunk = len(downloader.taskList) - 1
|
||||||
if file.EndChunk - file.StartChunk > maximumChunks {
|
if file.EndChunk-file.StartChunk > maximumChunks {
|
||||||
maximumChunks = file.EndChunk - file.StartChunk
|
maximumChunks = file.EndChunk - file.StartChunk
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -125,7 +125,7 @@ func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry)
|
|||||||
|
|
||||||
// AddChunk adds a single chunk to the download list.
|
// AddChunk adds a single chunk to the download list.
|
||||||
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
|
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
|
||||||
task := ChunkDownloadTask {
|
task := ChunkDownloadTask{
|
||||||
chunkIndex: len(downloader.taskList),
|
chunkIndex: len(downloader.taskList),
|
||||||
chunkHash: chunkHash,
|
chunkHash: chunkHash,
|
||||||
chunkLength: 0,
|
chunkLength: 0,
|
||||||
@@ -137,7 +137,7 @@ func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
|
|||||||
downloader.taskQueue <- task
|
downloader.taskQueue <- task
|
||||||
downloader.numberOfDownloadingChunks++
|
downloader.numberOfDownloadingChunks++
|
||||||
downloader.numberOfActiveChunks++
|
downloader.numberOfActiveChunks++
|
||||||
downloader.taskList[len(downloader.taskList) - 1].isDownloading = true
|
downloader.taskList[len(downloader.taskList)-1].isDownloading = true
|
||||||
}
|
}
|
||||||
return len(downloader.taskList) - 1
|
return len(downloader.taskList) - 1
|
||||||
}
|
}
|
||||||
@@ -163,7 +163,7 @@ func (downloader *ChunkDownloader) Prefetch(file *Entry) {
|
|||||||
downloader.numberOfDownloadingChunks++
|
downloader.numberOfDownloadingChunks++
|
||||||
downloader.numberOfActiveChunks++
|
downloader.numberOfActiveChunks++
|
||||||
}
|
}
|
||||||
} else{
|
} else {
|
||||||
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
|
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
|
||||||
downloader.config.GetChunkIDFromHash(task.chunkHash))
|
downloader.config.GetChunkIDFromHash(task.chunkHash))
|
||||||
}
|
}
|
||||||
@@ -226,7 +226,7 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
|
|||||||
|
|
||||||
// Now wait until the chunk to be downloaded appears in the completed tasks
|
// Now wait until the chunk to be downloaded appears in the completed tasks
|
||||||
for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
|
for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
|
||||||
completion := <- downloader.completionChannel
|
completion := <-downloader.completionChannel
|
||||||
downloader.completedTasks[completion.chunkIndex] = true
|
downloader.completedTasks[completion.chunkIndex] = true
|
||||||
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||||
downloader.numberOfDownloadedChunks++
|
downloader.numberOfDownloadedChunks++
|
||||||
@@ -238,7 +238,7 @@ func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
|
|||||||
// Stop terminates all downloading goroutines
|
// Stop terminates all downloading goroutines
|
||||||
func (downloader *ChunkDownloader) Stop() {
|
func (downloader *ChunkDownloader) Stop() {
|
||||||
for downloader.numberOfDownloadingChunks > 0 {
|
for downloader.numberOfDownloadingChunks > 0 {
|
||||||
completion := <- downloader.completionChannel
|
completion := <-downloader.completionChannel
|
||||||
downloader.completedTasks[completion.chunkIndex] = true
|
downloader.completedTasks[completion.chunkIndex] = true
|
||||||
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||||
downloader.numberOfDownloadedChunks++
|
downloader.numberOfDownloadedChunks++
|
||||||
@@ -286,7 +286,7 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
|||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
|
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
|
||||||
|
|
||||||
downloader.completionChannel <- ChunkDownloadCompletion{ chunk: chunk, chunkIndex:task.chunkIndex }
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -330,7 +330,6 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||||
@@ -362,16 +361,16 @@ func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadT
|
|||||||
speed := downloadedChunkSize / (now - downloader.startTime)
|
speed := downloadedChunkSize / (now - downloader.startTime)
|
||||||
remainingTime := int64(0)
|
remainingTime := int64(0)
|
||||||
if speed > 0 {
|
if speed > 0 {
|
||||||
remainingTime = (downloader.totalChunkSize - downloadedChunkSize) / speed + 1
|
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
|
||||||
}
|
}
|
||||||
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
||||||
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
||||||
task.chunkIndex + 1, chunk.GetLength(),
|
task.chunkIndex+1, chunk.GetLength(),
|
||||||
PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
|
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
|
||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
|
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
|
||||||
}
|
}
|
||||||
|
|
||||||
downloader.completionChannel <- ChunkDownloadCompletion{ chunk: chunk, chunkIndex:task.chunkIndex }
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,10 +5,10 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only uses a circular buffer
|
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only uses a circular buffer
|
||||||
@@ -35,7 +35,7 @@ type ChunkMaker struct {
|
|||||||
// buzhash.
|
// buzhash.
|
||||||
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
|
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
|
||||||
size := 1
|
size := 1
|
||||||
for size * 2 <= config.AverageChunkSize {
|
for size*2 <= config.AverageChunkSize {
|
||||||
size *= 2
|
size *= 2
|
||||||
}
|
}
|
||||||
|
|
||||||
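The loop above rounds the average chunk size down to a power of two because the boundary test later is (hashSum & hashMask) == 0 with hashMask = averageChunkSize - 1; for a uniformly distributed hash that condition fires about once every averageChunkSize bytes, which is what makes the configured size the expected average. A quick simulation of that claim:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const avg = 1 << 12 // 4 KiB average target (a power of two)
	const mask = avg - 1
	const n = 1 << 24 // bytes to simulate

	boundaries := 0
	for i := 0; i < n; i++ {
		if rand.Uint64()&mask == 0 {
			boundaries++
		}
	}
	fmt.Printf("observed average gap: %.0f bytes (target %d)\n",
		float64(n)/float64(boundaries), avg)
}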
@@ -44,7 +44,7 @@ func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
maker := &ChunkMaker {
|
maker := &ChunkMaker{
|
||||||
hashMask: uint64(config.AverageChunkSize - 1),
|
hashMask: uint64(config.AverageChunkSize - 1),
|
||||||
maximumChunkSize: config.MaximumChunkSize,
|
maximumChunkSize: config.MaximumChunkSize,
|
||||||
minimumChunkSize: config.MinimumChunkSize,
|
minimumChunkSize: config.MinimumChunkSize,
|
||||||
@@ -61,12 +61,12 @@ func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
|
|||||||
|
|
||||||
for i := 0; i < 64; i++ {
|
for i := 0; i < 64; i++ {
|
||||||
for j := 0; j < 4; j++ {
|
for j := 0; j < 4; j++ {
|
||||||
maker.randomTable[4 * i + j] = binary.LittleEndian.Uint64(randomData[8 * j : 8 * j + 8])
|
maker.randomTable[4*i+j] = binary.LittleEndian.Uint64(randomData[8*j : 8*j+8])
|
||||||
}
|
}
|
||||||
randomData = sha256.Sum256(randomData[:])
|
randomData = sha256.Sum256(randomData[:])
|
||||||
}
|
}
|
||||||
|
|
||||||
maker.buffer = make([]byte, 2 * config.MinimumChunkSize)
|
maker.buffer = make([]byte, 2*config.MinimumChunkSize)
|
||||||
|
|
||||||
return maker
|
return maker
|
||||||
}
|
}
|
||||||
@@ -79,7 +79,7 @@ func rotateLeftByOne(value uint64) uint64 {
|
|||||||
return (value << 1) | (value >> 63)
|
return (value << 1) | (value >> 63)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (maker *ChunkMaker) buzhashSum(sum uint64, data [] byte) uint64 {
|
func (maker *ChunkMaker) buzhashSum(sum uint64, data []byte) uint64 {
|
||||||
for i := 0; i < len(data); i++ {
|
for i := 0; i < len(data); i++ {
|
||||||
sum = rotateLeftByOne(sum) ^ maker.randomTable[data[i]]
|
sum = rotateLeftByOne(sum) ^ maker.randomTable[data[i]]
|
||||||
}
|
}
|
||||||
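buzhashSum above is the direct, full-window form of the hash; the buzhashUpdate in the next hunk slides the window with a single rotate and two XORs. A self-contained sketch showing the incremental update agreeing with the direct sum (the table here is a stand-in, not Duplicacy's SHA-256-seeded table):

package main

import "fmt"

var table [256]uint64

func init() {
	// Hypothetical deterministic table via xorshift; illustrative only.
	var x uint64 = 0x9E3779B97F4A7C15
	for i := range table {
		x ^= x << 13
		x ^= x >> 7
		x ^= x << 17
		table[i] = x
	}
}

func rotl(v uint64, n uint) uint64 { return (v << (n % 64)) | (v >> (64 - n%64)) }

// sum is the direct form: H = rotl(t[b1], n-1) ^ ... ^ t[bn].
func sum(window []byte) uint64 {
	var h uint64
	for _, b := range window {
		h = rotl(h, 1) ^ table[b]
	}
	return h
}

// slide drops 'out' from the front and appends 'in' for a window of length n.
func slide(h uint64, out, in byte, n int) uint64 {
	return rotl(h, 1) ^ rotl(table[out], uint(n)) ^ table[in]
}

func main() {
	data := []byte("abcdefgh")
	n := 4
	h := sum(data[:n])
	for i := n; i < len(data); i++ {
		h = slide(h, data[i-n], data[i], n)
		fmt.Printf("window %q rolling %016x direct %016x\n",
			data[i-n+1:i+1], h, sum(data[i-n+1:i+1]))
	}
}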
@@ -94,7 +94,7 @@ func (maker *ChunkMaker) buzhashUpdate(sum uint64, out byte, in byte, length int
|
|||||||
// 'nextReader' returns false, it will process remaining data in the buffer and then quit. When a chunk is identified,
|
// 'nextReader' returns false, it will process remaining data in the buffer and then quit. When a chunk is identified,
|
||||||
// it will call 'endOfChunk' with the completed chunk and a boolean flag indicating if it is the last chunk.
|
// it will call 'endOfChunk' with the completed chunk and a boolean flag indicating if it is the last chunk.
|
||||||
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
|
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
|
||||||
nextReader func(size int64, hash string)(io.Reader, bool)) {
|
nextReader func(size int64, hash string) (io.Reader, bool)) {
|
||||||
|
|
||||||
maker.bufferStart = 0
|
maker.bufferStart = 0
|
||||||
maker.bufferSize = 0
|
maker.bufferSize = 0
|
||||||
@@ -121,13 +121,13 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
|
|
||||||
// Move data from the buffer to the chunk.
|
// Move data from the buffer to the chunk.
|
||||||
fill := func(count int) {
|
fill := func(count int) {
|
||||||
if maker.bufferStart + count < maker.bufferCapacity {
|
if maker.bufferStart+count < maker.bufferCapacity {
|
||||||
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart + count])
|
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
|
||||||
maker.bufferStart += count
|
maker.bufferStart += count
|
||||||
maker.bufferSize -= count
|
maker.bufferSize -= count
|
||||||
} else {
|
} else {
|
||||||
chunk.Write(maker.buffer[maker.bufferStart :])
|
chunk.Write(maker.buffer[maker.bufferStart:])
|
||||||
chunk.Write(maker.buffer[: count - (maker.bufferCapacity - maker.bufferStart)])
|
chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
|
||||||
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
|
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
|
||||||
maker.bufferSize -= count
|
maker.bufferSize -= count
|
||||||
}
|
}
|
||||||
@@ -148,7 +148,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
for {
|
for {
|
||||||
maker.bufferStart = 0
|
maker.bufferStart = 0
|
||||||
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
|
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
|
||||||
count, err := reader.Read(maker.buffer[maker.bufferStart : maker.minimumChunkSize])
|
count, err := reader.Read(maker.buffer[maker.bufferStart:maker.minimumChunkSize])
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err != io.EOF {
|
if err != io.EOF {
|
||||||
@@ -197,7 +197,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
count = maker.bufferStart - start
|
count = maker.bufferStart - start
|
||||||
}
|
}
|
||||||
|
|
||||||
count, err = reader.Read(maker.buffer[start : start + count])
|
count, err = reader.Read(maker.buffer[start : start+count])
|
||||||
|
|
||||||
if err != nil && err != io.EOF {
|
if err != nil && err != io.EOF {
|
||||||
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
|
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
|
||||||
@@ -205,7 +205,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
}
|
}
|
||||||
|
|
||||||
maker.bufferSize += count
|
maker.bufferSize += count
|
||||||
fileHasher.Write(maker.buffer[start : start + count])
|
fileHasher.Write(maker.buffer[start : start+count])
|
||||||
fileSize += int64(count)
|
fileSize += int64(count)
|
||||||
|
|
||||||
// if EOF is seen, try to switch to the next file and continue
|
// if EOF is seen, try to switch to the next file and continue
|
||||||
@@ -229,18 +229,17 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
|
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
|
||||||
if (!minimumReached) {
|
if !minimumReached {
|
||||||
|
|
||||||
bytes := maker.minimumChunkSize
|
bytes := maker.minimumChunkSize
|
||||||
|
|
||||||
if maker.bufferStart + bytes < maker.bufferCapacity {
|
if maker.bufferStart+bytes < maker.bufferCapacity {
|
||||||
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart : maker.bufferStart + bytes])
|
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
|
||||||
} else {
|
} else {
|
||||||
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart :])
|
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
|
||||||
hashSum = maker.buzhashSum(hashSum,
|
hashSum = maker.buzhashSum(hashSum,
|
||||||
maker.buffer[: bytes - (maker.bufferCapacity - maker.bufferStart)])
|
maker.buffer[:bytes-(maker.bufferCapacity-maker.bufferStart)])
|
||||||
}
|
}
|
||||||
|
|
||||||
if (hashSum & maker.hashMask) == 0 {
|
if (hashSum & maker.hashMask) == 0 {
|
||||||
@@ -258,7 +257,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
bytes := maker.bufferSize - maker.minimumChunkSize
|
bytes := maker.bufferSize - maker.minimumChunkSize
|
||||||
isEOC := false
|
isEOC := false
|
||||||
maxSize := maker.maximumChunkSize - chunk.GetLength()
|
maxSize := maker.maximumChunkSize - chunk.GetLength()
|
||||||
for i := 0; i < maker.bufferSize - maker.minimumChunkSize; i++ {
|
for i := 0; i < maker.bufferSize-maker.minimumChunkSize; i++ {
|
||||||
out := maker.bufferStart + i
|
out := maker.bufferStart + i
|
||||||
if out >= maker.bufferCapacity {
|
if out >= maker.bufferCapacity {
|
||||||
out -= maker.bufferCapacity
|
out -= maker.bufferCapacity
|
||||||
@@ -269,7 +268,7 @@ func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *C
|
|||||||
}
|
}
|
||||||
|
|
||||||
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
|
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
|
||||||
if (hashSum & maker.hashMask) == 0 || i == maxSize - maker.minimumChunkSize - 1 {
|
if (hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
|
||||||
// A chunk is completed.
|
// A chunk is completed.
|
||||||
bytes = i + 1 + maker.minimumChunkSize
|
bytes = i + 1 + maker.minimumChunkSize
|
||||||
isEOC = true
|
isEOC = true
|
||||||
|
|||||||
@@ -5,12 +5,12 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
|
||||||
"io"
|
"io"
|
||||||
|
"math/rand"
|
||||||
"sort"
|
"sort"
|
||||||
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
|
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
|
||||||
@@ -29,14 +29,14 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
|||||||
|
|
||||||
maker := CreateChunkMaker(config, false)
|
maker := CreateChunkMaker(config, false)
|
||||||
|
|
||||||
var chunks [] string
|
var chunks []string
|
||||||
totalChunkSize := 0
|
totalChunkSize := 0
|
||||||
totalFileSize := int64(0)
|
totalFileSize := int64(0)
|
||||||
|
|
||||||
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
|
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
|
||||||
|
|
||||||
buffers := make([] *bytes.Buffer, n)
|
buffers := make([]*bytes.Buffer, n)
|
||||||
sizes := make([] int, n)
|
sizes := make([]int, n)
|
||||||
sizes[0] = 0
|
sizes[0] = 0
|
||||||
for i := 1; i < n; i++ {
|
for i := 1; i < n; i++ {
|
||||||
same := true
|
same := true
|
||||||
@@ -54,20 +54,20 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
|||||||
|
|
||||||
sort.Sort(sort.IntSlice(sizes))
|
sort.Sort(sort.IntSlice(sizes))
|
||||||
|
|
||||||
for i := 0; i < n - 1; i++ {
|
for i := 0; i < n-1; i++ {
|
||||||
buffers[i] = bytes.NewBuffer(content[sizes[i] : sizes[i + 1]])
|
buffers[i] = bytes.NewBuffer(content[sizes[i]:sizes[i+1]])
|
||||||
}
|
}
|
||||||
buffers[n - 1] = bytes.NewBuffer(content[sizes[n - 1]:])
|
buffers[n-1] = bytes.NewBuffer(content[sizes[n-1]:])
|
||||||
|
|
||||||
i := 0
|
i := 0
|
||||||
|
|
||||||
maker.ForEachChunk(buffers[0],
|
maker.ForEachChunk(buffers[0],
|
||||||
func (chunk *Chunk, final bool) {
|
func(chunk *Chunk, final bool) {
|
||||||
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
|
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
|
||||||
chunks = append(chunks, chunk.GetHash())
|
chunks = append(chunks, chunk.GetHash())
|
||||||
totalChunkSize += chunk.GetLength()
|
totalChunkSize += chunk.GetLength()
|
||||||
},
|
},
|
||||||
func (size int64, hash string) (io.Reader, bool) {
|
func(size int64, hash string) (io.Reader, bool) {
|
||||||
totalFileSize += size
|
totalFileSize += size
|
||||||
i++
|
i++
|
||||||
if i >= len(buffers) {
|
if i >= len(buffers) {
|
||||||
@@ -76,7 +76,7 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
|||||||
return buffers[i], true
|
return buffers[i], true
|
||||||
})
|
})
|
||||||
|
|
||||||
if (totalFileSize != int64(totalChunkSize)) {
|
if totalFileSize != int64(totalChunkSize) {
|
||||||
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
|
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
|
||||||
}
|
}
|
||||||
return chunks, totalChunkSize
|
return chunks, totalChunkSize
|
||||||
@@ -84,9 +84,8 @@ func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunk
|
|||||||
|
|
||||||
func TestChunkMaker(t *testing.T) {
|
func TestChunkMaker(t *testing.T) {
|
||||||
|
|
||||||
|
|
||||||
//sizes := [...] int { 64 }
|
//sizes := [...] int { 64 }
|
||||||
sizes := [...] int { 64, 256, 1024, 1024 * 10 }
|
sizes := [...]int{64, 256, 1024, 1024 * 10}
|
||||||
|
|
||||||
for _, size := range sizes {
|
for _, size := range sizes {
|
||||||
|
|
||||||
@@ -99,15 +98,15 @@ func TestChunkMaker(t *testing.T) {
|
|||||||
|
|
||||||
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
|
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
|
||||||
|
|
||||||
capacities := [...]int { 32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
|
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
|
||||||
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
|
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
|
||||||
32, 48, 64, 128, 256, 512, 1024, 2048, }
|
32, 48, 64, 128, 256, 512, 1024, 2048}
|
||||||
|
|
||||||
//capacities := [...]int { 32 }
|
//capacities := [...]int { 32 }
|
||||||
|
|
||||||
for _, capacity := range capacities {
|
for _, capacity := range capacities {
|
||||||
|
|
||||||
for _, n := range [...]int { 6, 7, 8, 9, 10 } {
|
for _, n := range [...]int{6, 7, 8, 9, 10} {
|
||||||
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
|
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
|
||||||
|
|
||||||
if totalSize1 != totalSize2 {
|
if totalSize1 != totalSize2 {
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ import (
|
|||||||
|
|
||||||
// ChunkUploadTask represents a chunk to be uploaded.
|
// ChunkUploadTask represents a chunk to be uploaded.
|
||||||
type ChunkUploadTask struct {
|
type ChunkUploadTask struct {
|
||||||
chunk * Chunk
|
chunk *Chunk
|
||||||
chunkIndex int
|
chunkIndex int
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -36,7 +36,7 @@ type ChunkUploader struct {
|
|||||||
// CreateChunkUploader creates a chunk uploader.
|
// CreateChunkUploader creates a chunk uploader.
|
||||||
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
|
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
|
||||||
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
|
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
|
||||||
uploader := &ChunkUploader {
|
uploader := &ChunkUploader{
|
||||||
config: config,
|
config: config,
|
||||||
storage: storage,
|
storage: storage,
|
||||||
snapshotCache: snapshotCache,
|
snapshotCache: snapshotCache,
|
||||||
@@ -56,20 +56,20 @@ func (uploader *ChunkUploader) Start() {
|
|||||||
defer CatchLogException()
|
defer CatchLogException()
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case task := <- uploader.taskQueue:
|
case task := <-uploader.taskQueue:
|
||||||
uploader.Upload(threadIndex, task)
|
uploader.Upload(threadIndex, task)
|
||||||
case <- uploader.stopChannel:
|
case <-uploader.stopChannel:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} (i)
|
}(i)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartChunk sends a chunk to be uploaded to a waiting uploading goroutine. It may block if all uploading goroutines are busy.
|
// StartChunk sends a chunk to be uploaded to a waiting uploading goroutine. It may block if all uploading goroutines are busy.
|
||||||
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
|
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
|
||||||
atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
|
atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
|
||||||
uploader.taskQueue <- ChunkUploadTask {
|
uploader.taskQueue <- ChunkUploadTask{
|
||||||
chunk: chunk,
|
chunk: chunk,
|
||||||
chunkIndex: chunkIndex,
|
chunkIndex: chunkIndex,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,10 +6,10 @@ package duplicacy
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"os"
|
||||||
"time"
|
|
||||||
"path"
|
"path"
|
||||||
"testing"
|
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
crypto_rand "crypto/rand"
|
crypto_rand "crypto/rand"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
@@ -32,7 +32,7 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
debug.PrintStack()
|
debug.PrintStack()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} ()
|
}()
|
||||||
|
|
||||||
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
|
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
|
||||||
os.RemoveAll(testDir)
|
os.RemoveAll(testDir)
|
||||||
@@ -48,7 +48,7 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
storage.EnableTestMode()
|
storage.EnableTestMode()
|
||||||
storage.SetRateLimits(testRateLimit, testRateLimit)
|
storage.SetRateLimits(testRateLimit, testRateLimit)
|
||||||
|
|
||||||
for _, dir := range []string { "chunks", "snapshots" } {
|
for _, dir := range []string{"chunks", "snapshots"} {
|
||||||
err = storage.CreateDirectory(0, dir)
|
err = storage.CreateDirectory(0, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Failed to create directory %s: %v", dir, err)
|
t.Errorf("Failed to create directory %s: %v", dir, err)
|
||||||
@@ -56,7 +56,6 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
numberOfChunks := 100
|
numberOfChunks := 100
|
||||||
maxChunkSize := 64 * 1024
|
maxChunkSize := 64 * 1024
|
||||||
|
|
||||||
@@ -68,11 +67,11 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
|
|
||||||
config := CreateConfig()
|
config := CreateConfig()
|
||||||
config.MinimumChunkSize = 100
|
config.MinimumChunkSize = 100
|
||||||
config.chunkPool = make(chan *Chunk, numberOfChunks * 2)
|
config.chunkPool = make(chan *Chunk, numberOfChunks*2)
|
||||||
totalFileSize := 0
|
totalFileSize := 0
|
||||||
|
|
||||||
for i := 0; i < numberOfChunks; i++ {
|
for i := 0; i < numberOfChunks; i++ {
|
||||||
content := make([]byte, rand.Int() % maxChunkSize + 1)
|
content := make([]byte, rand.Int()%maxChunkSize+1)
|
||||||
_, err = crypto_rand.Read(content)
|
_, err = crypto_rand.Read(content)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Error generating random content: %v", err)
|
t.Errorf("Error generating random content: %v", err)
|
||||||
@@ -102,7 +101,6 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
|
|
||||||
chunkUploader.Stop()
|
chunkUploader.Stop()
|
||||||
|
|
||||||
|
|
||||||
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
|
||||||
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
chunkDownloader.totalChunkSize = int64(totalFileSize)
|
||||||
|
|
||||||
@@ -120,7 +118,7 @@ func TestUploaderAndDownloader(t *testing.T) {
|
|||||||
chunkDownloader.Stop()
|
chunkDownloader.Stop()
|
||||||
|
|
||||||
for _, file := range listChunks(storage) {
|
for _, file := range listChunks(storage) {
|
||||||
err = storage.DeleteFile(0, "chunks/" + file)
|
err = storage.DeleteFile(0, "chunks/"+file)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("Failed to delete the file %s: %v", file, err)
|
t.Errorf("Failed to delete the file %s: %v", file, err)
|
||||||
return
|
return
|
||||||
|
|||||||
@@ -5,18 +5,18 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/json"
|
|
||||||
"bytes"
|
"bytes"
|
||||||
"os"
|
"crypto/hmac"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"hash"
|
"hash"
|
||||||
|
"os"
|
||||||
"runtime"
|
"runtime"
|
||||||
"runtime/debug"
|
"runtime/debug"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"crypto/rand"
|
|
||||||
"crypto/hmac"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
|
|
||||||
blake2 "github.com/minio/blake2b-simd"
|
blake2 "github.com/minio/blake2b-simd"
|
||||||
)
|
)
|
||||||
@@ -58,7 +58,7 @@ type Config struct {
|
|||||||
dryRun bool
|
dryRun bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create an alias to avoid recursive calls on Config.MarshalJSON
|
// Create an alias to avoid recursive calls on Config.MarshalJSON
|
||||||
type aliasedConfig Config
|
type aliasedConfig Config
|
||||||
|
|
||||||
type jsonableConfig struct {
|
type jsonableConfig struct {
|
||||||
@@ -70,9 +70,9 @@ type jsonableConfig struct {
|
|||||||
FileKey string `json:"file-key"`
|
FileKey string `json:"file-key"`
|
||||||
}
|
}
|
||||||
|
|
||||||
func (config *Config) MarshalJSON() ([] byte, error) {
|
func (config *Config) MarshalJSON() ([]byte, error) {
|
||||||
|
|
||||||
return json.Marshal(&jsonableConfig {
|
return json.Marshal(&jsonableConfig{
|
||||||
aliasedConfig: (*aliasedConfig)(config),
|
aliasedConfig: (*aliasedConfig)(config),
|
||||||
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
|
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
|
||||||
HashKey: hex.EncodeToString(config.HashKey),
|
HashKey: hex.EncodeToString(config.HashKey),
|
||||||
@@ -84,7 +84,7 @@ func (config *Config) MarshalJSON() ([] byte, error) {
|
|||||||
|
|
||||||
func (config *Config) UnmarshalJSON(description []byte) (err error) {
|
func (config *Config) UnmarshalJSON(description []byte) (err error) {
|
||||||
|
|
||||||
aliased := &jsonableConfig {
|
aliased := &jsonableConfig{
|
||||||
aliasedConfig: (*aliasedConfig)(config),
|
aliasedConfig: (*aliasedConfig)(config),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -133,7 +133,7 @@ func (config *Config) Print() {
|
|||||||
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
|
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
|
||||||
isEncrypted bool, copyFrom *Config) (config *Config) {
|
isEncrypted bool, copyFrom *Config) (config *Config) {
|
||||||
|
|
||||||
config = &Config {
|
config = &Config{
|
||||||
CompressionLevel: compressionLevel,
|
CompressionLevel: compressionLevel,
|
||||||
AverageChunkSize: averageChunkSize,
|
AverageChunkSize: averageChunkSize,
|
||||||
MaximumChunkSize: maximumChunkSize,
|
MaximumChunkSize: maximumChunkSize,
|
||||||
@@ -142,7 +142,7 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
|
|||||||
|
|
||||||
if isEncrypted {
|
if isEncrypted {
|
||||||
// Randomly generate keys
|
// Randomly generate keys
|
||||||
keys := make([]byte, 32 * 5)
|
keys := make([]byte, 32*5)
|
||||||
_, err := rand.Read(keys)
|
_, err := rand.Read(keys)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("CONFIG_KEY", "Failed to generate random keys: %v", err)
|
LOG_ERROR("CONFIG_KEY", "Failed to generate random keys: %v", err)
|
||||||
@@ -171,26 +171,26 @@ func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maxi
|
|||||||
config.HashKey = copyFrom.HashKey
|
config.HashKey = copyFrom.HashKey
|
||||||
}
|
}
|
||||||
|
|
||||||
-    config.chunkPool = make(chan *Chunk, runtime.NumCPU() * 16)
+    config.chunkPool = make(chan *Chunk, runtime.NumCPU()*16)

     return config
 }

 func CreateConfig() (config *Config) {
-    return &Config {
+    return &Config{
         HashKey:          DEFAULT_KEY,
         IDKey:            DEFAULT_KEY,
         CompressionLevel: DEFAULT_COMPRESSION_LEVEL,
-        chunkPool: make(chan *Chunk, runtime.NumCPU() * 16),
+        chunkPool: make(chan *Chunk, runtime.NumCPU()*16),
     }
 }

 func (config *Config) GetChunk() (chunk *Chunk) {
     select {
-    case chunk = <- config.chunkPool :
+    case chunk = <-config.chunkPool:
     default:
         numberOfChunks := atomic.AddInt32(&config.numberOfChunks, 1)
-        if numberOfChunks >= int32(runtime.NumCPU() * 16) {
+        if numberOfChunks >= int32(runtime.NumCPU()*16) {
             LOG_WARN("CONFIG_CHUNK", "%d chunks have been allocated", numberOfChunks)
             if _, found := os.LookupEnv("DUPLICACY_CHUNK_DEBUG"); found {
                 debug.PrintStack()
@@ -201,7 +201,7 @@ func (config *Config) GetChunk() (chunk *Chunk) {
     return chunk
 }

-func (config *Config) PutChunk(chunk *Chunk){
+func (config *Config) PutChunk(chunk *Chunk) {

     if chunk == nil {
         return
@@ -216,7 +216,7 @@ func (config *Config) PutChunk(chunk *Chunk){

 func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
     if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
-        hasher, err := blake2.New(&blake2.Config{ Size: 32, Key:key })
+        hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
         if err != nil {
             LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
         }
@@ -259,9 +259,9 @@ func (hasher *DummyHasher) BlockSize() int {

 func (config *Config) NewFileHasher() hash.Hash {
     if SkipFileHash {
-        return &DummyHasher {}
+        return &DummyHasher{}
     } else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
-        hasher, _ := blake2.New(&blake2.Config{ Size: 32 })
+        hasher, _ := blake2.New(&blake2.Config{Size: 32})
         return hasher
     } else {
         return sha256.New()
@@ -344,7 +344,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt

 }

-func UploadConfig(storage Storage, config *Config, password string) (bool) {
+func UploadConfig(storage Storage, config *Config, password string) bool {

     // This is the key to encrypt the config file.
     var masterKey []byte
@@ -390,7 +390,7 @@ func UploadConfig(storage Storage, config *Config, password string) (bool) {
         config.Print()
     }

-    for _, subDir := range []string {"chunks", "snapshots"} {
+    for _, subDir := range []string{"chunks", "snapshots"} {
         err = storage.CreateDirectory(0, subDir)
         if err != nil {
             LOG_ERROR("CONFIG_MKDIR", "Failed to create storage subdirectory: %v", err)
@@ -417,7 +417,6 @@ func ConfigStorage(storage Storage, compressionLevel int, averageChunkSize int,
         return false
     }

-
     config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
         copyFrom)
     if config == nil {
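The hunks above reformat Config's chunk pool, a buffered channel used as a bounded free list: GetChunk reuses an idle chunk or allocates a fresh one, and PutChunk returns chunks to the pool. A minimal self-contained sketch of the same pattern follows; the names (pool, chunk, free) are illustrative stand-ins, not duplicacy's API.

// Sketch of the buffered-channel object pool behind GetChunk/PutChunk above.
// Only the pattern mirrors the diff; all identifiers here are illustrative.
package main

import "runtime"

type chunk struct{ data []byte }

type pool struct{ free chan *chunk }

func newPool() *pool {
    // The capacity bounds how many idle chunks are retained, as in the diff.
    return &pool{free: make(chan *chunk, runtime.NumCPU()*16)}
}

func (p *pool) get() *chunk {
    select {
    case c := <-p.free: // reuse an idle chunk if one is available
        return c
    default: // otherwise allocate a fresh one
        return &chunk{}
    }
}

func (p *pool) put(c *chunk) {
    if c == nil {
        return
    }
    select {
    case p.free <- c: // return to the pool if there is room
    default: // drop it and let the GC reclaim it
    }
}

func main() {
    p := newPool()
    c := p.get()
    p.put(c)
}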
@@ -8,6 +8,7 @@ import (
     "fmt"
     "path"
     "strings"

     "github.com/gilbertchen/go-dropbox"
 )

@@ -31,11 +32,11 @@ func CreateDropboxStorage(accessToken string, storageDir string, threads int) (s
     storageDir = "/" + storageDir
 }

-if len(storageDir) > 1 && storageDir[len(storageDir) - 1] == '/' {
-    storageDir = storageDir[:len(storageDir) - 1]
+if len(storageDir) > 1 && storageDir[len(storageDir)-1] == '/' {
+    storageDir = storageDir[:len(storageDir)-1]
 }

-storage = &DropboxStorage {
+storage = &DropboxStorage{
     clients:    clients,
     storageDir: storageDir,
 }
@@ -55,13 +56,13 @@ func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []s
     dir = "/" + dir
 }

-if len(dir) > 1 && dir[len(dir) - 1] == '/' {
-    dir = dir[:len(dir) - 1]
+if len(dir) > 1 && dir[len(dir)-1] == '/' {
+    dir = dir[:len(dir)-1]
 }

-input := &dropbox.ListFolderInput {
-    Path : storage.storageDir + dir,
-    Recursive : false,
+input := &dropbox.ListFolderInput{
+    Path:             storage.storageDir + dir,
+    Recursive:        false,
     IncludeMediaInfo: false,
     IncludeDeleted:   false,
 }
@@ -85,7 +86,7 @@ func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []s

 if output.HasMore {
     output, err = storage.clients[threadIndex].ListFolderContinue(
-        &dropbox.ListFolderContinueInput { Cursor: output.Cursor, })
+        &dropbox.ListFolderContinueInput{Cursor: output.Cursor})

 } else {
     break
@@ -102,7 +103,7 @@ func (storage *DropboxStorage) DeleteFile(threadIndex int, filePath string) (err
     filePath = "/" + filePath
 }

-input := &dropbox.DeleteInput {
+input := &dropbox.DeleteInput{
     Path: storage.storageDir + filePath,
 }
 _, err = storage.clients[threadIndex].Delete(input)
@@ -123,7 +124,7 @@ func (storage *DropboxStorage) MoveFile(threadIndex int, from string, to string)
 if to != "" && to[0] != '/' {
     to = "/" + to
 }
-input := &dropbox.MoveInput {
+input := &dropbox.MoveInput{
     FromPath: storage.storageDir + from,
     ToPath:   storage.storageDir + to,
 }
@@ -137,12 +138,12 @@ func (storage *DropboxStorage) CreateDirectory(threadIndex int, dir string) (err
     dir = "/" + dir
 }

-if len(dir) > 1 && dir[len(dir) - 1] == '/' {
-    dir = dir[:len(dir) - 1]
+if len(dir) > 1 && dir[len(dir)-1] == '/' {
+    dir = dir[:len(dir)-1]
 }

-input := &dropbox.CreateFolderInput {
-    Path : storage.storageDir + dir,
+input := &dropbox.CreateFolderInput{
+    Path: storage.storageDir + dir,
 }

 _, err = storage.clients[threadIndex].CreateFolder(input)
@@ -161,7 +162,7 @@ func (storage *DropboxStorage) GetFileInfo(threadIndex int, filePath string) (ex
     filePath = "/" + filePath
 }

-input := &dropbox.GetMetadataInput {
+input := &dropbox.GetMetadataInput{
     Path:             storage.storageDir + filePath,
     IncludeMediaInfo: false,
 }
@@ -191,9 +192,9 @@ func (storage *DropboxStorage) FindChunk(threadIndex int, chunkID string, isFoss
 // The minimum level of directories to dive into before searching for the chunk file.
 minimumLevel := 1

-for level := 0; level * 2 < len(chunkID); level ++ {
+for level := 0; level*2 < len(chunkID); level++ {
     if level >= minimumLevel {
-        filePath = path.Join(dir, chunkID[2 * level:]) + suffix
+        filePath = path.Join(dir, chunkID[2*level:]) + suffix
         var size int64
         exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
         if err != nil {
@@ -205,7 +206,7 @@ func (storage *DropboxStorage) FindChunk(threadIndex int, chunkID string, isFoss
     }

     // Find the subdirectory the chunk file may reside.
-    subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
+    subDir := path.Join(dir, chunkID[2*level:2*level+2])
     exist, _, _, err = storage.GetFileInfo(threadIndex, subDir)
     if err != nil {
         return "", false, 0, err
@@ -228,7 +229,7 @@ func (storage *DropboxStorage) FindChunk(threadIndex int, chunkID string, isFoss
 }

 // The chunk must be under this subdirectory but it doesn't exist.
-return path.Join(dir, chunkID[2 * level:])[1:] + suffix, false, 0, nil
+return path.Join(dir, chunkID[2*level:])[1:] + suffix, false, 0, nil

 }

@@ -245,7 +246,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch
     filePath = "/" + filePath
 }

-input := &dropbox.DownloadInput {
+input := &dropbox.DownloadInput{
     Path: storage.storageDir + filePath,
 }

@@ -256,7 +257,7 @@ func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, ch

 defer output.Body.Close()

-_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.clients))
+_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
 return err

 }
@@ -267,12 +268,12 @@ func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, cont
     filePath = "/" + filePath
 }

-input := &dropbox.UploadInput {
+input := &dropbox.UploadInput{
     Path:       storage.storageDir + filePath,
     Mode:       dropbox.WriteModeOverwrite,
     AutoRename: false,
     Mute:       true,
-    Reader: CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients)),
+    Reader:     CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.clients)),
 }

 _, err = storage.clients[threadIndex].Upload(input)
@@ -281,16 +282,16 @@ func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, cont

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *DropboxStorage) IsCacheNeeded() (bool) { return true }
+func (storage *DropboxStorage) IsCacheNeeded() bool { return true }

 // If the 'MoveFile' method is implemented.
-func (storage *DropboxStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *DropboxStorage) IsMoveFileImplemented() bool { return true }

 // If the storage can guarantee strong consistency.
-func (storage *DropboxStorage) IsStrongConsistent() (bool) { return false }
+func (storage *DropboxStorage) IsStrongConsistent() bool { return false }

 // If the storage supports fast listing of files names.
-func (storage *DropboxStorage) IsFastListing() (bool) { return false }
+func (storage *DropboxStorage) IsFastListing() bool { return false }

 // Enable the test mode.
 func (storage *DropboxStorage) EnableTestMode() {}
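FindChunk in the hunks above walks two-hex-digit subdirectories of the chunk ID until it finds the chunk file (or the deepest existing subdirectory). A hedged sketch of the path arithmetic follows; exists() is a stand-in for the storage's GetFileInfo call, and the example data is invented.

// Sketch of the nested chunk-path scheme from FindChunk above: each directory
// level consumes two hex digits of the chunk ID (chunkID[2*level : 2*level+2]),
// and the remainder of the ID becomes the file name at that level.
package main

import (
    "fmt"
    "path"
)

func chunkPath(chunkID string, minimumLevel int, exists func(string) bool) string {
    dir := "chunks"
    for level := 0; level*2 < len(chunkID); level++ {
        if level >= minimumLevel {
            filePath := path.Join(dir, chunkID[2*level:])
            if exists(filePath) {
                return filePath // found the chunk file at this level
            }
        }
        subDir := path.Join(dir, chunkID[2*level:2*level+2])
        if !exists(subDir) {
            // The chunk would live under this subdirectory, but it doesn't exist.
            return path.Join(dir, chunkID[2*level:])
        }
        dir = subDir // descend one level and keep searching
    }
    return ""
}

func main() {
    // With only "chunks/ab" present, the chunk's candidate path nests one level deep.
    present := map[string]bool{"chunks/ab": true}
    p := chunkPath("abcdef012345", 1, func(s string) bool { return present[s] })
    fmt.Println(p) // chunks/ab/cdef012345
}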
@@ -4,22 +4,20 @@
 package duplicacy

 import (
-    "os"
-    "fmt"
-    "path/filepath"
-    "io/ioutil"
-    "sort"
-    "regexp"
-    "strconv"
-    "time"
-    "encoding/json"
     "encoding/base64"
-    "strings"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path/filepath"
+    "regexp"
     "runtime"
+    "sort"
+    "strconv"
+    "strings"
+    "time"
 )


 // This is the hidden directory in the repository for storing various files.
 var DUPLICACY_DIRECTORY = ".duplicacy"
 var DUPLICACY_FILE = ".duplicacy"
@@ -50,18 +48,18 @@ type Entry struct {
 // CreateEntry creates an entry from file properties.
 func CreateEntry(path string, size int64, time int64, mode uint32) *Entry {

-    if len(path) > 0 && path[len(path) - 1] != '/' && (mode & uint32(os.ModeDir)) != 0 {
+    if len(path) > 0 && path[len(path)-1] != '/' && (mode&uint32(os.ModeDir)) != 0 {
         path += "/"
     }

-    return &Entry {
-        Path : path,
-        Size : size,
-        Time : time,
-        Mode : mode,
+    return &Entry{
+        Path: path,
+        Size: size,
+        Time: time,
+        Mode: mode,

-        UID : -1,
-        GID : -1,
+        UID: -1,
+        GID: -1,
     }

 }
@@ -72,15 +70,15 @@ func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {

 mode := fileInfo.Mode()

-if mode & os.ModeDir != 0 && mode & os.ModeSymlink != 0 {
+if mode&os.ModeDir != 0 && mode&os.ModeSymlink != 0 {
     mode ^= os.ModeDir
 }

-if path[len(path) - 1] != '/' && mode & os.ModeDir != 0 {
+if path[len(path)-1] != '/' && mode&os.ModeDir != 0 {
     path += "/"
 }

-entry := &Entry {
+entry := &Entry{
     Path: path,
     Size: fileInfo.Size(),
     Time: fileInfo.ModTime().Unix(),
@@ -95,14 +93,14 @@ func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {
 // CreateEntryFromJSON creates an entry from a json description.
 func (entry *Entry) UnmarshalJSON(description []byte) (err error) {

-    var object map[string]interface {}
+    var object map[string]interface{}

     err = json.Unmarshal(description, &object)
     if err != nil {
         return err
     }

-    var value interface {}
+    var value interface{}
     var ok bool

     if value, ok = object["name"]; ok {
@@ -171,7 +169,7 @@ func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
 }

 if value, ok = object["attributes"]; ok {
-    if attributes, ok := value.(map[string]interface {}); !ok {
+    if attributes, ok := value.(map[string]interface{}); !ok {
         return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
     } else {
         entry.Attributes = make(map[string][]byte)
@@ -251,7 +249,7 @@ func (entry *Entry) convertToObject(encodeName bool) map[string]interface{} {
 }

 // MarshalJSON returns the json description of an entry.
-func (entry *Entry) MarshalJSON() ([] byte, error) {
+func (entry *Entry) MarshalJSON() ([]byte, error) {

     object := entry.convertToObject(true)
     description, err := json.Marshal(object)
@@ -259,15 +257,15 @@ func (entry *Entry) MarshalJSON() ([] byte, error) {
 }

 func (entry *Entry) IsFile() bool {
-    return entry.Mode & uint32(os.ModeType) == 0
+    return entry.Mode&uint32(os.ModeType) == 0
 }

 func (entry *Entry) IsDir() bool {
-    return entry.Mode & uint32(os.ModeDir) != 0
+    return entry.Mode&uint32(os.ModeDir) != 0
 }

 func (entry *Entry) IsLink() bool {
-    return entry.Mode & uint32(os.ModeSymlink) != 0
+    return entry.Mode&uint32(os.ModeSymlink) != 0
 }

 func (entry *Entry) GetPermissions() os.FileMode {
@@ -275,12 +273,12 @@ func (entry *Entry) GetPermissions() os.FileMode {
 }

 func (entry *Entry) IsSameAs(other *Entry) bool {
-    return entry.Size == other.Size && entry.Time <= other.Time + 1 && entry.Time >= other.Time - 1
+    return entry.Size == other.Size && entry.Time <= other.Time+1 && entry.Time >= other.Time-1
 }

 func (entry *Entry) IsSameAsFileInfo(other os.FileInfo) bool {
     time := other.ModTime().Unix()
-    return entry.Size == other.Size() && entry.Time <= time + 1 && entry.Time >= time - 1
+    return entry.Size == other.Size() && entry.Time <= time+1 && entry.Time >= time-1
 }

 func (entry *Entry) String(maxSizeDigits int) string {
@@ -299,7 +297,7 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool
 }
 }

-if (*fileInfo).Mode() & os.ModePerm != entry.GetPermissions() {
+if (*fileInfo).Mode()&os.ModePerm != entry.GetPermissions() {
     err := os.Chmod(fullPath, entry.GetPermissions())
     if err != nil {
         LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
@@ -323,7 +321,6 @@ func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool
 return SetOwner(fullPath, entry, fileInfo)
 }

-
 // Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
 // Files are always arranged before subdirectories under the same parent directory.
 func (left *Entry) Compare(right *Entry) int {
@@ -417,14 +414,14 @@ func (files FileInfoCompare) Less(i, j int) bool {
 left := files[i]
 right := files[j]

-if left.IsDir() && left.Mode() & os.ModeSymlink == 0 {
-    if right.IsDir() && right.Mode() & os.ModeSymlink == 0 {
+if left.IsDir() && left.Mode()&os.ModeSymlink == 0 {
+    if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
         return left.Name() < right.Name()
     } else {
         return false
     }
 } else {
-    if right.IsDir() && right.Mode() & os.ModeSymlink == 0 {
+    if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
         return true
     } else {
         return left.Name() < right.Name()
@@ -434,8 +431,8 @@ func (files FileInfoCompare) Less(i, j int) bool {

 // ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
 // are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
-func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string, discardAttributes bool) (directoryList []*Entry,
-    skippedFiles [] string, err error) {
+func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, discardAttributes bool) (directoryList []*Entry,
+    skippedFiles []string, err error) {

     LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)

@@ -449,12 +446,12 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
 }

 normalizedPath := path
-if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath) - 1] != '/' {
+if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath)-1] != '/' {
     normalizedPath += "/"
 }

 normalizedTop := top
-if normalizedTop != "" && normalizedTop[len(normalizedTop) - 1] != '/' {
+if normalizedTop != "" && normalizedTop[len(normalizedTop)-1] != '/' {
     normalizedTop += "/"
 }

@@ -475,7 +472,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
 isRegular := false
 isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
 if err != nil {
-    LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err )
+    LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
     skippedFiles = append(skippedFiles, entry.Path)
     continue
 }
@@ -485,7 +482,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
 } else if path == "" && filepath.IsAbs(entry.Link) && !strings.HasPrefix(entry.Link, normalizedTop) {
     stat, err := os.Stat(filepath.Join(top, entry.Path))
     if err != nil {
-        LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err )
+        LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
         skippedFiles = append(skippedFiles, entry.Path)
         continue
     }
@@ -504,7 +501,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
 entry.ReadAttributes(top)
 }

-if f.Mode() & (os.ModeNamedPipe | os.ModeSocket | os.ModeDevice) != 0 {
+if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
     LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
     skippedFiles = append(skippedFiles, entry.Path)
     continue
@@ -526,7 +523,7 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
 }
 }

-for i, j := 0, len(directoryList) - 1; i < j; i, j = i + 1, j - 1 {
+for i, j := 0, len(directoryList)-1; i < j; i, j = i+1, j-1 {
     directoryList[i], directoryList[j] = directoryList[j], directoryList[i]
 }

@@ -534,8 +531,8 @@ func ListEntries(top string, path string, fileList *[]*Entry, patterns [] string
 }

 // Diff returns how many bytes remain unmodified between two files.
-func (entry *Entry) Diff(chunkHashes[]string, chunkLengths[]int,
-    otherHashes[]string, otherLengths [] int) (modifiedLength int64) {
+func (entry *Entry) Diff(chunkHashes []string, chunkLengths []int,
+    otherHashes []string, otherLengths []int) (modifiedLength int64) {

     var offset1, offset2 int64
     i1 := entry.StartChunk
@@ -559,7 +556,7 @@ func (entry *Entry) Diff(chunkHashes[]string, chunkLengths[]int,
 offset2 += int64(otherLengths[i2])
 i2++
 } else {
-    if chunkHashes[i1] == otherHashes[i2] && end - start == otherLengths[i2] {
+    if chunkHashes[i1] == otherHashes[i2] && end-start == otherLengths[i2] {
     } else {
         modifiedLength += int64(chunkLengths[i1])
     }
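IsSameAs and IsSameAsFileInfo in the hunks above treat two entries as unchanged when their sizes match and their modification times agree to within one second, which absorbs filesystems that store coarse timestamps. A small self-contained sketch of the tolerance check; the struct is a pared-down stand-in for duplicacy's Entry.

// Sketch of the ±1-second modification-time tolerance used by Entry.IsSameAs above.
package main

import "fmt"

type entry struct {
    Size int64
    Time int64 // Unix seconds
}

func sameAs(a, b entry) bool {
    return a.Size == b.Size && a.Time <= b.Time+1 && a.Time >= b.Time-1
}

func main() {
    fmt.Println(sameAs(entry{10, 100}, entry{10, 101})) // true: within tolerance
    fmt.Println(sameAs(entry{10, 100}, entry{10, 102})) // false: too far apart
}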
@@ -5,17 +5,17 @@
 package duplicacy

 import (
-    "testing"
     "io/ioutil"
+    "math/rand"
     "os"
     "path/filepath"
-    "math/rand"
     "sort"
+    "testing"
 )

 func TestEntrySort(t *testing.T) {

-    DATA := [...]string {
+    DATA := [...]string{
         "ab",
         "ab-",
         "ab0",
@@ -44,15 +44,15 @@ func TestEntrySort(t *testing.T) {
 var entry1, entry2 *Entry

 for i, p1 := range DATA {
-    if p1[len(p1) - 1] == '/' {
-        entry1 = CreateEntry(p1, 0, 0, 0700 | uint32(os.ModeDir))
+    if p1[len(p1)-1] == '/' {
+        entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
     } else {
         entry1 = CreateEntry(p1, 0, 0, 0700)
     }
     for j, p2 := range DATA {

-        if p2[len(p2) - 1] == '/' {
-            entry2 = CreateEntry(p2, 0, 0, 0700 | uint32(os.ModeDir))
+        if p2[len(p2)-1] == '/' {
+            entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
         } else {
             entry2 = CreateEntry(p2, 0, 0, 0700)
         }
@@ -88,7 +88,7 @@ func TestEntryList(t *testing.T) {
 os.RemoveAll(testDir)
 os.MkdirAll(testDir, 0700)

-DATA := [...]string {
+DATA := [...]string{
     "ab",
     "ab-",
     "ab0",
@@ -107,19 +107,18 @@ func TestEntryList(t *testing.T) {
     "ab3/c",
 }

-
 var entry1, entry2 *Entry

 for i, p1 := range DATA {
-    if p1[len(p1) - 1] == '/' {
-        entry1 = CreateEntry(p1, 0, 0, 0700 | uint32(os.ModeDir))
+    if p1[len(p1)-1] == '/' {
+        entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
     } else {
         entry1 = CreateEntry(p1, 0, 0, 0700)
     }
     for j, p2 := range DATA {

-        if p2[len(p2) - 1] == '/' {
-            entry2 = CreateEntry(p2, 0, 0, 0700 | uint32(os.ModeDir))
+        if p2[len(p2)-1] == '/' {
+            entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
         } else {
             entry2 = CreateEntry(p2, 0, 0, 0700)
         }
@@ -151,7 +150,7 @@ func TestEntryList(t *testing.T) {
 for _, file := range DATA {

     fullPath := filepath.Join(testDir, file)
-    if file[len(file) - 1] == '/' {
+    if file[len(file)-1] == '/' {
         err := os.Mkdir(fullPath, 0700)
         if err != nil {
             t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
@@ -171,8 +170,8 @@ func TestEntryList(t *testing.T) {
 entries := make([]*Entry, 0, 4)

 for len(directories) > 0 {
-    directory := directories[len(directories) - 1]
-    directories = directories[:len(directories) - 1]
+    directory := directories[len(directories)-1]
+    directories = directories[:len(directories)-1]
     entries = append(entries, directory)
     subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, false)
     if err != nil {
@@ -217,4 +216,3 @@ func TestEntryList(t *testing.T) {
 }

 }
-
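The tests above exercise the ordering that Entry.Compare and FileInfoCompare.Less implement: regular files sort before subdirectories under the same parent, and by name within each group. A hedged sketch of that ordering on a stand-in type; the entry struct and sample paths here are invented for illustration.

// Sketch of the files-before-directories ordering tested above; a trailing
// '/' marks a directory, mirroring the test data's convention.
package main

import (
    "fmt"
    "sort"
    "strings"
)

type entry struct{ path string }

func (e entry) isDir() bool { return strings.HasSuffix(e.path, "/") }

func main() {
    entries := []entry{{"b/"}, {"a"}, {"a0/"}, {"c"}}
    sort.Slice(entries, func(i, j int) bool {
        left, right := entries[i], entries[j]
        if left.isDir() != right.isDir() {
            return !left.isDir() // files come before subdirectories
        }
        return left.path < right.path // otherwise compare by name
    })
    fmt.Println(entries) // [{a} {c} {a0/} {b/}]
}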
@@ -11,19 +11,19 @@ import (
 // FileReader wraps a number of files and turns them into a series of readers.
 type FileReader struct {
     top   string
-    files [] *Entry
+    files []*Entry

     CurrentFile  *os.File
     CurrentIndex int
     CurrentEntry *Entry

-    SkippedFiles [] string
+    SkippedFiles []string
 }

 // CreateFileReader creates a file reader.
-func CreateFileReader(top string, files[] *Entry) (*FileReader) {
+func CreateFileReader(top string, files []*Entry) *FileReader {

-    reader := &FileReader {
+    reader := &FileReader{
         top:          top,
         files:        files,
         CurrentIndex: -1,
@@ -35,7 +35,7 @@ func CreateFileReader(top string, files[] *Entry) (*FileReader) {
 }

 // NextFile switches to the next file in the file reader.
-func (reader *FileReader) NextFile() bool{
+func (reader *FileReader) NextFile() bool {

     if reader.CurrentFile != nil {
         reader.CurrentFile.Close()
@@ -68,7 +68,3 @@ func (reader *FileReader) NextFile() bool{
 reader.CurrentFile = nil
 return false
 }
-
-
-
-
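FileReader above keeps exactly one file open at a time and advances with NextFile(), which closes the previous file before opening the next. A hedged sketch of the same iteration pattern; the field and method names mimic the diff, but this is a standalone illustration, not duplicacy's type.

// Sketch of the NextFile() iteration pattern from FileReader above.
package main

import (
    "fmt"
    "os"
    "path/filepath"
)

type fileReader struct {
    top          string
    files        []string
    CurrentFile  *os.File
    CurrentIndex int
}

func (r *fileReader) NextFile() bool {
    if r.CurrentFile != nil {
        r.CurrentFile.Close() // release the previous file before advancing
        r.CurrentFile = nil
    }
    for r.CurrentIndex+1 < len(r.files) {
        r.CurrentIndex++
        f, err := os.Open(filepath.Join(r.top, r.files[r.CurrentIndex]))
        if err != nil {
            continue // skip unreadable files, as the diff's reader skips them
        }
        r.CurrentFile = f
        return true
    }
    return false
}

func main() {
    r := &fileReader{top: ".", files: []string{"go.mod"}, CurrentIndex: -1}
    for r.NextFile() {
        fmt.Println("reading", r.files[r.CurrentIndex])
    }
}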
@@ -5,13 +5,13 @@
 package duplicacy

 import (
-    "os"
     "fmt"
-    "path"
     "io"
     "io/ioutil"
-    "time"
     "math/rand"
+    "os"
+    "path"
+    "time"
 )

 // FileStorage is a local on-disk file storage implementing the Storage interface.
@@ -45,12 +45,12 @@ func CreateFileStorage(storageDir string, minimumLevel int, isCacheNeeded bool,
 }
 }

-for storageDir[len(storageDir) - 1] == '/' {
-    storageDir = storageDir[:len(storageDir) - 1]
+for storageDir[len(storageDir)-1] == '/' {
+    storageDir = storageDir[:len(storageDir)-1]
 }

-storage = &FileStorage {
-    storageDir : storageDir,
+storage = &FileStorage{
+    storageDir:      storageDir,
     minimumLevel:    minimumLevel,
     isCacheNeeded:   isCacheNeeded,
     numberOfThreads: threads,
@@ -77,7 +77,7 @@ func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []stri

 for _, f := range list {
     name := f.Name()
-    if f.IsDir() && name[len(name) - 1] != '/' {
+    if f.IsDir() && name[len(name)-1] != '/' {
         name += "/"
     }
     files = append(files, name)
@@ -136,23 +136,23 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
 suffix = ".fsl"
 }

-for level := 0; level * 2 < len(chunkID); level ++ {
+for level := 0; level*2 < len(chunkID); level++ {
     if level >= storage.minimumLevel {
-        filePath = path.Join(dir, chunkID[2 * level:]) + suffix
+        filePath = path.Join(dir, chunkID[2*level:]) + suffix
         // Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
         // really a need to follow the link if filePath is a link.
         stat, err := os.Lstat(filePath)
         if err != nil {
             LOG_DEBUG("FS_FIND", "File %s can't be found: %v", filePath, err)
         } else if stat.IsDir() {
-            return filePath[len(storage.storageDir) + 1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
+            return filePath[len(storage.storageDir)+1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
         } else {
-            return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
+            return filePath[len(storage.storageDir)+1:], true, stat.Size(), nil
         }
     }

     // Find the subdirectory the chunk file may reside.
-    subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
+    subDir := path.Join(dir, chunkID[2*level:2*level+2])
     stat, err := os.Stat(subDir)
     if err == nil && stat.IsDir() {
         dir = subDir
@@ -179,7 +179,7 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
 }

 // The chunk must be under this subdirectory but it doesn't exist.
-return path.Join(dir, chunkID[2 * level:])[len(storage.storageDir) + 1:] + suffix, false, 0, nil
+return path.Join(dir, chunkID[2*level:])[len(storage.storageDir)+1:] + suffix, false, 0, nil

 }

@@ -197,7 +197,7 @@ func (storage *FileStorage) DownloadFile(threadIndex int, filePath string, chunk
 }

 defer file.Close()
-if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit / storage.numberOfThreads); err != nil {
+if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
     return err
 }

@@ -218,12 +218,12 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content

 temporaryFile := fullPath + "." + string(suffix) + ".tmp"

-file, err := os.OpenFile(temporaryFile, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0644)
+file, err := os.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
 if err != nil {
     return err
 }

-reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
+reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
 _, err = io.Copy(file, reader)
 if err != nil {
     file.Close()
@@ -248,16 +248,16 @@ func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *FileStorage) IsCacheNeeded () (bool) { return storage.isCacheNeeded }
+func (storage *FileStorage) IsCacheNeeded() bool { return storage.isCacheNeeded }

 // If the 'MoveFile' method is implemented.
-func (storage *FileStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *FileStorage) IsMoveFileImplemented() bool { return true }

 // If the storage can guarantee strong consistency.
-func (storage *FileStorage) IsStrongConsistent() (bool) { return true }
+func (storage *FileStorage) IsStrongConsistent() bool { return true }

 // If the storage supports fast listing of files names.
-func (storage *FileStorage) IsFastListing() (bool) { return false }
+func (storage *FileStorage) IsFastListing() bool { return false }

 // Enable the test mode.
 func (storage *FileStorage) EnableTestMode() {}
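FileStorage.UploadFile above writes incoming content to a "<name>.<suffix>.tmp" file and, per the surrounding code not shown in this hunk, moves it into place afterwards, so readers never observe a partially written chunk. A minimal sketch of that write-to-temporary-then-rename pattern under the assumption that a plain os.Rename performs the final publish; the paths are illustrative.

// Sketch of the write-then-rename pattern visible in UploadFile above; the
// rename makes the final file appear atomically on POSIX filesystems.
package main

import (
    "io"
    "os"
    "strings"
)

func uploadAtomically(fullPath string, content io.Reader) error {
    temporaryFile := fullPath + ".tmp"
    file, err := os.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }
    if _, err = io.Copy(file, content); err != nil {
        file.Close()
        os.Remove(temporaryFile) // don't leave a partial file behind
        return err
    }
    if err = file.Close(); err != nil {
        return err
    }
    return os.Rename(temporaryFile, fullPath) // atomic publish
}

func main() {
    _ = uploadAtomically(os.TempDir()+"/example-chunk", strings.NewReader("chunk data"))
}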
@@ -5,18 +5,18 @@
 package duplicacy

 import (
-    "io"
+    "encoding/json"
     "fmt"
-    "net"
-    "path"
-    "time"
-    "sync"
-    "strings"
-    "net/http"
-    "net/url"
+    "io"
     "io/ioutil"
     "math/rand"
-    "encoding/json"
+    "net"
+    "net/http"
+    "net/url"
+    "path"
+    "strings"
+    "sync"
+    "time"

     "golang.org/x/net/context"
     "golang.org/x/oauth2"
@@ -35,7 +35,6 @@ type GCDStorage struct {
     isConnected     bool
     numberOfThreads int
     TestMode        bool
-
 }

 type GCDConfig struct {
@@ -96,9 +95,9 @@ func (storage *GCDStorage) shouldRetry(threadIndex int, err error) (bool, error)
     return true, nil
 }

-func (storage *GCDStorage) convertFilePath(filePath string) (string) {
+func (storage *GCDStorage) convertFilePath(filePath string) string {
     if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
-        return "fossils/" + filePath[len("chunks/"):len(filePath) - len(".fsl")]
+        return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
     }
     return filePath
 }
@@ -135,7 +134,7 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
     return nil, fmt.Errorf("No parent ID provided")
 }

-files := []*drive.File {}
+files := []*drive.File{}

 startToken := ""

@@ -174,7 +173,6 @@ func (storage *GCDStorage) listFiles(threadIndex int, parentID string, listFiles
     }
 }

-
 return files, nil
 }

@@ -238,7 +236,7 @@ func (storage *GCDStorage) getIDFromPath(threadIndex int, path string) (string,
 if fileID == "" {
     return "", fmt.Errorf("Path %s doesn't exist", path)
 }
-if i != len(names) - 1 && !isDir {
+if i != len(names)-1 && !isDir {
     return "", fmt.Errorf("Invalid path %s", path)
 }
 }
@@ -253,7 +251,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
     return nil, err
 }

-gcdConfig := &GCDConfig {}
+gcdConfig := &GCDConfig{}
 if err := json.Unmarshal(description, gcdConfig); err != nil {
     return nil, err
 }
@@ -271,7 +269,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
     return nil, err
 }

-storage = &GCDStorage {
+storage = &GCDStorage{
     service:         service,
     numberOfThreads: threads,
     idCache:         make(map[string]string),
@@ -286,7 +284,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag

 storage.idCache[""] = storagePathID

-for _, dir := range []string { "chunks", "snapshots", "fossils" } {
+for _, dir := range []string{"chunks", "snapshots", "fossils"} {
     dirID, isDir, _, err := storage.listByName(0, storagePathID, dir)
     if err != nil {
         return nil, err
@@ -297,7 +295,7 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag
     return nil, err
 }
 } else if !isDir {
-    return nil, fmt.Errorf("%s/%s is not a directory", storagePath + "/" + dir)
+    return nil, fmt.Errorf("%s/%s is not a directory", storagePath+"/"+dir)
 } else {
     storage.idCache[dir] = dirID
 }
@@ -311,8 +309,8 @@ func CreateGCDStorage(tokenFile string, storagePath string, threads int) (storag

 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
-    for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-        dir = dir[:len(dir) - 1]
+    for len(dir) > 0 && dir[len(dir)-1] == '/' {
+        dir = dir[:len(dir)-1]
     }

     if dir == "snapshots" {
@@ -325,8 +323,8 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 subDirs := []string{}

 for _, file := range files {
-    storage.savePathID("snapshots/" + file.Name, file.Id)
-    subDirs = append(subDirs, file.Name + "/")
+    storage.savePathID("snapshots/"+file.Name, file.Id)
+    subDirs = append(subDirs, file.Name+"/")
 }
 return subDirs, nil, nil
 } else if strings.HasPrefix(dir, "snapshots/") {
@@ -343,7 +341,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 files := []string{}

 for _, entry := range entries {
-    storage.savePathID(dir + "/" + entry.Name, entry.Id)
+    storage.savePathID(dir+"/"+entry.Name, entry.Id)
     files = append(files, entry.Name)
 }
 return files, nil, nil
@@ -351,7 +349,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 files := []string{}
 sizes := []int64{}

-for _, parent := range []string { "chunks", "fossils" } {
+for _, parent := range []string{"chunks", "fossils"} {
     entries, err := storage.listFiles(threadIndex, storage.getPathID(parent), true)
     if err != nil {
         return nil, nil, err
@@ -362,7 +360,7 @@ func (storage *GCDStorage) ListFiles(threadIndex int, dir string) ([]string, []i
 if parent == "fossils" {
     name += ".fsl"
 }
-storage.savePathID(parent + "/" + entry.Name, entry.Id)
+storage.savePathID(parent+"/"+entry.Name, entry.Id)
 files = append(files, name)
 sizes = append(sizes, entry.Size)
 }
@@ -438,8 +436,8 @@ func (storage *GCDStorage) MoveFile(threadIndex int, from string, to string) (er
 // CreateDirectory creates a new directory.
 func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err error) {

-    for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-        dir = dir[:len(dir) - 1]
+    for len(dir) > 0 && dir[len(dir)-1] == '/' {
+        dir = dir[:len(dir)-1]
     }

     exist, isDir, _, err := storage.GetFileInfo(threadIndex, dir)
@@ -462,10 +460,10 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err
     name = dir[len("snapshots/"):]
 }

-file := &drive.File {
+file := &drive.File{
     Name:     name,
     MimeType: "application/vnd.google-apps.folder",
-    Parents: []string { parentID },
+    Parents:  []string{parentID},
 }

 for {
@@ -485,8 +483,8 @@ func (storage *GCDStorage) CreateDirectory(threadIndex int, dir string) (err err

 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *GCDStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
-    for len(filePath) > 0 && filePath[len(filePath) - 1] == '/' {
-        filePath = filePath[:len(filePath) - 1]
+    for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
+        filePath = filePath[:len(filePath)-1]
     }

     // GetFileInfo is never called on a fossil
@@ -568,7 +566,7 @@ func (storage *GCDStorage) DownloadFile(threadIndex int, filePath string, chunk

 defer response.Body.Close()

-_, err = RateLimitedCopy(chunk, response.Body, storage.DownloadRateLimit / storage.numberOfThreads)
+_, err = RateLimitedCopy(chunk, response.Body, storage.DownloadRateLimit/storage.numberOfThreads)
 return err
 }

@@ -591,14 +589,14 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content
     storage.savePathID(parent, parentID)
 }

-file := &drive.File {
+file := &drive.File{
     Name:     path.Base(filePath),
     MimeType: "application/octet-stream",
-    Parents: []string { parentID },
+    Parents:  []string{parentID},
 }

 for {
-    reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
+    reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
     _, err = storage.service.Files.Create(file).Media(reader).Fields("id").Do()
     if retry, err := storage.shouldRetry(threadIndex, err); err == nil && !retry {
         break
@@ -614,16 +612,16 @@ func (storage *GCDStorage) UploadFile(threadIndex int, filePath string, content

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *GCDStorage) IsCacheNeeded() (bool) { return true }
+func (storage *GCDStorage) IsCacheNeeded() bool { return true }

 // If the 'MoveFile' method is implemented.
-func (storage *GCDStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *GCDStorage) IsMoveFileImplemented() bool { return true }

 // If the storage can guarantee strong consistency.
-func (storage *GCDStorage) IsStrongConsistent() (bool) { return false }
+func (storage *GCDStorage) IsStrongConsistent() bool { return false }

 // If the storage supports fast listing of files names.
-func (storage *GCDStorage) IsFastListing() (bool) { return true }
+func (storage *GCDStorage) IsFastListing() bool { return true }

 // Enable the test mode.
 func (storage *GCDStorage) EnableTestMode() { storage.TestMode = true }
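convertFilePath in the hunks above maps a fossil path "chunks/<id>.fsl" to "fossils/<id>": the Google Drive storage keeps fossils in a separate folder rather than marking them with a suffix as the file storage does. The function is small enough to reproduce verbatim in a runnable form; only the surrounding main is added for illustration.

// The fossil-path mapping from GCDStorage.convertFilePath above, extracted
// into a standalone program. Everything outside the mapping passes through.
package main

import (
    "fmt"
    "strings"
)

func convertFilePath(filePath string) string {
    if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
        return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
    }
    return filePath
}

func main() {
    fmt.Println(convertFilePath("chunks/abcd.fsl")) // fossils/abcd
    fmt.Println(convertFilePath("snapshots/doc/1")) // unchanged
}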
@@ -5,22 +5,22 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"time"
|
|
||||||
"net/url"
|
|
||||||
"math/rand"
|
|
||||||
"io/ioutil"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"math/rand"
|
||||||
|
"net"
|
||||||
|
"net/url"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
gcs "cloud.google.com/go/storage"
|
||||||
"golang.org/x/net/context"
|
"golang.org/x/net/context"
|
||||||
"golang.org/x/oauth2"
|
"golang.org/x/oauth2"
|
||||||
"golang.org/x/oauth2/google"
|
"golang.org/x/oauth2/google"
|
||||||
gcs "cloud.google.com/go/storage"
|
"google.golang.org/api/googleapi"
|
||||||
"google.golang.org/api/iterator"
|
"google.golang.org/api/iterator"
|
||||||
"google.golang.org/api/option"
|
"google.golang.org/api/option"
|
||||||
"google.golang.org/api/googleapi"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type GCSStorage struct {
|
type GCSStorage struct {
|
||||||
@@ -31,7 +31,6 @@ type GCSStorage struct {
|
|||||||
|
|
||||||
numberOfThreads int
|
numberOfThreads int
|
||||||
TestMode bool
|
TestMode bool
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type GCSConfig struct {
|
type GCSConfig struct {
|
||||||
@@ -51,7 +50,7 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var object map[string]interface {}
|
var object map[string]interface{}
|
||||||
|
|
||||||
err = json.Unmarshal(description, &object)
|
err = json.Unmarshal(description, &object)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -74,7 +73,7 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
|
|||||||
}
|
}
|
||||||
tokenSource = config.TokenSource(ctx)
|
tokenSource = config.TokenSource(ctx)
|
||||||
} else {
|
} else {
|
||||||
gcsConfig := &GCSConfig {}
|
gcsConfig := &GCSConfig{}
|
||||||
if err := json.Unmarshal(description, gcsConfig); err != nil {
|
 	if err := json.Unmarshal(description, gcsConfig); err != nil {
 		return nil, err
 	}
@@ -92,11 +91,11 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
 
 	bucket := client.Bucket(bucketName)
 
-	if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
+	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
 		storageDir += "/"
 	}
 
-	storage = &GCSStorage {
+	storage = &GCSStorage{
 		bucket: bucket,
 		storageDir: storageDir,
 		numberOfThreads: threads,
@@ -149,14 +148,13 @@ func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {
 		return true, nil
 	}
 
-
 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
-	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-		dir = dir[:len(dir) - 1]
+	for len(dir) > 0 && dir[len(dir)-1] == '/' {
+		dir = dir[:len(dir)-1]
 	}
 
-	query := gcs.Query {
+	query := gcs.Query{
 		Prefix: storage.storageDir + dir + "/",
 	}
 	dirOnly := false
@@ -260,7 +258,7 @@ func (storage *GCSStorage) DownloadFile(threadIndex int, filePath string, chunk
 		return err
 	}
 	defer readCloser.Close()
-	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / storage.numberOfThreads)
+	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
 	return err
 }
 
@@ -271,7 +269,7 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
 	for {
 		writeCloser := storage.bucket.Object(storage.storageDir + filePath).NewWriter(context.Background())
 		defer writeCloser.Close()
-		reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
+		reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
 		_, err = io.Copy(writeCloser, reader)
 
 		if retry, e := storage.shouldRetry(&backoff, err); e == nil && !retry {
@@ -288,16 +286,16 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *GCSStorage) IsCacheNeeded() (bool) { return true }
+func (storage *GCSStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *GCSStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *GCSStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *GCSStorage) IsStrongConsistent() (bool) { return true }
+func (storage *GCSStorage) IsStrongConsistent() bool { return true }
 
 // If the storage supports fast listing of files names.
-func (storage *GCSStorage) IsFastListing() (bool) { return true }
+func (storage *GCSStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }

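The subtraction hunks above all apply one gofmt rule: in an expression that mixes precedence levels, the tighter-binding operands lose their surrounding spaces, so "dir[len(dir) - 1]" becomes "dir[len(dir)-1]" while a flat concatenation such as storage.storageDir + dir keeps its spaces. A minimal sketch, using only the standard library's go/format package (the formatter behind gofmt and goimports), that reproduces the rewrite; the trim function is a made-up stand-in for the code above:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	// Input written the way the old duplicacy sources were.
	src := []byte(`package x

func trim(dir string) string {
	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
		dir = dir[:len(dir) - 1]
	}
	return dir
}
`)
	// format.Source applies canonical gofmt formatting to a whole file.
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // the index comes back as dir[len(dir)-1]
}
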
@@ -5,18 +5,18 @@
 package duplicacy
 
 import (
-	"fmt"
-	"net"
-	"time"
-	"sync"
 	"bytes"
-	"strings"
-	"io/ioutil"
 	"encoding/json"
+	"fmt"
 	"io"
+	"io/ioutil"
+	"math/rand"
+	"net"
 	"net/http"
 	net_url "net/url"
-	"math/rand"
+	"strings"
+	"sync"
+	"time"
 
 	"golang.org/x/oauth2"
 )
@@ -65,8 +65,8 @@ func NewHubicClient(tokenFile string) (*HubicClient, error) {
 	}
 
 	client := &HubicClient{
-		HTTPClient: &http.Client {
-			Transport: &http.Transport {
+		HTTPClient: &http.Client{
+			Transport: &http.Transport{
 				Dial: (&net.Dialer{
 					Timeout: 30 * time.Second,
 					KeepAlive: 30 * time.Second,
@@ -137,7 +137,7 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
 
 	if url == HubicCredentialURL {
 		client.TokenLock.Lock()
-		request.Header.Set("Authorization", "Bearer " + client.Token.AccessToken)
+		request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
 		client.TokenLock.Unlock()
 	} else if url != HubicRefreshTokenURL {
 		client.CredentialLock.Lock()
@@ -172,11 +172,11 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
 	if response.StatusCode == 401 {
 
 		if url == HubicRefreshTokenURL {
-			return nil, 0, "", HubicError { Status: response.StatusCode, Message: "Authorization error when refreshing token"}
+			return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
 		}
 
 		if url == HubicCredentialURL {
-			return nil, 0, "", HubicError { Status: response.StatusCode, Message: "Authorization error when retrieving credentials"}
+			return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when retrieving credentials"}
 		}
 
 		err = client.RefreshToken()
@@ -196,7 +196,7 @@ func (client *HubicClient) call(url string, method string, input interface{}, ex
 			backoff *= 2
 			continue
 		} else {
-			return nil, 0, "", HubicError { Status: response.StatusCode, Message: "Hubic API error"}
+			return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Hubic API error"}
 		}
 	}
 
@@ -264,7 +264,7 @@ type HubicEntry struct {
 
 func (client *HubicClient) ListEntries(path string) ([]HubicEntry, error) {
 
-	if len(path) > 0 && path[len(path) - 1] != '/' {
+	if len(path) > 0 && path[len(path)-1] != '/' {
 		path += "/"
 	}
 
@@ -308,8 +308,8 @@ func (client *HubicClient) ListEntries(path string) ([]HubicEntry, error) {
 				marker = entry.Name
 			} else {
 				marker = entry.Subdir
-				for len(entry.Subdir) > 0 && entry.Subdir[len(entry.Subdir) - 1] == '/' {
-					entry.Subdir = entry.Subdir[:len(entry.Subdir) - 1]
+				for len(entry.Subdir) > 0 && entry.Subdir[len(entry.Subdir)-1] == '/' {
+					entry.Subdir = entry.Subdir[:len(entry.Subdir)-1]
 				}
 				entry.Name = entry.Subdir
 				entry.Type = "application/directory"
@@ -329,8 +329,8 @@ func (client *HubicClient) ListEntries(path string) ([]HubicEntry, error) {
 
 func (client *HubicClient) GetFileInfo(path string) (bool, bool, int64, error) {
 
-	for len(path) > 0 && path[len(path) - 1] == '/' {
-		path = path[:len(path) - 1]
+	for len(path) > 0 && path[len(path)-1] == '/' {
+		path = path[:len(path)-1]
 	}
 
 	client.CredentialLock.Lock()
@@ -353,8 +353,8 @@ func (client *HubicClient) GetFileInfo(path string) (bool, bool, int64, error) {
 
 func (client *HubicClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
 
-	for len(path) > 0 && path[len(path) - 1] == '/' {
-		path = path[:len(path) - 1]
+	for len(path) > 0 && path[len(path)-1] == '/' {
+		path = path[:len(path)-1]
 	}
 
 	client.CredentialLock.Lock()
@@ -367,8 +367,8 @@ func (client *HubicClient) DownloadFile(path string) (io.ReadCloser, int64, erro
 
 func (client *HubicClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
 
-	for len(path) > 0 && path[len(path) - 1] == '/' {
-		path = path[:len(path) - 1]
+	for len(path) > 0 && path[len(path)-1] == '/' {
+		path = path[:len(path)-1]
 	}
 
 	client.CredentialLock.Lock()
@@ -390,8 +390,8 @@ func (client *HubicClient) UploadFile(path string, content []byte, rateLimit int
 
 func (client *HubicClient) DeleteFile(path string) error {
 
-	for len(path) > 0 && path[len(path) - 1] == '/' {
-		path = path[:len(path) - 1]
+	for len(path) > 0 && path[len(path)-1] == '/' {
+		path = path[:len(path)-1]
 	}
 
 	client.CredentialLock.Lock()
@@ -410,12 +410,12 @@ func (client *HubicClient) DeleteFile(path string) error {
 
 func (client *HubicClient) MoveFile(from string, to string) error {
 
-	for len(from) > 0 && from[len(from) - 1] == '/' {
-		from = from[:len(from) - 1]
+	for len(from) > 0 && from[len(from)-1] == '/' {
+		from = from[:len(from)-1]
 	}
 
-	for len(to) > 0 && to[len(to) - 1] == '/' {
-		to = to[:len(to) - 1]
+	for len(to) > 0 && to[len(to)-1] == '/' {
+		to = to[:len(to)-1]
 	}
 
 	client.CredentialLock.Lock()
@@ -436,10 +436,10 @@ func (client *HubicClient) MoveFile(from string, to string) error {
 	return client.DeleteFile(from)
 }
 
-func (client *HubicClient) CreateDirectory(path string) (error) {
+func (client *HubicClient) CreateDirectory(path string) error {
 
-	for len(path) > 0 && path[len(path) - 1] == '/' {
-		path = path[:len(path) - 1]
+	for len(path) > 0 && path[len(path)-1] == '/' {
+		path = path[:len(path)-1]
 	}
 
 	client.CredentialLock.Lock()

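The import-block churn in this file is the goimports half of the commit: each import group is sorted alphabetically, which is why "bytes" now leads and "time" trails, while the third-party "golang.org/x/oauth2" group is left alone. A hedged sketch of driving the same rewrite from code, assuming the golang.org/x/tools/imports package (the library behind the goimports command) is available to build against; the file name passed to Process is a placeholder that only guides its heuristics:

package main

import (
	"fmt"

	"golang.org/x/tools/imports"
)

func main() {
	// An import block in the old, unsorted order. The function body keeps
	// every import in use, since Process also deletes unused imports.
	src := []byte(`package duplicacy

import (
	"time"
	"sync"
	"fmt"
)

func wait(m *sync.Mutex, d time.Duration) {
	m.Lock()
	time.Sleep(d)
	m.Unlock()
	fmt.Println("done")
}
`)
	// Process formats like gofmt and additionally sorts and groups imports.
	out, err := imports.Process("placeholder.go", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // imports come back as fmt, sync, time
}
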
@@ -5,11 +5,11 @@
 package duplicacy
 
 import (
-	"io"
-	"fmt"
-	"testing"
 	"crypto/sha256"
 	"encoding/hex"
+	"fmt"
+	"io"
+	"testing"
 
 	crypto_rand "crypto/rand"
 	"math/rand"
@@ -73,7 +73,7 @@ func TestHubicClient(t *testing.T) {
 	maxFileSize := 64 * 1024
 
 	for i := 0; i < numberOfFiles; i++ {
-		content := make([]byte, rand.Int() % maxFileSize + 1)
+		content := make([]byte, rand.Int()%maxFileSize+1)
 		_, err = crypto_rand.Read(content)
 		if err != nil {
 			t.Errorf("Error generating random content: %v", err)
@@ -86,7 +86,7 @@ func TestHubicClient(t *testing.T) {
 
 		fmt.Printf("file: %s\n", filename)
 
-		err = hubicClient.UploadFile("test/test1/" + filename, content, 100)
+		err = hubicClient.UploadFile("test/test1/"+filename, content, 100)
 		if err != nil {
 			/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
 				t.Errorf("Failed to upload the file %s: %v", filename, err)
@@ -104,9 +104,9 @@ func TestHubicClient(t *testing.T) {
 	for _, entry := range entries {
 
 		exists, isDir, size, err := hubicClient.GetFileInfo("test/test1/" + entry.Name)
-		fmt.Printf("%s exists: %t, isDir: %t, size: %d, err: %v\n", "test/test1/" + entry.Name, exists, isDir, size, err)
+		fmt.Printf("%s exists: %t, isDir: %t, size: %d, err: %v\n", "test/test1/"+entry.Name, exists, isDir, size, err)
 
-		err = hubicClient.MoveFile("test/test1/" + entry.Name, "test/test2/" + entry.Name)
+		err = hubicClient.MoveFile("test/test1/"+entry.Name, "test/test2/"+entry.Name)
 		if err != nil {
 			t.Errorf("Failed to move %s: %v", entry.Name, err)
 			return

@@ -20,8 +20,8 @@ type HubicStorage struct {
 // CreateHubicStorage creates an Hubic storage object.
 func CreateHubicStorage(tokenFile string, storagePath string, threads int) (storage *HubicStorage, err error) {
 
-	for len(storagePath) > 0 && storagePath[len(storagePath) - 1] == '/' {
-		storagePath = storagePath[:len(storagePath) - 1]
+	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
+		storagePath = storagePath[:len(storagePath)-1]
 	}
 
 	client, err := NewHubicClient(tokenFile)
@@ -42,13 +42,13 @@ func CreateHubicStorage(tokenFile string, storagePath string, threads int) (stor
 		return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
 	}
 
-	storage = &HubicStorage {
+	storage = &HubicStorage{
 		client: client,
 		storageDir: storagePath,
 		numberOfThreads: threads,
 	}
 
-	for _, path := range []string { "chunks", "snapshots" } {
+	for _, path := range []string{"chunks", "snapshots"} {
 		dir := storagePath + "/" + path
 		exists, isDir, _, err := client.GetFileInfo(dir)
 		if err != nil {
@@ -70,8 +70,8 @@ func CreateHubicStorage(tokenFile string, storagePath string, threads int) (stor
 
 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
-	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-		dir = dir[:len(dir) - 1]
+	for len(dir) > 0 && dir[len(dir)-1] == '/' {
+		dir = dir[:len(dir)-1]
 	}
 
 	if dir == "snapshots" {
@@ -83,7 +83,7 @@ func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, [
 		subDirs := []string{}
 		for _, entry := range entries {
 			if entry.Type == "application/directory" {
-				subDirs = append(subDirs, entry.Name + "/")
+				subDirs = append(subDirs, entry.Name+"/")
 			}
 		}
 		return subDirs, nil, nil
@@ -142,8 +142,8 @@ func (storage *HubicStorage) MoveFile(threadIndex int, from string, to string) (
 
 // CreateDirectory creates a new directory.
 func (storage *HubicStorage) CreateDirectory(threadIndex int, dir string) (err error) {
-	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-		dir = dir[:len(dir) - 1]
+	for len(dir) > 0 && dir[len(dir)-1] == '/' {
+		dir = dir[:len(dir)-1]
 	}
 
 	return storage.client.CreateDirectory(storage.storageDir + "/" + dir)
@@ -152,8 +152,8 @@ func (storage *HubicStorage) CreateDirectory(threadIndex int, dir string) (err e
 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *HubicStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
 
-	for len(filePath) > 0 && filePath[len(filePath) - 1] == '/' {
-		filePath = filePath[:len(filePath) - 1]
+	for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
+		filePath = filePath[:len(filePath)-1]
 	}
 	return storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
 }
@@ -179,27 +179,27 @@ func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chun
 
 	defer readCloser.Close()
 
-	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / storage.numberOfThreads)
+	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
 	return err
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
-	return storage.client.UploadFile(storage.storageDir + "/" + filePath, content, storage.UploadRateLimit / storage.numberOfThreads)
+	return storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThreads)
 }
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *HubicStorage) IsCacheNeeded() (bool) { return true }
+func (storage *HubicStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *HubicStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *HubicStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *HubicStorage) IsStrongConsistent() (bool) { return false }
+func (storage *HubicStorage) IsStrongConsistent() bool { return false }
 
 // If the storage supports fast listing of files names.
-func (storage *HubicStorage) IsFastListing() (bool) { return true }
+func (storage *HubicStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *HubicStorage) EnableTestMode() {

@@ -5,10 +5,10 @@
 package duplicacy
 
 import (
+	"encoding/json"
+	"io/ioutil"
 	"syscall"
 	"unsafe"
-	"io/ioutil"
-	"encoding/json"
 )
 
 var keyringFile string
@@ -33,11 +33,11 @@ func SetKeyringFile(path string) {
 
 func keyringEncrypt(value []byte) ([]byte, error) {
 
-	dataIn := DATA_BLOB {
+	dataIn := DATA_BLOB{
 		pbData: &value[0],
 		cbData: uint32(len(value)),
 	}
-	dataOut := DATA_BLOB {}
+	dataOut := DATA_BLOB{}
 
 	r, _, err := procEncryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
 		0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
@@ -57,11 +57,11 @@ func keyringEncrypt(value []byte) ([]byte, error) {
 
 func keyringDecrypt(value []byte) ([]byte, error) {
 
-	dataIn := DATA_BLOB {
+	dataIn := DATA_BLOB{
 		pbData: &value[0],
 		cbData: uint32(len(value)),
 	}
-	dataOut := DATA_BLOB {}
+	dataOut := DATA_BLOB{}
 
 	r, _, err := procDecryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
 		0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))

@@ -5,12 +5,12 @@
 package duplicacy
 
 import (
-	"os"
 	"fmt"
-	"time"
+	"os"
+	"runtime/debug"
 	"sync"
 	"testing"
-	"runtime/debug"
+	"time"
 )
 
 const (

@@ -5,16 +5,16 @@
 package duplicacy
 
 import (
-	"fmt"
-	"time"
-	"sync"
 	"bytes"
-	"strings"
-	"io/ioutil"
 	"encoding/json"
+	"fmt"
 	"io"
-	"net/http"
+	"io/ioutil"
 	"math/rand"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
 
 	"golang.org/x/oauth2"
 )
@@ -108,7 +108,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
 
 	if url != OneDriveRefreshTokenURL {
 		client.TokenLock.Lock()
-		request.Header.Set("Authorization", "Bearer " + client.Token.AccessToken)
+		request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
 		client.TokenLock.Unlock()
 	}
 	if contentType != "" {
@@ -121,7 +121,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
 		if strings.Contains(err.Error(), "TLS handshake timeout") {
 			// Give a long timeout regardless of backoff when a TLS timeout happens, hoping that
 			// idle connections are not to be reused on reconnect.
-			retryAfter := time.Duration(rand.Float32() * 60000 + 180000)
+			retryAfter := time.Duration(rand.Float32()*60000 + 180000)
 			LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
 			time.Sleep(retryAfter * time.Millisecond)
 		} else {
@@ -144,14 +144,14 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
 
 	defer response.Body.Close()
 
-	errorResponse := &OneDriveErrorResponse {
-		Error: OneDriveError { Status: response.StatusCode },
+	errorResponse := &OneDriveErrorResponse{
+		Error: OneDriveError{Status: response.StatusCode},
 	}
 
 	if response.StatusCode == 401 {
 
 		if url == OneDriveRefreshTokenURL {
-			return nil, 0, OneDriveError { Status: response.StatusCode, Message: "Authorization error when refreshing token"}
+			return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
 		}
 
 		err = client.RefreshToken()
@@ -167,7 +167,7 @@ func (client *OneDriveClient) call(url string, method string, input interface{},
 			continue
 		} else {
 			if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
-				return nil, 0, OneDriveError { Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response"), }
+				return nil, 0, OneDriveError{Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response")}
			}
 
 			errorResponse.Error.Status = response.StatusCode
@@ -213,7 +213,7 @@ func (client *OneDriveClient) RefreshToken() (err error) {
 type OneDriveEntry struct {
 	ID string
 	Name string
-	Folder map[string] interface {}
+	Folder map[string]interface{}
 	Size int64
 }
 
@@ -245,7 +245,7 @@ func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error)
 
 	defer readCloser.Close()
 
-	output := &OneDriveListEntriesOutput {}
+	output := &OneDriveListEntriesOutput{}
 
 	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
 		return nil, err
@@ -340,7 +340,7 @@ func (client *OneDriveClient) MoveFile(path string, parent string) error {
 	return nil
 }
 
-func (client *OneDriveClient) CreateDirectory(path string, name string) (error) {
+func (client *OneDriveClient) CreateDirectory(path string, name string) error {
 
 	url := OneDriveAPIURL + "/root/children"
 

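Two of the OneDrive hunks show that the commit is not only about operator spacing: the printer drops the redundant parentheses around a single unnamed result, turning "(error)" into "error", and collapses "map[string] interface {}" into "map[string]interface{}". A small standard-library sketch that exercises both normalizations on a made-up Entry type:

package main

import (
	"fmt"
	"go/format"
)

func main() {
	src := []byte(`package x

type Entry struct {
	Folder map[string] interface {}
}

// IsDir reports whether the entry describes a folder.
func (e *Entry) IsDir() (bool) { return len(e.Folder) > 0 }
`)
	out, err := format.Source(src)
	if err != nil {
		panic(err)
	}
	// Prints IsDir() bool and map[string]interface{}.
	fmt.Print(string(out))
}
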
@@ -5,11 +5,11 @@
 package duplicacy
 
 import (
-	"io"
-	"fmt"
-	"testing"
 	"crypto/sha256"
 	"encoding/hex"
+	"fmt"
+	"io"
+	"testing"
 
 	crypto_rand "crypto/rand"
 	"math/rand"
@@ -30,7 +30,6 @@ func TestOneDriveClient(t *testing.T) {
 		fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
 	}
 
-
 	testID, _, _, err := oneDriveClient.GetFileInfo("test")
 	if err != nil {
 		t.Errorf("Failed to list the test directory: %v", err)
@@ -74,7 +73,7 @@ func TestOneDriveClient(t *testing.T) {
 	maxFileSize := 64 * 1024
 
 	for i := 0; i < numberOfFiles; i++ {
-		content := make([]byte, rand.Int() % maxFileSize + 1)
+		content := make([]byte, rand.Int()%maxFileSize+1)
 		_, err = crypto_rand.Read(content)
 		if err != nil {
 			t.Errorf("Error generating random content: %v", err)
@@ -87,7 +86,7 @@ func TestOneDriveClient(t *testing.T) {
 
 		fmt.Printf("file: %s\n", filename)
 
-		err = oneDriveClient.UploadFile("test/test1/" + filename, content, 100)
+		err = oneDriveClient.UploadFile("test/test1/"+filename, content, 100)
 		if err != nil {
 			/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
 				t.Errorf("Failed to upload the file %s: %v", filename, err)
@@ -103,7 +102,7 @@ func TestOneDriveClient(t *testing.T) {
 	}
 
 	for _, entry := range entries {
-		err = oneDriveClient.MoveFile("test/test1/" + entry.Name, "test/test2")
+		err = oneDriveClient.MoveFile("test/test1/"+entry.Name, "test/test2")
 		if err != nil {
 			t.Errorf("Failed to move %s: %v", entry.Name, err)
 			return

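A detail visible in the test hunks: gofmt uses spacing to mirror precedence and expression depth, so "rand.Int() % maxFileSize + 1" is rewritten with both operators tight, while the earlier "rand.Float32()*60000 + 180000" keeps the space around the lower-precedence addition. Either way the spelling is purely cosmetic; a tiny sketch with illustrative values confirms both forms parse to the same expression:

package main

import "fmt"

func main() {
	maxFileSize := 64 * 1024
	n := 123456789 // stand-in for a rand.Int() result
	// Both spellings are the same expression tree; the modulo binds
	// tighter than the addition no matter how it is spaced.
	a := n % maxFileSize + 1
	b := n%maxFileSize + 1
	fmt.Println(a == b) // true
}
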
@@ -21,8 +21,8 @@ type OneDriveStorage struct {
 // CreateOneDriveStorage creates an OneDrive storage object.
 func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
 
-	for len(storagePath) > 0 && storagePath[len(storagePath) - 1] == '/' {
-		storagePath = storagePath[:len(storagePath) - 1]
+	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
+		storagePath = storagePath[:len(storagePath)-1]
 	}
 
 	client, err := NewOneDriveClient(tokenFile)
@@ -43,13 +43,13 @@ func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (s
 		return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
 	}
 
-	storage = &OneDriveStorage {
+	storage = &OneDriveStorage{
 		client: client,
 		storageDir: storagePath,
 		numberOfThread: threads,
 	}
 
-	for _, path := range []string { "chunks", "fossils", "snapshots" } {
+	for _, path := range []string{"chunks", "fossils", "snapshots"} {
 		dir := storagePath + "/" + path
 		dirID, isDir, _, err := client.GetFileInfo(dir)
 		if err != nil {
@@ -71,8 +71,8 @@ func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (s
 
 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
-	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-		dir = dir[:len(dir) - 1]
+	for len(dir) > 0 && dir[len(dir)-1] == '/' {
+		dir = dir[:len(dir)-1]
 	}
 
 	if dir == "snapshots" {
@@ -84,7 +84,7 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
 		subDirs := []string{}
 		for _, entry := range entries {
 			if len(entry.Folder) > 0 {
-				subDirs = append(subDirs, entry.Name + "/")
+				subDirs = append(subDirs, entry.Name+"/")
 			}
 		}
 		return subDirs, nil, nil
@@ -105,7 +105,7 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
 	} else {
 		files := []string{}
 		sizes := []int64{}
-		for _, parent := range []string {"chunks", "fossils" } {
+		for _, parent := range []string{"chunks", "fossils"} {
 			entries, err := storage.client.ListEntries(storage.storageDir + "/" + parent)
 			if err != nil {
 				return nil, nil, err
@@ -128,7 +128,7 @@ func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string
 // DeleteFile deletes the file or directory at 'filePath'.
 func (storage *OneDriveStorage) DeleteFile(threadIndex int, filePath string) (err error) {
 	if strings.HasSuffix(filePath, ".fsl") && strings.HasPrefix(filePath, "chunks/") {
-		filePath = "fossils/" + filePath[len("chunks/"):len(filePath) - len(".fsl")]
+		filePath = "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
 	}
 
 	err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
@@ -144,7 +144,7 @@ func (storage *OneDriveStorage) MoveFile(threadIndex int, from string, to string
 	fromPath := storage.storageDir + "/" + from
 	toParent := storage.storageDir + "/fossils"
 	if strings.HasSuffix(from, ".fsl") {
-		fromPath = storage.storageDir + "/fossils/" + from[len("chunks/"):len(from) - len(".fsl")]
+		fromPath = storage.storageDir + "/fossils/" + from[len("chunks/"):len(from)-len(".fsl")]
 		toParent = storage.storageDir + "/chunks"
 	}
 
@@ -161,8 +161,8 @@ func (storage *OneDriveStorage) MoveFile(threadIndex int, from string, to string
 
 // CreateDirectory creates a new directory.
 func (storage *OneDriveStorage) CreateDirectory(threadIndex int, dir string) (err error) {
-	for len(dir) > 0 && dir[len(dir) - 1] == '/' {
-		dir = dir[:len(dir) - 1]
+	for len(dir) > 0 && dir[len(dir)-1] == '/' {
+		dir = dir[:len(dir)-1]
 	}
 
 	parent := path.Dir(dir)
@@ -170,15 +170,15 @@ func (storage *OneDriveStorage) CreateDirectory(threadIndex int, dir string) (er
 	if parent == "." {
 		return storage.client.CreateDirectory(storage.storageDir, dir)
 	} else {
-		return storage.client.CreateDirectory(storage.storageDir + "/" + parent, path.Base(dir))
+		return storage.client.CreateDirectory(storage.storageDir+"/"+parent, path.Base(dir))
 	}
 }
 
 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *OneDriveStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
 
-	for len(filePath) > 0 && filePath[len(filePath) - 1] == '/' {
-		filePath = filePath[:len(filePath) - 1]
+	for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
+		filePath = filePath[:len(filePath)-1]
 	}
 	fileID, isDir, size, err := storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
 	return fileID != "", isDir, size, err
@@ -207,13 +207,13 @@ func (storage *OneDriveStorage) DownloadFile(threadIndex int, filePath string, c
 
 	defer readCloser.Close()
 
-	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / storage.numberOfThread)
+	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThread)
 	return err
 }
 
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
-	err = storage.client.UploadFile(storage.storageDir + "/" + filePath, content, storage.UploadRateLimit / storage.numberOfThread)
+	err = storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThread)
 
 	if e, ok := err.(OneDriveError); ok && e.Status == 409 {
 		LOG_TRACE("ONEDRIVE_UPLOAD", "File %s already exists", filePath)
@@ -225,16 +225,16 @@ func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, con
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *OneDriveStorage) IsCacheNeeded() (bool) { return true }
+func (storage *OneDriveStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *OneDriveStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *OneDriveStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *OneDriveStorage) IsStrongConsistent() (bool) { return false }
+func (storage *OneDriveStorage) IsStrongConsistent() bool { return false }
 
 // If the storage supports fast listing of files names.
-func (storage *OneDriveStorage) IsFastListing() (bool) { return true }
+func (storage *OneDriveStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *OneDriveStorage) EnableTestMode() {

@@ -5,12 +5,12 @@
 package duplicacy
 
 import (
-	"strings"
 	"encoding/json"
-	"path"
 	"io/ioutil"
-	"reflect"
 	"os"
+	"path"
+	"reflect"
+	"strings"
 )
 
 // Preference stores options for each storage.
@@ -26,7 +26,7 @@ type Preference struct {
 }
 
 var preferencePath string
-var Preferences [] Preference
+var Preferences []Preference
 
 func LoadPreferences(repository string) bool {
 
@@ -91,7 +91,7 @@ func SetDuplicacyPreferencePath(p string) {
 	preferencePath = p
 }
 
-func SavePreferences() (bool) {
+func SavePreferences() bool {
 	description, err := json.MarshalIndent(Preferences, "", " ")
 	if err != nil {
 		LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
@@ -108,7 +108,7 @@ func SavePreferences() (bool) {
 	return true
 }
 
-func FindPreference(name string) (*Preference) {
+func FindPreference(name string) *Preference {
 	for i, preference := range Preferences {
 		if preference.Name == name || preference.StorageURL == name {
 			return &Preferences[i]

@@ -6,6 +6,7 @@ package duplicacy
 
 import (
 	"time"
+
 	"github.com/gilbertchen/goamz/aws"
 	"github.com/gilbertchen/goamz/s3"
 )
@@ -30,10 +31,10 @@ func CreateS3CStorage(regionName string, endpoint string, bucketName string, sto
 		}
 		region = aws.Regions[regionName]
 	} else {
-		region = aws.Region { Name: regionName, S3Endpoint:"https://" + endpoint }
+		region = aws.Region{Name: regionName, S3Endpoint: "https://" + endpoint}
 	}
 
-	auth := aws.Auth{ AccessKey: accessKey, SecretKey: secretKey }
+	auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}
 
 	var buckets []*s3.Bucket
 	for i := 0; i < threads; i++ {
@@ -48,11 +49,11 @@ func CreateS3CStorage(regionName string, endpoint string, bucketName string, sto
 		buckets = append(buckets, bucket)
 	}
 
-	if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
+	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
 		storageDir += "/"
 	}
 
-	storage = &S3CStorage {
+	storage = &S3CStorage{
 		buckets: buckets,
 		storageDir: storageDir,
 	}
@@ -62,13 +63,13 @@ func CreateS3CStorage(regionName string, endpoint string, bucketName string, sto
 
 // ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
-	if len(dir) > 0 && dir[len(dir) - 1] != '/' {
+	if len(dir) > 0 && dir[len(dir)-1] != '/' {
 		dir += "/"
 	}
 
 	dirLength := len(storage.storageDir + dir)
 	if dir == "snapshots/" {
-		results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "/", "", 100)
+		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "/", "", 100)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -80,7 +81,7 @@ func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []strin
 	} else if dir == "chunks/" {
 		marker := ""
 		for {
-			results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", marker, 1000)
+			results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", marker, 1000)
 			if err != nil {
 				return nil, nil, err
 			}
@@ -94,13 +95,13 @@ func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []strin
 				break
 			}
 
-			marker = results.Contents[len(results.Contents) - 1].Key
+			marker = results.Contents[len(results.Contents)-1].Key
 		}
 		return files, sizes, nil
 
 	} else {
 
-		results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", "", 1000)
+		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", "", 1000)
 		if err != nil {
 			return nil, nil, err
 		}
@@ -120,8 +121,8 @@ func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err err
 // MoveFile renames the file.
 func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {
 
-	options := s3.CopyOptions { ContentType: "application/duplicacy" }
-	_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir + to, s3.Private, options, storage.buckets[threadIndex].Name + "/" + storage.storageDir + from)
+	options := s3.CopyOptions{ContentType: "application/duplicacy"}
+	_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir+to, s3.Private, options, storage.buckets[threadIndex].Name+"/"+storage.storageDir+from)
 	if err != nil {
 		return nil
 	}
@@ -137,7 +138,7 @@ func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err err
 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
 
-	response, err := storage.buckets[threadIndex].Head(storage.storageDir + filePath, nil)
+	response, err := storage.buckets[threadIndex].Head(storage.storageDir+filePath, nil)
 	if err != nil {
 		if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
 			return false, false, 0, nil
@@ -182,7 +183,7 @@ func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk
 
 	defer readCloser.Close()
 
-	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.buckets))
+	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.buckets))
 	return err
 
 }
@@ -190,23 +191,23 @@ func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
 
-	options := s3.Options { }
-	reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.buckets))
-	return storage.buckets[threadIndex].PutReader(storage.storageDir + filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
+	options := s3.Options{}
+	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.buckets))
+	return storage.buckets[threadIndex].PutReader(storage.storageDir+filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
 }
 
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *S3CStorage) IsCacheNeeded () (bool) { return true }
+func (storage *S3CStorage) IsCacheNeeded() bool { return true }
 
 // If the 'MoveFile' method is implemented.
-func (storage *S3CStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *S3CStorage) IsMoveFileImplemented() bool { return true }
 
 // If the storage can guarantee strong consistency.
-func (storage *S3CStorage) IsStrongConsistent() (bool) { return false }
+func (storage *S3CStorage) IsStrongConsistent() bool { return false }
 
 // If the storage supports fast listing of files names.
-func (storage *S3CStorage) IsFastListing() (bool) { return true }
+func (storage *S3CStorage) IsFastListing() bool { return true }
 
 // Enable the test mode.
 func (storage *S3CStorage) EnableTestMode() {}

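Because every hunk in this commit is mechanical, the result can also be verified mechanically. A hypothetical checker, not part of duplicacy and with "." as a placeholder root, that walks a tree and prints any .go file whose bytes differ from gofmt output; note that go/format does not sort imports, so a file can pass this check and still be rewritten by goimports:

package main

import (
	"bytes"
	"fmt"
	"go/format"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	filepath.Walk(".", func(p string, info os.FileInfo, err error) error {
		if err != nil || info.IsDir() || !strings.HasSuffix(p, ".go") {
			return err
		}
		src, err := ioutil.ReadFile(p)
		if err != nil {
			return err
		}
		formatted, err := format.Source(src)
		if err != nil {
			return nil // skip files that do not parse
		}
		if !bytes.Equal(src, formatted) {
			fmt.Println(p) // needs gofmt/goimports
		}
		return nil
	})
}
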
@@ -5,8 +5,8 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"strings"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
"github.com/aws/aws-sdk-go/aws"
|
"github.com/aws/aws-sdk-go/aws"
|
||||||
"github.com/aws/aws-sdk-go/aws/awserr"
|
"github.com/aws/aws-sdk-go/aws/awserr"
|
||||||
@@ -34,7 +34,7 @@ func CreateS3Storage(regionName string, endpoint string, bucketName string, stor
|
|||||||
auth := credentials.NewStaticCredentials(accessKey, secretKey, token)
|
auth := credentials.NewStaticCredentials(accessKey, secretKey, token)
|
||||||
|
|
||||||
if regionName == "" && endpoint == "" {
|
if regionName == "" && endpoint == "" {
|
||||||
defaultRegionConfig := &aws.Config {
|
defaultRegionConfig := &aws.Config{
|
||||||
Region: aws.String("us-east-1"),
|
Region: aws.String("us-east-1"),
|
||||||
Credentials: auth,
|
Credentials: auth,
|
||||||
}
|
}
|
||||||
@@ -53,7 +53,7 @@ func CreateS3Storage(regionName string, endpoint string, bucketName string, stor
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
config := &aws.Config {
|
config := &aws.Config{
|
||||||
Region: aws.String(regionName),
|
Region: aws.String(regionName),
|
||||||
Credentials: auth,
|
Credentials: auth,
|
||||||
Endpoint: aws.String(endpoint),
|
Endpoint: aws.String(endpoint),
|
||||||
@@ -61,11 +61,11 @@ func CreateS3Storage(regionName string, endpoint string, bucketName string, stor
|
|||||||
DisableSSL: aws.Bool(!isSSLSupported),
|
DisableSSL: aws.Bool(!isSSLSupported),
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
|
if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
|
||||||
storageDir += "/"
|
storageDir += "/"
|
||||||
}
|
}
|
||||||
|
|
||||||
storage = &S3Storage {
|
storage = &S3Storage{
|
||||||
client: s3.New(session.New(config)),
|
client: s3.New(session.New(config)),
|
||||||
bucket: bucketName,
|
bucket: bucketName,
|
||||||
storageDir: storageDir,
|
storageDir: storageDir,
|
||||||
@@ -77,13 +77,13 @@ func CreateS3Storage(regionName string, endpoint string, bucketName string, stor
|
|||||||
|
|
||||||
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
|
||||||
func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
|
||||||
if len(dir) > 0 && dir[len(dir) - 1] != '/' {
|
if len(dir) > 0 && dir[len(dir)-1] != '/' {
|
||||||
dir += "/"
|
dir += "/"
|
||||||
}
|
}
|
||||||
|
|
||||||
if dir == "snapshots/" {
|
if dir == "snapshots/" {
|
||||||
dir = storage.storageDir + dir
|
dir = storage.storageDir + dir
|
||||||
input := s3.ListObjectsInput {
|
input := s3.ListObjectsInput{
|
||||||
Bucket: aws.String(storage.bucket),
|
Bucket: aws.String(storage.bucket),
|
||||||
Prefix: aws.String(dir),
|
Prefix: aws.String(dir),
|
||||||
Delimiter: aws.String("/"),
|
Delimiter: aws.String("/"),
|
||||||
@@ -103,7 +103,7 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
|
|||||||
dir = storage.storageDir + dir
|
dir = storage.storageDir + dir
|
||||||
marker := ""
|
marker := ""
|
||||||
for {
|
for {
|
||||||
input := s3.ListObjectsInput {
|
	input := s3.ListObjectsInput{
		Bucket: aws.String(storage.bucket),
		Prefix: aws.String(dir),
		MaxKeys: aws.Int64(1000),
@@ -124,7 +124,7 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
			break
		}

		marker = *output.Contents[len(output.Contents) - 1].Key
		marker = *output.Contents[len(output.Contents)-1].Key
	}
	return files, sizes, nil
}
@@ -133,7 +133,7 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
	input := &s3.DeleteObjectInput {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(storage.bucket),
		Key: aws.String(storage.storageDir + filePath),
	}
@@ -144,7 +144,7 @@ func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err erro
// MoveFile renames the file.
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {

	input := &s3.CopyObjectInput {
	input := &s3.CopyObjectInput{
		Bucket: aws.String(storage.bucket),
		CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
		Key: aws.String(storage.storageDir + to),
@@ -167,7 +167,7 @@ func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err erro
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	input := &s3.HeadObjectInput {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(storage.bucket),
		Key: aws.String(storage.storageDir + filePath),
	}
@@ -210,7 +210,7 @@ func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bo
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

	input := &s3.GetObjectInput {
	input := &s3.GetObjectInput{
		Bucket: aws.String(storage.bucket),
		Key: aws.String(storage.storageDir + filePath),
	}
@@ -222,7 +222,7 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *

	defer output.Body.Close()

	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.bucket))
	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.bucket))
	return err

}
@@ -233,11 +233,11 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [
	attempts := 0

	for {
		input := &s3.PutObjectInput {
		input := &s3.PutObjectInput{
			Bucket: aws.String(storage.bucket),
			Key: aws.String(storage.storageDir + filePath),
			ACL: aws.String(s3.ObjectCannedACLPrivate),
			Body: CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.bucket)),
			Body: CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.bucket)),
			ContentType: aws.String("application/duplicacy"),
		}

@@ -255,16 +255,16 @@ func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content [

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *S3Storage) IsCacheNeeded () (bool) { return true }
func (storage *S3Storage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *S3Storage) IsMoveFileImplemented() (bool) { return true }
func (storage *S3Storage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *S3Storage) IsStrongConsistent() (bool) { return false }
func (storage *S3Storage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of file names.
func (storage *S3Storage) IsFastListing() (bool) { return true }
func (storage *S3Storage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *S3Storage) EnableTestMode() {}
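The four capability methods above let callers branch on backend traits without knowing the concrete storage type. A minimal sketch of consuming that pattern, assuming a simplified interface; the type and caller names here are illustrative, not duplicacy's actual API:

package main

import "fmt"

// capabilities is a trimmed-down stand-in for the storage interface in the diff.
type capabilities interface {
	IsCacheNeeded() bool
	IsStrongConsistent() bool
	IsFastListing() bool
}

type fakeS3 struct{}

func (fakeS3) IsCacheNeeded() bool      { return true }
func (fakeS3) IsStrongConsistent() bool { return false }
func (fakeS3) IsFastListing() bool      { return true }

func main() {
	var s capabilities = fakeS3{}
	if !s.IsStrongConsistent() {
		// An eventually consistent backend needs extra safety delays before
		// treating collected fossils as deletable.
		fmt.Println("backend is eventually consistent; use the extra waiting period")
	}
	if s.IsFastListing() {
		fmt.Println("list all chunks in one pass instead of walking subdirectories")
	}
}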
@@ -7,15 +7,15 @@ package duplicacy

import (
	"fmt"
	"io"
	"os"
	"net"
	"path"
	"time"
	"runtime"
	"math/rand"
	"net"
	"os"
	"path"
	"runtime"
	"time"

	"golang.org/x/crypto/ssh"
	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

type SFTPStorage struct {
@@ -29,8 +29,7 @@ type SFTPStorage struct {
func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
	password string, threads int) (storage *SFTPStorage, err error) {

	authMethods := [] ssh.AuthMethod { ssh.Password(password) }
	authMethods := []ssh.AuthMethod{ssh.Password(password)}

	hostKeyCallback := func(hostname string, remote net.Addr,
		key ssh.PublicKey) error {
@@ -41,7 +40,7 @@ func CreateSFTPStorageWithPassword(server string, port int, username string, sto
}

func CreateSFTPStorage(server string, port int, username string, storageDir string,
	authMethods [] ssh.AuthMethod,
	authMethods []ssh.AuthMethod,
	hostKeyCallback func(hostname string, remote net.Addr,
		key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {

@@ -52,7 +51,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
	}

	if server == "sftp.hidrive.strato.com" {
		config.Ciphers = []string {"aes128-cbc", "aes128-ctr", "aes256-ctr"}
		config.Ciphers = []string{"aes128-cbc", "aes128-ctr", "aes256-ctr"}
	}

	serverAddress := fmt.Sprintf("%s:%d", server, port)
@@ -67,8 +66,8 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
		return nil, err
	}

	for storageDir[len(storageDir) - 1] == '/' {
	for storageDir[len(storageDir)-1] == '/' {
		storageDir = storageDir[:len(storageDir) - 1]
		storageDir = storageDir[:len(storageDir)-1]
	}

	fileInfo, err := client.Stat(storageDir)
@@ -80,7 +79,7 @@ func CreateSFTPStorage(server string, port int, username string, storageDir stri
		return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
	}

	storage = &SFTPStorage {
	storage = &SFTPStorage{
		client: client,
		storageDir: storageDir,
		numberOfThreads: threads,
@@ -108,7 +107,7 @@ func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []

	for _, entry := range entries {
		name := entry.Name()
		if entry.IsDir() && name[len(name) - 1] != '/' {
		if entry.IsDir() && name[len(name)-1] != '/' {
			name += "/"
		}

@@ -188,18 +187,18 @@ func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil
	// The minimum level of directories to dive into before searching for the chunk file.
	minimumLevel := 2

	for level := 0; level * 2 < len(chunkID); level ++ {
	for level := 0; level*2 < len(chunkID); level++ {
		if level >= minimumLevel {
			filePath = path.Join(dir, chunkID[2 * level:]) + suffix
			filePath = path.Join(dir, chunkID[2*level:]) + suffix
			if stat, err := storage.client.Stat(filePath); err == nil && !stat.IsDir() {
				return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
				return filePath[len(storage.storageDir)+1:], true, stat.Size(), nil
			} else if err == nil && stat.IsDir() {
				return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
				return filePath[len(storage.storageDir)+1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
			}
		}

		// Find the subdirectory the chunk file may reside.
		subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
		subDir := path.Join(dir, chunkID[2*level:2*level+2])
		stat, err := storage.client.Stat(subDir)
		if err == nil && stat.IsDir() {
			dir = subDir
@@ -227,7 +226,7 @@ func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil
		}

		// The chunk must be under this subdirectory but it doesn't exist.
		return path.Join(dir, chunkID[2 * level:])[len(storage.storageDir) + 1:] + suffix, false, 0, nil

	}

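To make the directory-diving in FindChunk concrete: a chunk ID is a hex string, and each nesting level peels off one two-character pair as a subdirectory name. A standalone sketch of the candidate paths this loop probes (illustrative only; the real method also honors minimumLevel, the fossil suffix, and the remote Stat results):

package main

import (
	"fmt"
	"path"
)

// candidatePaths lists where a chunk file may live for each nesting level,
// mirroring the loop above: level 0 -> chunks/abcdef..., level 1 -> chunks/ab/cdef..., etc.
func candidatePaths(chunkID string, maxLevel int) []string {
	dir := "chunks"
	var paths []string
	for level := 0; level*2 < len(chunkID) && level <= maxLevel; level++ {
		paths = append(paths, path.Join(dir, chunkID[2*level:]))
		dir = path.Join(dir, chunkID[2*level:2*level+2])
	}
	return paths
}

func main() {
	for _, p := range candidatePaths("abcdef123456", 2) {
		fmt.Println(p)
	}
	// Output:
	// chunks/abcdef123456
	// chunks/ab/cdef123456
	// chunks/ab/cd/ef123456
}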
@@ -245,7 +244,7 @@ func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk
	}

	defer file.Close()
	if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit / storage.numberOfThreads); err != nil {
	if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
		return err
	}

@@ -265,12 +264,12 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content

	temporaryFile := fullPath + "." + string(suffix) + ".tmp"

	file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY | os.O_CREATE | os.O_TRUNC)
	file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	if err != nil {
		return err
	}

	reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
	_, err = io.Copy(file, reader)
	if err != nil {
		file.Close()
@@ -294,16 +293,16 @@ func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *SFTPStorage) IsCacheNeeded () (bool) { return true }
func (storage *SFTPStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *SFTPStorage) IsMoveFileImplemented() (bool) { return true }
func (storage *SFTPStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *SFTPStorage) IsStrongConsistent() (bool) { return true }
func (storage *SFTPStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *SFTPStorage) IsFastListing() (bool) { return false }
func (storage *SFTPStorage) IsFastListing() bool { return false }

// Enable the test mode.
func (storage *SFTPStorage) EnableTestMode() {}
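The upload path above writes to a '<name>.<suffix>.tmp' file first and only renames it into place once the copy succeeds, so a crashed upload never leaves a truncated chunk under its final name. A generic sketch of the same write-then-rename pattern against a local filesystem (plain os calls stand in for the sftp client; atomicWrite is an illustrative name, not a duplicacy function):

package main

import (
	"io"
	"os"
	"strings"
)

// atomicWrite stages data in a temporary file and renames it into place,
// so readers never observe a partially written file.
func atomicWrite(finalPath string, content io.Reader) error {
	temporaryFile := finalPath + ".tmp"
	file, err := os.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
	if err != nil {
		return err
	}
	if _, err = io.Copy(file, content); err != nil {
		file.Close()
		os.Remove(temporaryFile)
		return err
	}
	if err = file.Close(); err != nil {
		return err
	}
	return os.Rename(temporaryFile, finalPath)
}

func main() {
	if err := atomicWrite("chunk.bin", strings.NewReader("example content")); err != nil {
		panic(err)
	}
}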
@@ -5,11 +5,11 @@
package duplicacy

import (
	"syscall"
	"unsafe"
	"time"
	"os"
	"runtime"
	"syscall"
	"time"
	"unsafe"

	ole "github.com/gilbertchen/go-ole"
)
@@ -28,7 +28,7 @@ type IVSSAsyncVtbl struct {
	queryStatus uintptr
}

func (async *IVSSAsync) VTable() * IVSSAsyncVtbl {
func (async *IVSSAsync) VTable() *IVSSAsyncVtbl {
	return (*IVSSAsyncVtbl)(unsafe.Pointer(async.RawVTable))
}

@@ -55,7 +55,7 @@ func (async *IVSSAsync) Wait(seconds int) bool {
	if status == VSS_S_ASYNC_FINISHED {
		return true
	}
	if time.Now().Unix() - startTime > int64(seconds) {
	if time.Now().Unix()-startTime > int64(seconds) {
		LOG_WARN("IVSSASYNC_TIMEOUT", "IVssAsync is pending for more than %d seconds\n", seconds)
		return false
	}
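Wait is a classic poll-until-done-or-deadline loop: query the async status, return on completion, and bail out with a warning once the elapsed wall-clock time exceeds the budget. A self-contained sketch of the same shape (the done function here is a placeholder, not the COM status call):

package main

import (
	"fmt"
	"time"
)

// waitFor polls 'done' until it reports completion or 'seconds' have elapsed.
func waitFor(done func() bool, seconds int) bool {
	startTime := time.Now().Unix()
	for {
		if done() {
			return true
		}
		if time.Now().Unix()-startTime > int64(seconds) {
			fmt.Printf("operation is pending for more than %d seconds\n", seconds)
			return false
		}
		time.Sleep(100 * time.Millisecond)
	}
}

func main() {
	deadline := time.Now().Add(300 * time.Millisecond)
	fmt.Println(waitFor(func() bool { return time.Now().After(deadline) }, 2))
}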
@@ -77,7 +77,6 @@ func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
	return
}

//665c1d5f-c218-414d-a05d-7fef5f9d5c86
var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}

@@ -137,7 +136,7 @@ type IVSSVtbl struct {
	queryRevertStatus uintptr
}

func (vss *IVSS) VTable() * IVSSVtbl {
func (vss *IVSS) VTable() *IVSSVtbl {
	return (*IVSSVtbl)(unsafe.Pointer(vss.RawVTable))
}

@@ -238,7 +237,7 @@ type SnapshotProperties struct {
	Status int
}

func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) (int) {
func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
	var ret uintptr
	if runtime.GOARCH == "386" {
		address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
@@ -292,8 +291,7 @@ func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {
	return int(ret), int(deleted), deletedGUID
}

func uint16ArrayToString(p *uint16) (string) {
func uint16ArrayToString(p *uint16) string {
	if p == nil {
		return ""
	}
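uint16ArrayToString converts a NUL-terminated UTF-16 pointer, the string representation used throughout the VSS COM interfaces, into a Go string. A portable sketch of the same decoding over a slice (on Windows, syscall.UTF16ToString does this job; the pointer walk in the real function is the unsafe equivalent of the scan below):

package main

import (
	"fmt"
	"unicode/utf16"
)

// utf16ToString decodes a NUL-terminated UTF-16 buffer into a Go string,
// the same job uint16ArrayToString does for pointers returned by VSS calls.
func utf16ToString(buffer []uint16) string {
	for i, c := range buffer {
		if c == 0 {
			buffer = buffer[:i]
			break
		}
	}
	return string(utf16.Decode(buffer))
}

func main() {
	// "C:\" followed by a NUL terminator and junk that must be ignored.
	fmt.Println(utf16ToString([]uint16{'C', ':', '\\', 0, 'x'}))
}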
@@ -481,9 +479,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
	}
	async.Release()

	properties := SnapshotProperties {
	}
	properties := SnapshotProperties{}

	ret = vssBackupComponent.GetSnapshotProperties(snapshotID, &properties)
	if ret != 0 {
@@ -512,7 +508,7 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
	preferencePath := GetDuplicacyPreferencePath()
	shadowLink = preferencePath + "\\shadow"
	os.Remove(shadowLink)
	err = os.Symlink(snapshotPath + "\\", shadowLink)
	err = os.Symlink(snapshotPath+"\\", shadowLink)
	if err != nil {
		LOG_ERROR("VSS_SYMLINK", "Failed to create a symbolic link to the shadow copy just created: %v", err)
		return top
@@ -521,5 +517,3 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
	return shadowLink + "\\" + top[2:]

}
@@ -5,15 +5,15 @@
package duplicacy

import (
	"os"
	"fmt"
	"time"
	"path"
	"strings"
	"strconv"
	"io/ioutil"
	"encoding/json"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"strconv"
	"strings"
	"time"
)

// Snapshot represents a backup of the repository.
@@ -47,10 +47,10 @@ type Snapshot struct {
}

// CreateEmptySnapshot creates an empty snapshot.
func CreateEmptySnapshot (id string) (snapshto *Snapshot) {
func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
	return &Snapshot{
		ID : id,
		ID: id,
		Revision : 0,
		Revision: 0,
		StartTime: time.Now().Unix(),
	}
}
@@ -60,8 +60,8 @@ func CreateEmptySnapshot (id string) (snapshto *Snapshot) {
func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, skippedDirectories []string,
	skippedFiles []string, err error) {

	snapshot = &Snapshot {
	snapshot = &Snapshot{
		ID : id,
		ID: id,
		Revision: 0,
		StartTime: time.Now().Unix(),
	}
@@ -122,8 +122,8 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski

	for len(directories) > 0 {

		directory := directories[len(directories) - 1]
		directory := directories[len(directories)-1]
		directories = directories[:len(directories) - 1]
		directories = directories[:len(directories)-1]
		snapshot.Files = append(snapshot.Files, directory)
		subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, snapshot.discardAttributes)
		if err != nil {
@@ -152,9 +152,9 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski

// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
	Files [] *Entry
	Files []*Entry
	ChunkHashes []string
	ChunkLengths [] int
	ChunkLengths []int
}

// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
@@ -184,7 +184,7 @@ func LoadIncompleteSnapshot() (snapshot *Snapshot) {
		chunkHashes = append(chunkHashes, string(hash))
	}

	snapshot = &Snapshot {
	snapshot = &Snapshot{
		Files: incompleteSnapshot.Files,
		ChunkHashes: chunkHashes,
		ChunkLengths: incompleteSnapshot.ChunkLengths,
@@ -210,7 +210,7 @@ func SaveIncompleteSnapshot(snapshot *Snapshot) {
		chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
	}

	incompleteSnapshot := IncompleteSnapshot {
	incompleteSnapshot := IncompleteSnapshot{
		Files: files,
		ChunkHashes: chunkHashes,
		ChunkLengths: snapshot.ChunkLengths,
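LoadIncompleteSnapshot and SaveIncompleteSnapshot convert between raw binary chunk hashes and hex strings because the incomplete snapshot is persisted as JSON, and raw hash bytes are generally not valid UTF-8. A minimal round-trip sketch of that encoding step:

package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

func main() {
	// A binary hash is arbitrary bytes; hex-encode it before JSON marshaling.
	binaryHash := string([]byte{0xde, 0xad, 0xbe, 0xef})
	encoded := hex.EncodeToString([]byte(binaryHash))

	data, _ := json.Marshal(map[string][]string{"chunk_hashes": {encoded}})
	fmt.Println(string(data)) // {"chunk_hashes":["deadbeef"]}

	// Loading reverses the transformation back to the in-memory binary form.
	decoded, _ := hex.DecodeString(encoded)
	fmt.Println(len(decoded) == len(binaryHash)) // true
}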
@@ -247,14 +247,14 @@ func RemoveIncompleteSnapshot() {
// CreateSnapshotFromDescription creates a snapshot from a json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {

	var root map[string] interface{}
	var root map[string]interface{}

	err = json.Unmarshal(description, &root)
	if err != nil {
		return nil, err
	}

	snapshot = &Snapshot {}
	snapshot = &Snapshot{}

	if value, ok := root["id"]; !ok {
		return nil, fmt.Errorf("No id is specified in the snapshot")
@@ -308,7 +308,7 @@ func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err
		}
	}

	for _, sequenceType := range []string { "files", "chunks", "lengths" } {
	for _, sequenceType := range []string{"files", "chunks", "lengths"} {
		if value, ok := root[sequenceType]; !ok {
			return nil, fmt.Errorf("No %s are specified in the snapshot", sequenceType)
		} else if _, ok = value.([]interface{}); !ok {
@@ -336,7 +336,7 @@ func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err
// LoadChunks constructs 'ChunkHashes' from the json description.
func (snapshot *Snapshot) LoadChunks(description []byte) (err error) {

	var root [] interface {}
	var root []interface{}
	err = json.Unmarshal(description, &root)
	if err != nil {
		return err
@@ -363,7 +363,7 @@ func (snapshot *Snapshot) LoadLengths(description []byte) (err error) {
}

// MarshalJSON creates a json representation of the snapshot.
func (snapshot *Snapshot) MarshalJSON() ([] byte, error) {
func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {

	object := make(map[string]interface{})

@@ -386,7 +386,7 @@ func (snapshot *Snapshot) MarshalJSON() ([] byte, error) {
}

// MarshalSequence creates a json representation for the specified chunk sequence.
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([] byte, error) {
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([]byte, error) {

	if sequenceType == "files" {
		return json.Marshal(snapshot.Files)
@@ -398,7 +398,7 @@ func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([] byte, error)
}

// SetSequence assigns a chunk sequence to the specified field.
func (snapshot *Snapshot) SetSequence(sequenceType string, sequence [] string) {
func (snapshot *Snapshot) SetSequence(sequenceType string, sequence []string) {
	if sequenceType == "files" {
		snapshot.FileSequence = sequence
	} else if sequenceType == "chunks" {
@@ -409,7 +409,7 @@ func (snapshot *Snapshot) SetSequence(sequenceType string, sequence [] string) {
	}

// encodeSequence turns a sequence of binary hashes into a sequence of hex hashes.
func encodeSequence(sequence[] string) ([] string) {
func encodeSequence(sequence []string) []string {

	sequenceInHex := make([]string, len(sequence))

@@ -419,5 +419,3 @@ func encodeSequence(sequence[] string) ([] string) {

	return sequenceInHex
}
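encodeSequence is the bulk version of the hex conversion shown earlier: it maps every binary hash in a sequence to its hex form so the whole sequence can be embedded in JSON. The loop body is elided in the diff, so the following is a plausible reconstruction consistent with the signature, not a copy of the real code:

package main

import (
	"encoding/hex"
	"fmt"
)

// encodeSequence turns a sequence of binary hashes into a sequence of hex hashes.
func encodeSequence(sequence []string) []string {
	sequenceInHex := make([]string, len(sequence))
	for i, hash := range sequence {
		sequenceInHex[i] = hex.EncodeToString([]byte(hash))
	}
	return sequenceInHex
}

func main() {
	fmt.Println(encodeSequence([]string{string([]byte{0x01, 0xff})})) // [01ff]
}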
@@ -5,21 +5,21 @@
package duplicacy

import (
	"io"
	"os"
	"fmt"
	"text/tabwriter"
	"sort"
	"bytes"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"os"
	"path"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"text/tabwriter"
	"time"
	"math"
	"path"
	"io/ioutil"
	"encoding/json"
	"encoding/hex"

	"github.com/aryann/difflib"
)
@@ -35,7 +35,7 @@ type FossilCollection struct {
	EndTime int64 `json:"end_time"`

	// The latest revision for each snapshot id when the fossil collection was created.
	LastRevisions map[string] int `json:"last_revisions"`
	LastRevisions map[string]int `json:"last_revisions"`

	// Fossils (i.e., chunks not referenced by any snapshots)
	Fossils []string `json:"fossils"`
@@ -45,23 +45,23 @@ type FossilCollection struct {
}

// CreateFossilCollection creates an empty fossil collection
func CreateFossilCollection(allSnapshots map[string][] *Snapshot) *FossilCollection{
func CreateFossilCollection(allSnapshots map[string][]*Snapshot) *FossilCollection {

	lastRevisions := make(map[string] int)
	lastRevisions := make(map[string]int)
	for id, snapshots := range allSnapshots {
		lastRevisions[id] = snapshots[len(snapshots) - 1].Revision
		lastRevisions[id] = snapshots[len(snapshots)-1].Revision
	}

	return &FossilCollection {
	return &FossilCollection{
		LastRevisions : lastRevisions,
		LastRevisions: lastRevisions,
	}
}

// IsDeletable determines if the previously collected fossils are safe to be permanently removed. If so, it will
// also return a number of snapshots that were created during or after these fossils were being collected.
// Therefore, some fossils may be referenced by these new snapshots and they must be resurrected.
func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignoredIDs [] string,
func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignoredIDs []string,
	allSnapshots map[string][] *Snapshot) (isDeletable bool, newSnapshots []*Snapshot) {
	allSnapshots map[string][]*Snapshot) (isDeletable bool, newSnapshots []*Snapshot) {

	hasNewSnapshot := make(map[string]bool)
	lastSnapshotTime := make(map[string]int64)
@@ -111,7 +111,7 @@ func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignored

	// If this snapshot ends before this fossil collection, then it is still possible that another snapshot
	// might be in progress (although very unlikely). So we only deem it deletable if that is not the case.
	if snapshot.EndTime > collection.EndTime + int64(extraTime){
	if snapshot.EndTime > collection.EndTime+int64(extraTime) {
		hasNewSnapshot[hostID] = true
		newSnapshots = append(newSnapshots, snapshot)
		break
@@ -126,7 +126,7 @@ func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignored
		LOG_TRACE("SNAPSHOT_NO_NEW", "No new snapshot from %s since the fossil collection step", snapshotID)
	}

	lastSnapshot := allSnapshots[snapshotID][len(allSnapshots[snapshotID]) - 1]
	lastSnapshot := allSnapshots[snapshotID][len(allSnapshots[snapshotID])-1]
	if lastSnapshot.EndTime > lastSnapshotTime[hostID] {
		lastSnapshotTime[hostID] = lastSnapshot.EndTime
	}
@@ -140,7 +140,7 @@ func (collection *FossilCollection) IsDeletable(isStrongConsistent bool, ignored
	// snapshot id during the last 7 days. A snapshot created at roughly the same time as this fossil
	// collection would have finished already, while a snapshot currently being created does not affect
	// this fossil collection.
	if lastSnapshotTime[hostID] > 0 && lastSnapshotTime[hostID] < time.Now().Unix() - maxSnapshotRunningTime * secondsInDay {
	if lastSnapshotTime[hostID] > 0 && lastSnapshotTime[hostID] < time.Now().Unix()-maxSnapshotRunningTime*secondsInDay {
		LOG_INFO("SNAPSHOT_INACTIVE", "Ignore snapshot %s whose last revision was created %d days ago",
			hostID, maxSnapshotRunningTime)
		continue
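The rule IsDeletable enforces can be stated compactly: a fossil collection is safe to delete once every snapshot ID that is still active has produced a snapshot that ended after the collection did (plus a safety margin on eventually consistent storages). A stripped-down sketch of that predicate with simplified types; it omits the inactive-snapshot and ignored-ID handling shown in the diff:

package main

import "fmt"

type snap struct{ endTime int64 }

// deletable reports whether every active snapshot id has a snapshot that
// finished after the collection's end time plus a consistency margin.
func deletable(collectionEnd int64, extraTime int64, all map[string][]snap) bool {
	for id, snaps := range all {
		sawNewer := false
		for _, s := range snaps {
			if s.endTime > collectionEnd+extraTime {
				sawNewer = true
				break
			}
		}
		if !sawNewer {
			fmt.Printf("no new snapshot from %s yet; keep the fossils\n", id)
			return false
		}
	}
	return true
}

func main() {
	all := map[string][]snap{"host1": {{endTime: 100}, {endTime: 260}}}
	fmt.Println(deletable(200, 0, all)) // true: host1 finished a snapshot after t=200
}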
@@ -174,13 +174,12 @@ type SnapshotManager struct {
	snapshotCache *FileStorage

	chunkDownloader *ChunkDownloader

}

// CreateSnapshotManager creates a snapshot manager
func CreateSnapshotManager(config *Config, storage Storage) *SnapshotManager {

	manager := &SnapshotManager {
	manager := &SnapshotManager{
		config: config,
		storage: storage,
		fileChunk: CreateChunk(config, true),
@@ -231,14 +230,14 @@ func (manager *SnapshotManager) DownloadSnapshot(snapshotID string, revision int
// sequenceReader loads the chunks pointed to by 'sequence' one by one as needed. This avoids loading all chunks into
// memory before passing them to the json unmarshaller.
type sequenceReader struct {
	sequence [] string
	sequence []string
	buffer *bytes.Buffer
	index int
	refillFunc func(hash string) ([]byte)
	refillFunc func(hash string) []byte
}

// Read reads a new chunk using the refill function when there is no more data in the buffer
func (reader *sequenceReader)Read(data []byte) (n int, err error) {
func (reader *sequenceReader) Read(data []byte) (n int, err error) {
	if len(reader.buffer.Bytes()) == 0 {
		if reader.index < len(reader.sequence) {
			newData := reader.refillFunc(reader.sequence[reader.index])
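The point of sequenceReader is that json.Decoder pulls from an io.Reader, so chunks can be fetched lazily inside Read instead of being concatenated up front. A self-contained sketch of the same streaming pattern, with an in-memory refill function standing in for the chunk downloader (the Read body here is a simplified reconstruction, since the diff only shows its opening lines):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

// sequenceReader refills its buffer from one "chunk" at a time, on demand.
type sequenceReader struct {
	sequence   []string
	buffer     *bytes.Buffer
	index      int
	refillFunc func(hash string) []byte
}

func (reader *sequenceReader) Read(data []byte) (n int, err error) {
	if reader.buffer.Len() == 0 {
		if reader.index >= len(reader.sequence) {
			return 0, io.EOF
		}
		reader.buffer.Write(reader.refillFunc(reader.sequence[reader.index]))
		reader.index++
	}
	return reader.buffer.Read(data)
}

func main() {
	// Two "chunks" that together hold one JSON array.
	chunks := map[string][]byte{"h1": []byte(`[{"path":"a"},`), "h2": []byte(`{"path":"b"}]`)}
	reader := &sequenceReader{
		sequence:   []string{"h1", "h2"},
		buffer:     new(bytes.Buffer),
		refillFunc: func(hash string) []byte { return chunks[hash] },
	}

	var entries []struct{ Path string }
	if err := json.NewDecoder(reader).Decode(&entries); err != nil {
		panic(err)
	}
	fmt.Println(entries) // [{a} {b}]
}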
@@ -274,17 +273,17 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,

	manager.CreateChunkDownloader()

	reader := sequenceReader {
	reader := sequenceReader{
		sequence: snapshot.FileSequence,
		buffer: new(bytes.Buffer),
		refillFunc: func (chunkHash string) ([]byte) {
		refillFunc: func(chunkHash string) []byte {
			i := manager.chunkDownloader.AddChunk(chunkHash)
			chunk := manager.chunkDownloader.WaitForChunk(i)
			return chunk.GetBytes()
		},
	}

	files := make([] *Entry, 0)
	files := make([]*Entry, 0)
	decoder := json.NewDecoder(&reader)

	// read open bracket
@@ -315,7 +314,6 @@ func (manager *SnapshotManager) DownloadSnapshotFileSequence(snapshot *Snapshot,
	return true
}

// DownloadSnapshotSequence downloads the content represented by a sequence of chunks, and then unmarshals the content
// using the specified 'loadFunction'. Its purpose is to decode the chunk sequences representing chunk hashes or chunk lengths
// in a snapshot.
@@ -331,7 +329,6 @@ func (manager *SnapshotManager) DownloadSnapshotSequence(snapshot *Snapshot, seq

	content := manager.DownloadSequence(sequence)

	if len(content) == 0 {
		LOG_ERROR("SNAPSHOT_PARSE", "Failed to load %s specified in the snapshot %s at revision %d",
			sequenceType, snapshot.ID, snapshot.Revision)
@@ -367,7 +364,7 @@ func (manager *SnapshotManager) DownloadSnapshotContents(snapshot *Snapshot, pat
}

// CleanSnapshotCache removes all files not referenced by the specified 'snapshot' in the snapshot cache.
func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string] []*Snapshot) bool {
func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, allSnapshots map[string][]*Snapshot) bool {

	if allSnapshots == nil {
		// If the 'fossils' directory exists then don't clean the cache as all snapshots will be needed later
@@ -409,7 +406,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all

	allCachedSnapshots, _ := manager.ListAllFiles(manager.snapshotCache, "snapshots/")
	for _, snapshotFile := range allCachedSnapshots {
		if snapshotFile[len(snapshotFile) - 1] == '/' {
		if snapshotFile[len(snapshotFile)-1] == '/' {
			continue
		}

@@ -460,7 +457,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all

	allFiles, _ := manager.ListAllFiles(manager.snapshotCache, "chunks/")
	for _, file := range allFiles {
		if file[len(file) - 1] != '/' {
		if file[len(file)-1] != '/' {
			chunkID := strings.Replace(file, "/", "", -1)
			if _, found := chunks[chunkID]; !found {
				LOG_DEBUG("SNAPSHOT_CLEAN", "Delete chunk %s from the snapshot cache", chunkID)
@@ -478,7 +475,7 @@ func (manager *SnapshotManager) CleanSnapshotCache(latestSnapshot *Snapshot, all
}

// ListSnapshotIDs returns all snapshot ids.
func (manager *SnapshotManager) ListSnapshotIDs() (snapshotIDs [] string, err error) {
func (manager *SnapshotManager) ListSnapshotIDs() (snapshotIDs []string, err error) {

	LOG_TRACE("SNAPSHOT_LIST_IDS", "Listing all snapshot ids")

@@ -488,8 +485,8 @@ func (manager *SnapshotManager) ListSnapshotIDs() (snapshotIDs [] string, err er
	}

	for _, dir := range dirs {
		if len(dir) > 0 && dir[len(dir) - 1] == '/' {
		if len(dir) > 0 && dir[len(dir)-1] == '/' {
			snapshotIDs = append(snapshotIDs, dir[:len(dir) - 1])
			snapshotIDs = append(snapshotIDs, dir[:len(dir)-1])
		}
	}

@@ -497,7 +494,7 @@ func (manager *SnapshotManager) ListSnapshotIDs() (snapshotIDs [] string, err er
}

// ListSnapshotRevisions returns the list of all revisions given a snapshot id.
func (manager *SnapshotManager) ListSnapshotRevisions(snapshotID string) (revisions [] int, err error) {
func (manager *SnapshotManager) ListSnapshotRevisions(snapshotID string) (revisions []int, err error) {

	LOG_TRACE("SNAPSHOT_LIST_REVISIONS", "Listing revisions for snapshot %s", snapshotID)

@@ -519,7 +516,7 @@ func (manager *SnapshotManager) ListSnapshotRevisions(snapshotID string) (revisi
	}

	for _, file := range files {
		if len(file) > 0 && file[len(file) - 1] != '/' {
		if len(file) > 0 && file[len(file)-1] != '/' {
			revision, err := strconv.Atoi(file)
			if err == nil {
				revisions = append(revisions, revision)
@@ -571,8 +568,8 @@ func (manager *SnapshotManager) ListAllFiles(storage Storage, top string) (allFi

	for len(directories) > 0 {

		dir := directories[len(directories) - 1]
		dir := directories[len(directories)-1]
		directories = directories[:len(directories) - 1]
		directories = directories[:len(directories)-1]

		LOG_TRACE("LIST_FILES", "Listing %s", dir)

@@ -588,8 +585,8 @@ func (manager *SnapshotManager) ListAllFiles(storage Storage, top string) (allFi
	}

	for i, file := range files {
		if len(file) > 0 && file[len(file) - 1] == '/' {
		if len(file) > 0 && file[len(file)-1] == '/' {
			directories = append(directories, dir + file)
			directories = append(directories, dir+file)
		} else {
			allFiles = append(allFiles, (dir + file)[len(top):])
			allSizes = append(allSizes, sizes[i])
@@ -619,7 +616,7 @@ func (manager *SnapshotManager) ListAllFiles(storage Storage, top string) (allFi
}

// GetSnapshotChunks returns all chunks referenced by a given snapshot.
func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot) (chunks [] string) {
func (manager *SnapshotManager) GetSnapshotChunks(snapshot *Snapshot) (chunks []string) {

	for _, chunkHash := range snapshot.FileSequence {
		chunks = append(chunks, manager.config.GetChunkIDFromHash(chunkHash))
@@ -658,7 +655,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
	LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showFiles: %t, showChunks: %t",
		snapshotID, revisionsToList, tag, showFiles, showChunks)

	var snapshotIDs [] string
	var snapshotIDs []string
	var err error

	if snapshotID == "" {
@@ -668,7 +665,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
			return 0
		}
	} else {
		snapshotIDs = []string { snapshotID }
		snapshotIDs = []string{snapshotID}
	}

	numberOfSnapshots := 0
@@ -715,7 +712,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
			totalFiles++
			totalFileSize += file.Size
			if file.Size > maxSize {
				maxSize = maxSize * 10 + 9
				maxSize = maxSize*10 + 9
				maxSizeDigits += 1
			}
			if file.EndChunk > lastChunk {
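The maxSize = maxSize*10 + 9 loop is a compact way to track how many decimal digits the largest file size needs: maxSize walks through 9, 99, 999, ... and maxSizeDigits counts the steps, which later feeds the column width used to align sizes. The same trick in isolation (widthFor is an illustrative name):

package main

import "fmt"

// widthFor returns the number of decimal digits needed to print any value
// up to the largest size seen, using the same grow-by-9s trick as the diff.
func widthFor(sizes []int64) int {
	maxSize := int64(9)
	maxSizeDigits := 1
	for _, size := range sizes {
		for size > maxSize {
			maxSize = maxSize*10 + 9
			maxSizeDigits += 1
		}
	}
	return maxSizeDigits
}

func main() {
	fmt.Println(widthFor([]int64{7, 42, 123456})) // 6
	// The width can then drive a format like fmt.Sprintf("%*d", width, size).
}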
@@ -732,7 +729,7 @@ func (manager *SnapshotManager) ListSnapshots(snapshotID string, revisionsToList
|
|||||||
|
|
||||||
metaChunks := len(snapshot.FileSequence) + len(snapshot.ChunkSequence) + len(snapshot.LengthSequence)
|
metaChunks := len(snapshot.FileSequence) + len(snapshot.ChunkSequence) + len(snapshot.LengthSequence)
|
||||||
LOG_INFO("SNAPSHOT_STATS", "Files: %d, total size: %d, file chunks: %d, metadata chunks: %d",
|
LOG_INFO("SNAPSHOT_STATS", "Files: %d, total size: %d, file chunks: %d, metadata chunks: %d",
|
||||||
totalFiles, totalFileSize, lastChunk + 1, metaChunks)
|
totalFiles, totalFileSize, lastChunk+1, metaChunks)
|
||||||
}
|
}
|
||||||
|
|
||||||
if showChunks {
|
if showChunks {
|
||||||
@@ -756,7 +753,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
|||||||
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
|
LOG_DEBUG("LIST_PARAMETERS", "id: %s, revisions: %v, tag: %s, showStatistics: %t, checkFiles: %t, searchFossils: %t, resurrect: %t",
|
||||||
snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
|
snapshotID, revisionsToCheck, tag, showStatistics, checkFiles, searchFossils, resurrect)
|
||||||
|
|
||||||
snapshotMap := make(map[string] [] *Snapshot)
|
snapshotMap := make(map[string][]*Snapshot)
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
// Stores the chunk file size for each chunk
|
// Stores the chunk file size for each chunk
|
||||||
@@ -772,7 +769,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
|||||||
allChunks, allSizes := manager.ListAllFiles(manager.storage, "chunks/")
|
allChunks, allSizes := manager.ListAllFiles(manager.storage, "chunks/")
|
||||||
|
|
||||||
for i, chunk := range allChunks {
|
for i, chunk := range allChunks {
|
||||||
if len(chunk) == 0 || chunk[len(chunk) - 1] == '/'{
|
if len(chunk) == 0 || chunk[len(chunk)-1] == '/' {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -799,7 +796,6 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
|||||||
snapshotMap[snapshotID] = nil
|
snapshotMap[snapshotID] = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
snapshotIDIndex := 0
|
snapshotIDIndex := 0
|
||||||
for snapshotID, _ = range snapshotMap {
|
for snapshotID, _ = range snapshotMap {
|
||||||
|
|
||||||
@@ -863,7 +859,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
|||||||
if resurrect {
|
if resurrect {
|
||||||
manager.resurrectChunk(chunkPath, chunkID)
|
manager.resurrectChunk(chunkPath, chunkID)
|
||||||
} else {
|
} else {
|
||||||
LOG_WARN("SNAPHOST_FOSSIL", "Chunk %s referenced by snapshot %s at revision %d " +
|
LOG_WARN("SNAPHOST_FOSSIL", "Chunk %s referenced by snapshot %s at revision %d "+
|
||||||
"has been marked as a fossil", chunkID, snapshotID, revision)
|
"has been marked as a fossil", chunkID, snapshotID, revision)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -908,7 +904,7 @@ func (manager *SnapshotManager) CheckSnapshots(snapshotID string, revisionsToChe
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Print snapshot and revision statistics
|
// Print snapshot and revision statistics
|
||||||
func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string] [] *Snapshot, chunkSizeMap map[string]int64, chunkUniqueMap map[string]bool,
|
func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string][]*Snapshot, chunkSizeMap map[string]int64, chunkUniqueMap map[string]bool,
|
||||||
chunkSnapshotMap map[string]int) {
|
chunkSnapshotMap map[string]int) {
|
||||||
for snapshotID, snapshotList := range snapshotMap {
|
for snapshotID, snapshotList := range snapshotMap {
|
||||||
|
|
||||||
@@ -957,7 +953,7 @@ func (manager *SnapshotManager) ShowStatistics(snapshotMap map[string] [] *Snaps
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Print snapshot and revision statistics in tabular format
|
// Print snapshot and revision statistics in tabular format
|
||||||
func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string] [] *Snapshot, chunkSizeMap map[string]int64, chunkUniqueMap map[string]bool,
|
func (manager *SnapshotManager) ShowStatisticsTabular(snapshotMap map[string][]*Snapshot, chunkSizeMap map[string]int64, chunkUniqueMap map[string]bool,
|
||||||
chunkSnapshotMap map[string]int) {
|
chunkSnapshotMap map[string]int) {
|
||||||
tableBuffer := new(bytes.Buffer)
|
tableBuffer := new(bytes.Buffer)
|
||||||
tableWriter := tabwriter.NewWriter(tableBuffer, 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
|
tableWriter := tabwriter.NewWriter(tableBuffer, 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)
|
||||||
@@ -1099,7 +1095,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
files := make([]*Entry, 0, len(snapshot.Files) / 2)
|
files := make([]*Entry, 0, len(snapshot.Files)/2)
|
||||||
for _, file := range snapshot.Files {
|
for _, file := range snapshot.Files {
|
||||||
if file.IsFile() && file.Size != 0 {
|
if file.IsFile() && file.Size != 0 {
|
||||||
files = append(files, file)
|
files = append(files, file)
|
||||||
@@ -1109,7 +1105,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
|
|||||||
sort.Sort(ByChunk(files))
|
sort.Sort(ByChunk(files))
|
||||||
corruptedFiles := 0
|
corruptedFiles := 0
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
if !manager.RetrieveFile(snapshot, file, func([]byte) {} ) {
|
if !manager.RetrieveFile(snapshot, file, func([]byte) {}) {
|
||||||
corruptedFiles++
|
corruptedFiles++
|
||||||
}
|
}
|
||||||
LOG_TRACE("SNAPSHOT_VERIFY", "%s", file.Path)
|
LOG_TRACE("SNAPSHOT_VERIFY", "%s", file.Path)
|
||||||
@@ -1127,7 +1123,7 @@ func (manager *SnapshotManager) VerifySnapshot(snapshot *Snapshot) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// RetrieveFile retrieve the file in the specifed snapshot.
|
// RetrieveFile retrieve the file in the specifed snapshot.
|
||||||
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)()) bool {
|
func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, output func([]byte)) bool {
|
||||||
|
|
||||||
if file.Size == 0 {
|
if file.Size == 0 {
|
||||||
return true
|
return true
|
||||||
@@ -1181,7 +1177,7 @@ func (manager *SnapshotManager) RetrieveFile(snapshot *Snapshot, file *Entry, ou
|
|||||||
}
|
}
|
||||||
|
|
||||||
// FindFile returns the file entry that has the given file name.
|
// FindFile returns the file entry that has the given file name.
|
||||||
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) (*Entry) {
|
func (manager *SnapshotManager) FindFile(snapshot *Snapshot, filePath string, suppressError bool) *Entry {
|
||||||
for _, entry := range snapshot.Files {
|
for _, entry := range snapshot.Files {
|
||||||
if entry.Path == filePath {
|
if entry.Path == filePath {
|
||||||
return entry
|
return entry
|
||||||
@@ -1232,7 +1228,7 @@ func (manager *SnapshotManager) PrintFile(snapshotID string, revision int, path
|
|||||||
}
|
}
|
||||||
|
|
||||||
file := manager.FindFile(snapshot, path, false)
|
file := manager.FindFile(snapshot, path, false)
|
||||||
var content [] byte
|
var content []byte
|
||||||
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
|
if !manager.RetrieveFile(snapshot, file, func(chunk []byte) { content = append(content, chunk...) }) {
|
||||||
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
|
LOG_ERROR("SNAPSHOT_RETRIEVE", "File %s is corrupted in snapshot %s at revision %d",
|
||||||
path, snapshot.ID, snapshot.Revision)
|
path, snapshot.ID, snapshot.Revision)
|
||||||
@@ -1280,7 +1276,6 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
|||||||
leftSnapshot = manager.DownloadSnapshot(snapshotID, revisions[0])
|
leftSnapshot = manager.DownloadSnapshot(snapshotID, revisions[0])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
if len(filePath) > 0 {
|
if len(filePath) > 0 {
|
||||||
|
|
||||||
manager.DownloadSnapshotContents(leftSnapshot, nil)
|
manager.DownloadSnapshotContents(leftSnapshot, nil)
|
||||||
@@ -1320,7 +1315,7 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
|||||||
|
|
||||||
after := 10
|
after := 10
|
||||||
before := 10
|
before := 10
|
||||||
var buffer [] string
|
var buffer []string
|
||||||
on := false
|
on := false
|
||||||
distance := 0
|
distance := 0
|
||||||
|
|
||||||
@@ -1372,19 +1367,19 @@ func (manager *SnapshotManager) Diff(top string, snapshotID string, revisions []
|
|||||||
// Find the max Size value in order for pretty alignment.
|
// Find the max Size value in order for pretty alignment.
|
||||||
for _, file := range leftSnapshot.Files {
|
for _, file := range leftSnapshot.Files {
|
||||||
for !file.IsDir() && file.Size > maxSize {
|
for !file.IsDir() && file.Size > maxSize {
|
||||||
maxSize = maxSize * 10 + 9
|
maxSize = maxSize*10 + 9
|
||||||
maxSizeDigits += 1
|
maxSizeDigits += 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, file := range rightSnapshot.Files {
|
for _, file := range rightSnapshot.Files {
|
||||||
for !file.IsDir() && file.Size > maxSize {
|
for !file.IsDir() && file.Size > maxSize {
|
||||||
maxSize = maxSize * 10 + 9
|
maxSize = maxSize*10 + 9
|
||||||
maxSizeDigits += 1
|
maxSizeDigits += 1
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
buffer := make([]byte, 32 * 1024)
|
buffer := make([]byte, 32*1024)
|
||||||
|
|
||||||
var i, j int
|
var i, j int
|
||||||
for i < len(leftSnapshot.Files) || j < len(rightSnapshot.Files) {
|
for i < len(leftSnapshot.Files) || j < len(rightSnapshot.Files) {
|
||||||
@@ -1486,7 +1481,6 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
             LOG_INFO("SNAPSHOT_HISTORY", "%7d:", revision)
         }
 
-
     }
 
     stat, err := os.Stat(joinPath(top, filePath))
@@ -1497,7 +1491,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
         modifiedFlag = "*"
     }
     if showLocalHash {
-        localFile.Hash = manager.config.ComputeFileHash(joinPath(top, filePath), make([]byte, 32 * 1024))
+        localFile.Hash = manager.config.ComputeFileHash(joinPath(top, filePath), make([]byte, 32*1024))
         if lastVersion.Hash != localFile.Hash {
             modifiedFlag = "*"
         }
@@ -1512,7 +1506,7 @@ func (manager *SnapshotManager) ShowHistory(top string, snapshotID string, revis
 
 // fossilizeChunk turns the chunk into a fossil.
 func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string,
-    exclusive bool, collection *FossilCollection) (bool) {
+    exclusive bool, collection *FossilCollection) bool {
     if exclusive {
         err := manager.storage.DeleteFile(0, filePath)
         if err != nil {
@@ -1548,7 +1542,7 @@ func (manager *SnapshotManager) fossilizeChunk(chunkID string, filePath string,
 }
 
 // resurrectChunk turns the fossil back into a chunk
-func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) (bool) {
+func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string) bool {
     chunkPath, exist, _, err := manager.storage.FindChunk(0, chunkID, false)
     if err != nil {
         LOG_ERROR("CHUNK_FIND", "Failed to locate the path for the chunk %s: %v", chunkID, err)
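The two helpers above are the primitives of fossil collection: fossilizeChunk demotes a chunk to a fossil (or, in exclusive mode, deletes it outright), and resurrectChunk promotes a fossil back to a chunk when it turns out to still be referenced. Judging from the moveChunk helper in the storage test later in this commit, a fossil is the same file under a ".fsl" suffix. A filesystem-only sketch of that rename convention (illustrative; the real code goes through the Storage interface and its FindChunk, MoveFile, and DeleteFile methods):

    package main

    import (
        "fmt"
        "os"
    )

    func fossilize(chunkPath string) (string, error) {
        fossilPath := chunkPath + ".fsl" // suffix as used by moveChunk in the storage test
        return fossilPath, os.Rename(chunkPath, fossilPath)
    }

    func resurrect(fossilPath string) (string, error) {
        chunkPath := fossilPath[:len(fossilPath)-len(".fsl")]
        return chunkPath, os.Rename(fossilPath, chunkPath)
    }

    func main() {
        f, _ := os.Create("chunk-demo")
        f.Close()
        fossil, _ := fossilize("chunk-demo")
        fmt.Println("fossilized:", fossil)
        chunk, _ := resurrect(fossil)
        fmt.Println("resurrected:", chunk)
        os.Remove(chunk)
    }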
@@ -1571,8 +1565,6 @@ func (manager *SnapshotManager) resurrectChunk(fossilPath string, chunkID string
     return true
 }
 
-
-
 // PruneSnapshots deletes snapshots by revisions, tags, or a retention policy. The main idea is two-step
 // fossil collection.
 // 1. Delete snapshots specified by revision, retention policy, with a tag. Find any resulting unreferenced
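The comment above summarizes the prune algorithm: step one deletes the selected snapshots and fossilizes, rather than deletes, the chunks only they referenced, recording them in a fossil collection; a later prune run completes step two, deleting a collection's fossils only once that is provably safe (the IsDeletable scenarios in the test file below) and resurrecting any fossil that a snapshot created in the meantime references again. A toy model of the step-two decision, with invented chunk names and plain maps standing in for the real FossilCollection and snapshot data:

    package main

    import "fmt"

    func main() {
        fossils := []string{"chunkA", "chunkB"}                     // recorded in step one
        referencedByNewSnapshots := map[string]bool{"chunkB": true} // seen since the collection ended

        for _, chunk := range fossils {
            if referencedByNewSnapshots[chunk] {
                fmt.Println("resurrect", chunk) // still needed: turn the fossil back into a chunk
            } else {
                fmt.Println("delete", chunk) // no snapshot can need it any more
            }
        }
    }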
@@ -1594,7 +1586,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
     dryRun bool, deleteOnly bool, collectOnly bool) bool {
 
     LOG_DEBUG("DELETE_PARAMETERS",
-        "id: %s, revisions: %v, tags: %v, retentions: %v, exhaustive: %t, exclusive: %t, " +
+        "id: %s, revisions: %v, tags: %v, retentions: %v, exhaustive: %t, exclusive: %t, "+
         "dryrun: %t, deleteOnly: %t, collectOnly: %t",
         snapshotID, revisionsToBeDeleted, tags, retentions,
         exhaustive, exclusive, dryRun, deleteOnly, collectOnly)
@@ -1607,13 +1599,13 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
     logDir := path.Join(preferencePath, "logs")
     os.Mkdir(logDir, 0700)
     logFileName := path.Join(logDir, time.Now().Format("prune-log-20060102-150405"))
-    logFile, err := os.OpenFile(logFileName, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)
+    logFile, err := os.OpenFile(logFileName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
 
     defer func() {
         if logFile != nil {
             logFile.Close()
         }
-    } ()
+    }()
 
     // A retention policy is specified in the form 'interval:age', where both 'interval' and 'age' are numbers of
     // days. A retention policy applies to a snapshot if the snapshot is older than the age. For snapshots older
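As the comment above describes, each retention option carries an 'interval:age' pair measured in days. A small sketch of the parsing step, simplified relative to the command-line handling in the source (error handling trimmed, names invented):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func parseRetention(option string) (interval int, age int, err error) {
        parts := strings.Split(option, ":")
        if len(parts) != 2 {
            return 0, 0, fmt.Errorf("invalid retention policy %q", option)
        }
        if interval, err = strconv.Atoi(parts[0]); err != nil {
            return 0, 0, err
        }
        if age, err = strconv.Atoi(parts[1]); err != nil {
            return 0, 0, err
        }
        return interval, age, nil
    }

    func main() {
        // "7:30": for snapshots older than 30 days, keep one every 7 days.
        // "0:180": keep no snapshots older than 180 days (interval 0, matching
        // the RETENTION_POLICY log message in the hunks that follow).
        fmt.Println(parseRetention("7:30"))
        fmt.Println(parseRetention("0:180"))
    }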
@@ -1629,7 +1621,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
         Age int
         Interval int
     }
-    var retentionPolicies [] RetentionPolicy
+    var retentionPolicies []RetentionPolicy
 
     // Parse the retention policy if needed.
     if len(revisionsToBeDeleted) == 0 && len(retentions) > 0 {
@@ -1654,9 +1646,9 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
             return false
         }
 
-        policy := RetentionPolicy {
-            Age : age,
-            Interval : interval,
+        policy := RetentionPolicy{
+            Age: age,
+            Interval: interval,
         }
 
         retentionPolicies = append(retentionPolicies, policy)
@@ -1668,7 +1660,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
     }
 
     for i, policy := range retentionPolicies {
-        if i == 0 || policy.Age < retentionPolicies[i - 1].Age {
+        if i == 0 || policy.Age < retentionPolicies[i-1].Age {
             if policy.Interval == 0 {
                 LOG_INFO("RETENTION_POLICY", "Keep no snapshots older than %d days", policy.Age)
             } else {
@@ -1679,7 +1671,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
         }
     }
 
-    allSnapshots := make(map[string] [] *Snapshot)
+    allSnapshots := make(map[string][]*Snapshot)
 
     // We must find all snapshots for all ids even if only one snapshot is specified to be deleted,
     // because we need to find out which chunks are not referenced.
@@ -1697,7 +1689,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
         }
 
         sort.Ints(revisions)
-        var snapshots [] *Snapshot
+        var snapshots []*Snapshot
         for _, revision := range revisions {
             snapshot := manager.DownloadSnapshot(id, revision)
             if snapshot != nil {
@@ -1731,7 +1723,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
         }
 
         matched := collectionRegex.FindStringSubmatch(collectionName)
-        if matched == nil{
+        if matched == nil {
             continue
         }
 
@@ -1874,7 +1866,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
         i := 0
         for j, snapshot := range snapshots {
 
-            if !exclusive && j == len(snapshots) - 1 {
+            if !exclusive && j == len(snapshots)-1 {
                 continue
             }
 
@@ -1886,7 +1878,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
 
             // Find out which retent policy applies based on the age.
             for i < len(retentionPolicies) &&
-                int(now - snapshot.StartTime) <retentionPolicies[i].Age * secondsInDay {
+                int(now-snapshot.StartTime) < retentionPolicies[i].Age*secondsInDay {
                 i++
                 lastSnapshotTime = 0
             }
@@ -1897,7 +1889,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
                 snapshot.Flag = true
                 toBeDeleted++
             } else if lastSnapshotTime != 0 &&
-                int(snapshot.StartTime - lastSnapshotTime) < retentionPolicies[i].Interval * secondsInDay - 600 {
+                int(snapshot.StartTime-lastSnapshotTime) < retentionPolicies[i].Interval*secondsInDay-600 {
                 // Delete the snapshot if it is too close to the last kept one. Note that a tolerance of 10
                 // minutes was subtracted from the interval.
                 snapshot.Flag = true
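Worked numbers for the tolerance in the hunk above, assuming secondsInDay is 24*3600 (its definition is outside the quoted hunks): with an interval of 7 days, the deletion threshold is 7*86400 - 600 = 604200 seconds, so a snapshot taken up to 10 minutes short of the full interval after the last kept one still survives:

    package main

    import "fmt"

    func main() {
        const secondsInDay = 24 * 3600 // assumed value
        interval := 7                  // days, from an "interval:age" policy
        threshold := interval*secondsInDay - 600
        fmt.Println(threshold) // 604200

        gap := 7*secondsInDay - 300  // snapshot taken 5 minutes "early"
        fmt.Println(gap < threshold) // false: the snapshot is kept
    }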
@@ -1935,7 +1927,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
     for _, snapshots := range allSnapshots {
 
         if len(snapshots) > 0 {
-            latest := snapshots[len(snapshots) - 1]
+            latest := snapshots[len(snapshots)-1]
             if latest.Flag && !exclusive {
                 LOG_ERROR("SNAPSHOT_DELETE",
                     "The latest snapshot %s at revision %d can't be deleted in non-exclusive mode",
@@ -1966,7 +1958,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
     // In exhaustive, we scan the entire chunk tree to find dangling chunks and temporaries.
     allFiles, _ := manager.ListAllFiles(manager.storage, chunkDir)
     for _, file := range allFiles {
-        if file[len(file) - 1] == '/' {
+        if file[len(file)-1] == '/' {
             continue
         }
 
@@ -1981,7 +1973,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
 
         if exclusive {
             // In exclusive mode, we assume no other restore operation is running concurrently.
-            err := manager.storage.DeleteFile(0, chunkDir + file)
+            err := manager.storage.DeleteFile(0, chunkDir+file)
             if err != nil {
                 LOG_ERROR("CHUNK_TEMPORARY", "Failed to remove the temporary file %s: %v", file, err)
                 return false
@@ -2006,9 +1998,9 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
             chunk = strings.Replace(chunk, ".fsl", "", -1)
 
             if _, found := referencedChunks[chunk]; found {
-                manager.resurrectChunk(chunkDir + file, chunk)
+                manager.resurrectChunk(chunkDir+file, chunk)
             } else {
-                err := manager.storage.DeleteFile(0, chunkDir + file)
+                err := manager.storage.DeleteFile(0, chunkDir+file)
                 if err != nil {
                     LOG_WARN("FOSSIL_DELETE", "Failed to remove the unreferenced fossil %s: %v", file, err)
                 } else {
@@ -2035,7 +2027,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
                 continue
             }
 
-            manager.fossilizeChunk(chunk, chunkDir + file, exclusive, collection)
+            manager.fossilizeChunk(chunk, chunkDir+file, exclusive, collection)
             if exclusive {
                 fmt.Fprintf(logFile, "Deleted chunk %s (exclusive mode)\n", chunk)
             } else {
@@ -2053,7 +2045,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
             }
 
             // This is a redundant chunk file (for instance D3/495A8D and D3/49/5A8D )
-            err := manager.storage.DeleteFile(0, chunkDir + file)
+            err := manager.storage.DeleteFile(0, chunkDir+file)
             if err != nil {
                 LOG_WARN("CHUNK_DELETE", "Failed to remove the redundant chunk file %s: %v", file, err)
             } else {
@@ -2113,7 +2105,6 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
             }
         }
 
-
     }
 
     // Save the fossil collection if it is not empty.
@@ -2169,7 +2160,7 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
 
     var latestSnapshot *Snapshot = nil
     if len(allSnapshots[selfID]) > 0 {
-        latestSnapshot = allSnapshots[selfID][len(allSnapshots[selfID]) - 1]
+        latestSnapshot = allSnapshots[selfID][len(allSnapshots[selfID])-1]
     }
 
     if latestSnapshot != nil && !latestSnapshot.Flag {
@@ -2181,7 +2172,6 @@ func (manager *SnapshotManager) PruneSnapshots(selfID string, snapshotID string,
     return true
 }
 
-
 // CheckSnapshot performs sanity checks on the given snapshot.
 func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
 
@@ -2233,7 +2223,7 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
             entry.Path, entry.StartChunk, lastChunk)
         }
 
-        if entry.StartChunk > lastChunk + 1 {
+        if entry.StartChunk > lastChunk+1 {
             return fmt.Errorf("The file %s starts at chunk %d while the last chunk is %d",
                 entry.Path, entry.StartChunk, lastChunk)
         }
@@ -2275,11 +2265,11 @@ func (manager *SnapshotManager) CheckSnapshot(snapshot *Snapshot) (err error) {
     }
 
     if len(entries) > 0 && entries[0].StartChunk != 0 {
-        return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk )
+        return fmt.Errorf("The first file starts at chunk %d", entries[0].StartChunk)
     }
 
     // There may be a last chunk whose size is 0 so we allow this to happen
-    if lastChunk < numberOfChunks - 2 {
+    if lastChunk < numberOfChunks-2 {
         return fmt.Errorf("The last file ends at chunk %d but the number of chunks is %d", lastChunk, numberOfChunks)
     }
 
@@ -5,19 +5,19 @@
 package duplicacy
 
 import (
-    "testing"
-    "os"
+    "crypto/rand"
+    "encoding/hex"
+    "encoding/json"
     "fmt"
-    "time"
+    "os"
     "path"
     "strings"
-    "crypto/rand"
-    "encoding/json"
-    "encoding/hex"
+    "testing"
+    "time"
 )
 
-func createDummySnapshot(snapshotID string, revision int, endTime int64) * Snapshot {
-    return &Snapshot {
+func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
+    return &Snapshot{
         ID: snapshotID,
         Revision: revision,
         EndTime: endTime,
@@ -31,15 +31,15 @@ func TestIsDeletable(t *testing.T) {
     now := time.Now().Unix()
     day := int64(3600 * 24)
 
-    allSnapshots := make(map[string][] *Snapshot)
-    allSnapshots["host1"] = append([]*Snapshot{}, createDummySnapshot("host1", 1, now - 2 * day))
-    allSnapshots["host2"] = append([]*Snapshot{}, createDummySnapshot("host2", 1, now - 2 * day))
-    allSnapshots["host1"] = append(allSnapshots["host1"], createDummySnapshot("host1", 2, now - 1 * day))
-    allSnapshots["host2"] = append(allSnapshots["host2"], createDummySnapshot("host2", 2, now - 1 * day))
+    allSnapshots := make(map[string][]*Snapshot)
+    allSnapshots["host1"] = append([]*Snapshot{}, createDummySnapshot("host1", 1, now-2*day))
+    allSnapshots["host2"] = append([]*Snapshot{}, createDummySnapshot("host2", 1, now-2*day))
+    allSnapshots["host1"] = append(allSnapshots["host1"], createDummySnapshot("host1", 2, now-1*day))
+    allSnapshots["host2"] = append(allSnapshots["host2"], createDummySnapshot("host2", 2, now-1*day))
 
-    collection := & FossilCollection {
+    collection := &FossilCollection{
         EndTime: now - day - 3600,
-        LastRevisions: make(map[string] int),
+        LastRevisions: make(map[string]int),
     }
 
     collection.LastRevisions["host1"] = 1
@@ -51,21 +51,21 @@ func TestIsDeletable(t *testing.T) {
     }
 
     collection.LastRevisions["host3"] = 1
-    allSnapshots["host3"] = append([]*Snapshot{}, createDummySnapshot("host3", 1, now - 2 * day))
+    allSnapshots["host3"] = append([]*Snapshot{}, createDummySnapshot("host3", 1, now-2*day))
 
     isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
     if isDeletable {
         t.Errorf("Scenario 2: should not be deletable")
     }
 
-    allSnapshots["host3"] = append(allSnapshots["host3"], createDummySnapshot("host3", 2, now - day))
+    allSnapshots["host3"] = append(allSnapshots["host3"], createDummySnapshot("host3", 2, now-day))
     isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
     if !isDeletable || len(newSnapshots) != 3 {
         t.Errorf("Scenario 3: should be deletable, 3 new snapshots")
     }
 
     collection.LastRevisions["host4"] = 1
-    allSnapshots["host4"] = append([]*Snapshot{}, createDummySnapshot("host4", 1, now - 8 * day))
+    allSnapshots["host4"] = append([]*Snapshot{}, createDummySnapshot("host4", 1, now-8*day))
 
     isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
     if !isDeletable || len(newSnapshots) != 3 {
@@ -73,17 +73,17 @@ func TestIsDeletable(t *testing.T) {
     }
 
     collection.LastRevisions["repository1@host5"] = 1
-    allSnapshots["repository1@host5"] = append([]*Snapshot{}, createDummySnapshot("repository1@host5", 1, now - 3 * day))
+    allSnapshots["repository1@host5"] = append([]*Snapshot{}, createDummySnapshot("repository1@host5", 1, now-3*day))
 
     collection.LastRevisions["repository2@host5"] = 1
-    allSnapshots["repository2@host5"] = append([]*Snapshot{}, createDummySnapshot("repository2@host5", 1, now - 2 * day))
+    allSnapshots["repository2@host5"] = append([]*Snapshot{}, createDummySnapshot("repository2@host5", 1, now-2*day))
 
     isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
     if isDeletable {
         t.Errorf("Scenario 5: should not be deletable")
     }
 
-    allSnapshots["repository1@host5"] = append(allSnapshots["repository1@host5"], createDummySnapshot("repository1@host5", 2, now - day))
+    allSnapshots["repository1@host5"] = append(allSnapshots["repository1@host5"], createDummySnapshot("repository1@host5", 2, now-day))
     isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
     if !isDeletable || len(newSnapshots) != 4 {
         t.Errorf("Scenario 6: should be deletable, 4 new snapshots")
@@ -142,7 +142,7 @@ func uploadRandomChunk(manager *SnapshotManager, chunkSize int) string {
 
 func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string) {
 
-    snapshot := &Snapshot {
+    snapshot := &Snapshot{
         ID: snapshotID,
         Revision: revision,
         StartTime: startTime,
@@ -150,29 +150,29 @@ func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision in
         ChunkHashes: chunkHashes,
     }
 
-    var chunkHashesInHex [] string
+    var chunkHashesInHex []string
     for _, chunkHash := range chunkHashes {
         chunkHashesInHex = append(chunkHashesInHex, hex.EncodeToString([]byte(chunkHash)))
     }
 
     sequence, _ := json.Marshal(chunkHashesInHex)
-    snapshot.ChunkSequence = []string { uploadTestChunk(manager, sequence) }
+    snapshot.ChunkSequence = []string{uploadTestChunk(manager, sequence)}
 
     description, _ := snapshot.MarshalJSON()
     path := fmt.Sprintf("snapshots/%s/%d", snapshotID, snapshot.Revision)
-    manager.storage.CreateDirectory(0, "snapshots/" + snapshotID)
+    manager.storage.CreateDirectory(0, "snapshots/"+snapshotID)
     manager.UploadFile(path, path, description)
 }
 
 func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {
 
-    var snapshotIDs [] string
+    var snapshotIDs []string
     var err error
 
     chunks := make(map[string]bool)
     files, _ := manager.ListAllFiles(manager.storage, "chunks/")
     for _, file := range files {
-        if file[len(file) - 1] == '/' {
+        if file[len(file)-1] == '/' {
             continue
         }
         chunk := strings.Replace(file, "/", "", -1)
@@ -239,12 +239,12 @@ func TestSingleRepositoryPrune(t *testing.T) {
     now := time.Now().Unix()
     day := int64(24 * 3600)
     t.Logf("Creating 1 snapshot")
-    createTestSnapshot(snapshotManager, "repository1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash1, chunkHash2 })
+    createTestSnapshot(snapshotManager, "repository1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
     checkTestSnapshots(snapshotManager, 1, 2)
 
     t.Logf("Creating 2 snapshots")
-    createTestSnapshot(snapshotManager, "repository1", 2, now - 2 * day - 3600, now - 2 * day - 60, []string { chunkHash2, chunkHash3 })
-    createTestSnapshot(snapshotManager, "repository1", 3, now - 1 * day - 3600, now - 1 * day - 60, []string { chunkHash3, chunkHash4 })
+    createTestSnapshot(snapshotManager, "repository1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
+    createTestSnapshot(snapshotManager, "repository1", 3, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4})
     checkTestSnapshots(snapshotManager, 3, 0)
 
     t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
@@ -257,7 +257,7 @@ func TestSingleRepositoryPrune(t *testing.T) {
 
     t.Logf("Creating 1 snapshot")
     chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
-    createTestSnapshot(snapshotManager, "repository1", 4, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash5 })
+    createTestSnapshot(snapshotManager, "repository1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
     checkTestSnapshots(snapshotManager, 2, 2)
 
     t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -282,9 +282,9 @@ func TestSingleHostPrune(t *testing.T) {
     now := time.Now().Unix()
     day := int64(24 * 3600)
     t.Logf("Creating 3 snapshots")
-    createTestSnapshot(snapshotManager, "vm1@host1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash1, chunkHash2 })
-    createTestSnapshot(snapshotManager, "vm1@host1", 2, now - 2 * day - 3600, now - 2 * day - 60, []string { chunkHash2, chunkHash3 })
-    createTestSnapshot(snapshotManager, "vm2@host1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash3, chunkHash4 })
+    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
+    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
+    createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
     checkTestSnapshots(snapshotManager, 3, 0)
 
     t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
@@ -297,7 +297,7 @@ func TestSingleHostPrune(t *testing.T) {
 
     t.Logf("Creating 1 snapshot")
     chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
-    createTestSnapshot(snapshotManager, "vm2@host1", 2, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash5 })
+    createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
     checkTestSnapshots(snapshotManager, 3, 2)
 
     t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -323,9 +323,9 @@ func TestMultipleHostPrune(t *testing.T) {
     now := time.Now().Unix()
     day := int64(24 * 3600)
     t.Logf("Creating 3 snapshot")
-    createTestSnapshot(snapshotManager, "vm1@host1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash1, chunkHash2 })
-    createTestSnapshot(snapshotManager, "vm1@host1", 2, now - 2 * day - 3600, now - 2 * day - 60, []string { chunkHash2, chunkHash3 })
-    createTestSnapshot(snapshotManager, "vm2@host2", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash3, chunkHash4 })
+    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
+    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
+    createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
     checkTestSnapshots(snapshotManager, 3, 0)
 
     t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
@@ -338,7 +338,7 @@ func TestMultipleHostPrune(t *testing.T) {
 
     t.Logf("Creating 1 snapshot")
     chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
-    createTestSnapshot(snapshotManager, "vm2@host2", 2, now + 1 * day - 3600 , now + 1 * day, []string {chunkHash4, chunkHash5})
+    createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
     checkTestSnapshots(snapshotManager, 3, 2)
 
     t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
@@ -347,7 +347,7 @@ func TestMultipleHostPrune(t *testing.T) {
 
     t.Logf("Creating 1 snapshot")
     chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
-    createTestSnapshot(snapshotManager, "vm1@host1", 3, now + 1 * day - 3600 , now + 1 * day, []string {chunkHash5, chunkHash6})
+    createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6})
     checkTestSnapshots(snapshotManager, 4, 2)
 
     t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -371,8 +371,8 @@ func TestPruneAndResurrect(t *testing.T) {
     now := time.Now().Unix()
     day := int64(24 * 3600)
     t.Logf("Creating 2 snapshots")
-    createTestSnapshot(snapshotManager, "vm1@host1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash1, chunkHash2})
-    createTestSnapshot(snapshotManager, "vm1@host1", 2, now - 2 * day - 3600, now - 2 * day - 60, []string { chunkHash2, chunkHash3})
+    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
+    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
     checkTestSnapshots(snapshotManager, 2, 0)
 
     t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
@@ -381,7 +381,7 @@ func TestPruneAndResurrect(t *testing.T) {
 
     t.Logf("Creating 1 snapshot")
     chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
-    createTestSnapshot(snapshotManager, "vm1@host1", 4, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash1})
+    createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1})
     checkTestSnapshots(snapshotManager, 2, 2)
 
     t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
@@ -406,10 +406,10 @@ func TestInactiveHostPrune(t *testing.T) {
     now := time.Now().Unix()
     day := int64(24 * 3600)
     t.Logf("Creating 3 snapshot")
-    createTestSnapshot(snapshotManager, "vm1@host1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash1, chunkHash2} )
-    createTestSnapshot(snapshotManager, "vm1@host1", 2, now - 2 * day - 3600, now - 2 * day - 60, []string { chunkHash2, chunkHash3} )
+    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
+    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
     // Host2 is inactive
-    createTestSnapshot(snapshotManager, "vm2@host2", 1, now - 7 * day - 3600, now - 7 * day - 60, []string { chunkHash3, chunkHash4} )
+    createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4})
     checkTestSnapshots(snapshotManager, 3, 0)
 
     t.Logf("Removing snapshot vm1@host1 revision 1")
@@ -422,7 +422,7 @@ func TestInactiveHostPrune(t *testing.T) {
 
     t.Logf("Creating 1 snapshot")
     chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
-    createTestSnapshot(snapshotManager, "vm1@host1", 3, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash5} )
+    createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
     checkTestSnapshots(snapshotManager, 3, 2)
 
     t.Logf("Prune without removing any snapshots -- fossils will be deleted")
@@ -439,7 +439,7 @@ func TestRetentionPolicy(t *testing.T) {
     snapshotManager := createTestSnapshotManager(testDir)
 
     chunkSize := 1024
-    var chunkHashes [] string
+    var chunkHashes []string
     for i := 0; i < 30; i++ {
         chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
     }
@@ -448,7 +448,7 @@ func TestRetentionPolicy(t *testing.T) {
     day := int64(24 * 3600)
     t.Logf("Creating 30 snapshots")
     for i := 0; i < 30; i++ {
-        createTestSnapshot(snapshotManager, "vm1@host1", i + 1, now - int64(30 - i) * day - 3600, now - int64(30 - i) * day - 60, []string { chunkHashes[i] })
+        createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]})
     }
 
     checkTestSnapshots(snapshotManager, 30, 0)
@@ -6,14 +6,14 @@ package duplicacy
 
 import (
     "fmt"
-    "regexp"
-    "strings"
-    "strconv"
-    "os"
-    "net"
-    "path"
     "io/ioutil"
+    "net"
+    "os"
+    "path"
+    "regexp"
     "runtime"
+    "strconv"
+    "strings"
 
     "golang.org/x/crypto/ssh"
     "golang.org/x/crypto/ssh/agent"
@@ -47,16 +47,16 @@ type Storage interface {
 
     // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
     // managing snapshots.
-    IsCacheNeeded() (bool)
+    IsCacheNeeded() bool
 
     // If the 'MoveFile' method is implemented.
-    IsMoveFileImplemented() (bool)
+    IsMoveFileImplemented() bool
 
     // If the storage can guarantee strong consistency.
-    IsStrongConsistent() (bool)
+    IsStrongConsistent() bool
 
     // If the storage supports fast listing of files names.
-    IsFastListing() (bool)
+    IsFastListing() bool
 
     // Enable the test mode.
     EnableTestMode()
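The four Is* methods above let callers adapt to backend capabilities rather than backend identities. A hedged illustration of the kind of branching this enables (the struct and the policy shown are invented for the example, not how the real callers are written):

    package main

    import "fmt"

    type capabilities struct {
        cacheNeeded, moveFile, strongConsistent, fastListing bool
    }

    func main() {
        backend := capabilities{cacheNeeded: true, moveFile: true, fastListing: true}
        if backend.cacheNeeded || !backend.strongConsistent {
            fmt.Println("keep a local snapshot cache")
        }
        if !backend.moveFile {
            fmt.Println("fossilize by copy+delete instead of rename")
        }
        if backend.fastListing {
            fmt.Println("list all chunks in one pass")
        }
    }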
@@ -79,7 +79,7 @@ func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
 
     preferencePath := GetDuplicacyPreferencePath()
     hostFile := path.Join(preferencePath, "known_hosts")
-    file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
+    file, err := os.OpenFile(hostFile, os.O_RDWR|os.O_CREATE, 0600)
     if err != nil {
         return err
     }
@@ -96,7 +96,7 @@ func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
     keyString = strings.Replace(keyString, "\n", "", -1)
     remoteAddress := remote.String()
     if strings.HasSuffix(remoteAddress, ":22") {
-        remoteAddress = remoteAddress[:len(remoteAddress) - len(":22")]
+        remoteAddress = remoteAddress[:len(remoteAddress)-len(":22")]
     }
 
     for i, line := range strings.Split(string(content), "\n") {
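checkHostKey has exactly the signature that golang.org/x/crypto/ssh expects of a host key callback, so a function like it can be wired straight into a client configuration. A self-contained sketch with a stub callback and placeholder host and credentials (this assumes the current x/crypto/ssh API, where ClientConfig has a HostKeyCallback field):

    package main

    import (
        "log"
        "net"

        "golang.org/x/crypto/ssh"
    )

    // Same signature as checkHostKey above; stubbed so the example compiles alone.
    func verifyHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
        log.Printf("host %s (%s) offered a %s key", hostname, remote, key.Type())
        return nil // a real callback compares the key against known_hosts
    }

    func main() {
        config := &ssh.ClientConfig{
            User:            "backup",
            Auth:            []ssh.AuthMethod{ssh.Password("placeholder")},
            HostKeyCallback: verifyHostKey,
        }
        client, err := ssh.Dial("tcp", "example.com:22", config)
        if err != nil {
            log.Fatal(err)
        }
        defer client.Close()
    }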
@@ -187,7 +187,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 
         if strings.Contains(server, ":") {
             index := strings.Index(server, ":")
-            port, _ = strconv.Atoi(server[index + 1:])
+            port, _ = strconv.Atoi(server[index+1:])
             server = server[:index]
         }
 
@@ -197,7 +197,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         }
 
         if username != "" {
-            username = username[:len(username) - 1]
+            username = username[:len(username)-1]
         }
 
         // If ssh_key_file is set, skip password-based login
@@ -210,12 +210,12 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
             return password, nil
         }
 
-        keyboardInteractive := func (user, instruction string, questions []string, echos []bool) (answers []string,
+        keyboardInteractive := func(user, instruction string, questions []string, echos []bool) (answers []string,
             err error) {
             if len(questions) == 1 {
                 LOG_DEBUG("SSH_INTERACTIVE", "Attempting keyboard interactive login")
                 password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
-                answers = []string { password }
+                answers = []string{password}
                 return answers, nil
             } else {
                 return nil, nil
@@ -225,7 +225,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         publicKeysCallback := func() ([]ssh.Signer, error) {
             LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")
 
-            signers := []ssh.Signer {}
+            signers := []ssh.Signer{}
 
             agentSock := os.Getenv("SSH_AUTH_SOCK")
             if agentSock != "" {
@@ -274,13 +274,12 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
 
         }
 
-        authMethods := [] ssh.AuthMethod {
-        }
-        passwordAuthMethods := [] ssh.AuthMethod {
+        authMethods := []ssh.AuthMethod{}
+        passwordAuthMethods := []ssh.AuthMethod{
             ssh.PasswordCallback(passwordCallback),
             ssh.KeyboardInteractive(keyboardInteractive),
         }
-        keyFileAuthMethods := [] ssh.AuthMethod {
+        keyFileAuthMethods := []ssh.AuthMethod{
             ssh.PublicKeysCallback(publicKeysCallback),
         }
         if keyFile != "" {
@@ -298,7 +297,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
             keyFileKey = preference.Name + "_" + keyFileKey
         }
 
-        authMethods = [] ssh.AuthMethod {}
+        authMethods = []ssh.AuthMethod{}
         if keyringGet(passwordKey) != "" {
             authMethods = append(authMethods, ssh.PasswordCallback(passwordCallback))
             authMethods = append(authMethods, ssh.KeyboardInteractive(keyboardInteractive))
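The hunks above collect three SSH authentication routes, a password callback, keyboard-interactive login, and public keys (agent plus key file), and order them depending on what the user configured. A compressed sketch of that selection idea; the stub callbacks and the policy shown are simplifications, not the source's exact logic:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        // Stubs so the example is self-contained; the real callbacks prompt
        // for a password or talk to the SSH agent, as in the hunks above.
        passwordCallback := func() (string, error) { return "placeholder", nil }
        publicKeysCallback := func() ([]ssh.Signer, error) { return nil, nil }

        passwordAuth := []ssh.AuthMethod{ssh.PasswordCallback(passwordCallback)}
        keyAuth := []ssh.AuthMethod{ssh.PublicKeysCallback(publicKeysCallback)}

        keyFile := "" // set to a private key path to prefer public-key auth
        var authMethods []ssh.AuthMethod
        if keyFile != "" {
            authMethods = append(keyAuth, passwordAuth...) // try the key first
        } else {
            authMethods = append(passwordAuth, keyAuth...)
        }
        fmt.Println(len(authMethods), "auth methods configured")
    }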
@@ -333,7 +332,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         bucket := matched[5]
 
         if region != "" {
-            region = region[:len(region) - 1]
+            region = region[:len(region)-1]
         }
 
         if strings.EqualFold(endpoint, "amazon") || strings.EqualFold(endpoint, "amazon.com") {
@@ -343,7 +342,7 @@ func CreateStorage(preference Preference, resetPassword bool, threads int) (stor
         storageDir := ""
         if strings.Contains(bucket, "/") {
             firstSlash := strings.Index(bucket, "/")
-            storageDir = bucket[firstSlash + 1:]
+            storageDir = bucket[firstSlash+1:]
             bucket = bucket[:firstSlash]
         }
 
@@ -5,19 +5,19 @@
 package duplicacy
 
 import (
-    "os"
-    "fmt"
-    "time"
-    "flag"
-    "path"
-    "testing"
-    "strings"
-    "strconv"
-    "io/ioutil"
     "crypto/sha256"
     "encoding/hex"
     "encoding/json"
+    "flag"
+    "fmt"
+    "io/ioutil"
+    "os"
+    "path"
     "runtime/debug"
+    "strconv"
+    "strings"
+    "testing"
+    "time"
 
     crypto_rand "crypto/rand"
     "math/rand"
@@ -111,8 +111,8 @@ func cleanStorage(storage Storage) {
     LOG_INFO("STORAGE_LIST", "Listing snapshots in the storage")
     for len(directories) > 0 {
 
-        dir := directories[len(directories) - 1]
-        directories = directories[:len(directories) - 1]
+        dir := directories[len(directories)-1]
+        directories = directories[:len(directories)-1]
 
         files, _, err := storage.ListFiles(0, dir)
         if err != nil {
@@ -121,10 +121,10 @@ func cleanStorage(storage Storage) {
         }
 
         for _, file := range files {
-            if len(file) > 0 && file[len(file) - 1] == '/' {
-                directories = append(directories, dir + file)
+            if len(file) > 0 && file[len(file)-1] == '/' {
+                directories = append(directories, dir+file)
             } else {
-                snapshots = append(snapshots, dir + file)
+                snapshots = append(snapshots, dir+file)
             }
         }
     }
@@ -135,7 +135,7 @@ func cleanStorage(storage Storage) {
     }
 
     for _, chunk := range listChunks(storage) {
-        storage.DeleteFile(0, "chunks/" + chunk)
+        storage.DeleteFile(0, "chunks/"+chunk)
     }
 
     storage.DeleteFile(0, "config")
@@ -151,8 +151,8 @@ func listChunks(storage Storage) (chunks []string) {
 
     for len(directories) > 0 {
 
-        dir := directories[len(directories) - 1]
-        directories = directories[:len(directories) - 1]
+        dir := directories[len(directories)-1]
+        directories = directories[:len(directories)-1]
 
         files, _, err := storage.ListFiles(0, dir)
         if err != nil {
@@ -161,8 +161,8 @@ func listChunks(storage Storage) (chunks []string) {
         }
 
         for _, file := range files {
-            if len(file) > 0 && file[len(file) - 1] == '/' {
-                directories = append(directories, dir + file)
+            if len(file) > 0 && file[len(file)-1] == '/' {
+                directories = append(directories, dir+file)
             } else {
                 chunk := dir + file
                 chunk = chunk[len("chunks/"):]
@@ -185,7 +185,7 @@ func moveChunk(t *testing.T, storage Storage, chunkID string, isFossil bool, del
 
     to := filePath + ".fsl"
     if isFossil {
-        to = filePath[:len(filePath) - len(".fsl")]
+        to = filePath[:len(filePath)-len(".fsl")]
     }
 
     err = storage.MoveFile(0, filePath, to)
@@ -232,7 +232,7 @@ func TestStorage(t *testing.T) {
             debug.PrintStack()
         }
     }
-    } ()
+    }()
 
     testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
     os.RemoveAll(testDir)
@@ -256,7 +256,7 @@ func TestStorage(t *testing.T) {
         delay = 2
     }
 
-    for _, dir := range []string { "chunks", "snapshots" } {
+    for _, dir := range []string{"chunks", "snapshots"} {
         err = storage.CreateDirectory(0, dir)
         if err != nil {
             t.Errorf("Failed to create directory %s: %v", dir, err)
@@ -299,10 +299,10 @@ func TestStorage(t *testing.T) {
         return
     }
 
-    snapshotIDs := []string {}
+    snapshotIDs := []string{}
     for _, snapshotDir := range snapshotDirs {
-        if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir) - 1] == '/' {
-            snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir) - 1])
+        if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
+            snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
         }
     }
 
@@ -312,13 +312,13 @@ func TestStorage(t *testing.T) {
     }
 
     for _, snapshotID := range snapshotIDs {
-        snapshots, _, err := storage.ListFiles(0, "snapshots/" + snapshotID)
+        snapshots, _, err := storage.ListFiles(0, "snapshots/"+snapshotID)
         if err != nil {
             t.Errorf("Failed to list snapshots for %s: %v", snapshotID, err)
             return
         }
         for _, snapshot := range snapshots {
-            storage.DeleteFile(0, "snapshots/" + snapshotID + "/" + snapshot)
+            storage.DeleteFile(0, "snapshots/"+snapshotID+"/"+snapshot)
         }
     }
 
@@ -326,7 +326,7 @@ func TestStorage(t *testing.T) {
 
     storage.DeleteFile(0, "config")
 
-    for _, file := range []string { "snapshots/repository1/1", "snapshots/repository2/1"} {
+    for _, file := range []string{"snapshots/repository1/1", "snapshots/repository2/1"} {
         exist, _, _, err := storage.GetFileInfo(0, file)
         if err != nil {
             t.Errorf("Failed to get file info for %s: %v", file, err)
@@ -348,7 +348,7 @@ func TestStorage(t *testing.T) {
     chunks := []string{}
 
     for i := 0; i < numberOfFiles; i++ {
-        content := make([]byte, rand.Int() % maxFileSize + 1)
+        content := make([]byte, rand.Int()%maxFileSize+1)
         _, err = crypto_rand.Read(content)
         if err != nil {
             t.Errorf("Error generating random content: %v", err)
@@ -377,7 +377,7 @@ func TestStorage(t *testing.T) {
         LOG_INFO("STORAGE_CHUNK", "Uploaded chunk: %s, size: %d", chunkID, len(content))
     }
 
-    allChunks := [] string {}
+    allChunks := []string{}
     for _, file := range listChunks(storage) {
         file = strings.Replace(file, "/", "", -1)
         if len(file) == 64 {
@@ -392,11 +392,10 @@ func TestStorage(t *testing.T) {
 
     config := CreateConfig()
     config.MinimumChunkSize = 100
-    config.chunkPool = make(chan *Chunk, numberOfFiles * 2)
+    config.chunkPool = make(chan *Chunk, numberOfFiles*2)
 
     chunk := CreateChunk(config, true)
 
-
     for _, chunkID := range chunks {
 
         chunk.Reset(false)
@@ -450,7 +449,7 @@ func TestStorage(t *testing.T) {
 
     for _, file := range allChunks {
 
-        err = storage.DeleteFile(0, "chunks/" + file)
+        err = storage.DeleteFile(0, "chunks/"+file)
         if err != nil {
             t.Errorf("Failed to delete the file %s: %v", file, err)
             return
@@ -474,7 +473,7 @@ func TestCleanStorage(t *testing.T) {
             debug.PrintStack()
         }
     }
-    } ()
+    }()
 
     testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
     os.RemoveAll(testDir)
@@ -494,8 +493,8 @@ func TestCleanStorage(t *testing.T) {
 
     for len(directories) > 0 {
 
-        dir := directories[len(directories) - 1]
-        directories = directories[:len(directories) - 1]
+        dir := directories[len(directories)-1]
+        directories = directories[:len(directories)-1]
 
         LOG_INFO("LIST_FILES", "Listing %s", dir)
 
@@ -506,10 +505,10 @@ func TestCleanStorage(t *testing.T) {
         }
 
         for _, file := range files {
-            if len(file) > 0 && file[len(file) - 1] == '/' {
-                directories = append(directories, dir + file)
+            if len(file) > 0 && file[len(file)-1] == '/' {
+                directories = append(directories, dir+file)
             } else {
-                storage.DeleteFile(0, dir + file)
+                storage.DeleteFile(0, dir+file)
                 LOG_INFO("DELETE_FILE", "Deleted file %s", file)
             }
         }
@@ -5,21 +5,21 @@
package duplicacy
package duplicacy

import (
import (
	"fmt"
	"os"
	"bufio"
	"bufio"
	"crypto/sha256"
	"fmt"
	"io"
	"io"
	"time"
	"os"
	"path"
	"path"
	"path/filepath"
	"path/filepath"
	"regexp"
	"regexp"
	"strings"
	"strconv"
	"runtime"
	"runtime"
	"crypto/sha256"
	"strconv"
	"strings"
	"time"

	"golang.org/x/crypto/pbkdf2"
	"github.com/gilbertchen/gopass"
	"github.com/gilbertchen/gopass"
	"golang.org/x/crypto/pbkdf2"
)
)

var RunInBackground bool = false
var RunInBackground bool = false
@@ -41,8 +41,8 @@ func init() {

}
}

func CreateRateLimitedReader(content []byte, rate int) (*RateLimitedReader) {
func CreateRateLimitedReader(content []byte, rate int) *RateLimitedReader {
	return &RateLimitedReader {
	return &RateLimitedReader{
		Content: content,
		Content: content,
		Rate: float64(rate * 1024),
		Rate: float64(rate * 1024),
		Next: 0,
		Next: 0,
@@ -84,7 +84,7 @@ func IsValidRegex(pattern string) (valid bool, err error) {
	}
	}
}
}

func (reader *RateLimitedReader) Length() (int64) {
func (reader *RateLimitedReader) Length() int64 {
	return int64(len(reader.Content))
	return int64(len(reader.Content))
}
}

@@ -123,19 +123,19 @@ func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {
	}
	}

	elapsed := time.Since(reader.StartTime).Seconds()
	elapsed := time.Since(reader.StartTime).Seconds()
	delay := float64(reader.Next) / reader.Rate - elapsed
	delay := float64(reader.Next)/reader.Rate - elapsed
	end := reader.Next + int(reader.Rate / 5)
	end := reader.Next + int(reader.Rate/5)
	if delay > 0 {
	if delay > 0 {
		time.Sleep(time.Duration(delay * float64(time.Second)))
		time.Sleep(time.Duration(delay * float64(time.Second)))
	} else {
	} else {
		end += - int(delay * reader.Rate)
		end += -int(delay * reader.Rate)
	}
	}

	if end > len(reader.Content) {
	if end > len(reader.Content) {
		end = len(reader.Content)
		end = len(reader.Content)
	}
	}

	n = copy(p, reader.Content[reader.Next : end])
	n = copy(p, reader.Content[reader.Next:end])
	reader.Next += n
	reader.Next += n
	return n, nil
	return n, nil
}
}
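Read paces itself arithmetically: after handing out Next bytes the reader should have been running for at least Next/Rate seconds, so any shortfall is slept off, and each call serves roughly a fifth of a second's budget (Rate/5 bytes). A standalone illustration of that arithmetic with made-up numbers:

package main

import "fmt"

// Worked example of the pacing arithmetic in RateLimitedReader.Read with
// made-up numbers: a 100 KB/s limit, 50 KB already served, 0.3 s elapsed.
func main() {
	rate := float64(100 * 1024) // the Rate field, in bytes per second
	next := 50 * 1024           // the Next field: bytes already handed out
	elapsed := 0.3              // seconds since StartTime

	// Releasing `next` bytes should have taken next/rate seconds;
	// any shortfall is slept off before serving the next chunk.
	delay := float64(next)/rate - elapsed
	// Each call serves roughly a fifth of a second's budget.
	end := next + int(rate/5)

	fmt.Printf("delay %.2fs, next chunk ends at byte %d\n", delay, end)
	// Prints: delay 0.20s, next chunk ends at byte 71680
}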
@@ -145,7 +145,7 @@ func RateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int6
		return io.Copy(writer, reader)
		return io.Copy(writer, reader)
	}
	}
	for range time.Tick(time.Second / 5) {
	for range time.Tick(time.Second / 5) {
		n, err := io.CopyN(writer, reader, int64(rate * 1024 / 5))
		n, err := io.CopyN(writer, reader, int64(rate*1024/5))
		written += n
		written += n
		if err != nil {
		if err != nil {
			if err == io.EOF {
			if err == io.EOF {
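RateLimitedCopy applies the same budget on the copy path: every 200 ms (time.Second / 5) it moves one fifth of the per-second allowance with io.CopyN. The sketch below re-creates the loop as a self-contained program; the rate <= 0 guard is assumed, since only the io.Copy fallback line appears in this hunk.

package main

import (
	"bytes"
	"fmt"
	"io"
	"time"
)

// Stand-alone re-creation of the RateLimitedCopy loop: every 200 ms it
// copies one fifth of the per-second budget, so long-run throughput
// converges on `rate` KB/s.
func rateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int64, err error) {
	if rate <= 0 {
		return io.Copy(writer, reader) // no limit requested (assumed guard)
	}
	for range time.Tick(time.Second / 5) {
		n, err := io.CopyN(writer, reader, int64(rate*1024/5))
		written += n
		if err != nil {
			if err == io.EOF {
				return written, nil
			}
			return written, err
		}
	}
	return written, nil // unreachable: time.Tick never closes its channel
}

func main() {
	source := bytes.NewReader(make([]byte, 64*1024))
	var sink bytes.Buffer
	start := time.Now()
	written, _ := rateLimitedCopy(&sink, source, 128) // cap at roughly 128 KB/s
	fmt.Printf("copied %d bytes in %v\n", written, time.Since(start))
}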
@@ -164,7 +164,7 @@ func GenerateKeyFromPassword(password string) []byte {
}
}

// Get password from preference, env, but don't start any keyring request
// Get password from preference, env, but don't start any keyring request
func GetPasswordFromPreference(preference Preference, passwordType string) (string) {
func GetPasswordFromPreference(preference Preference, passwordType string) string {
	passwordID := passwordType
	passwordID := passwordType
	if preference.Name != "default" {
	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
		passwordID = preference.Name + "_" + passwordID
@@ -196,9 +196,9 @@ func GetPasswordFromPreference(preference Preference, passwordType string) (stri

// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
func GetPassword(preference Preference, passwordType string, prompt string,
func GetPassword(preference Preference, passwordType string, prompt string,
	showPassword bool, resetPassword bool) (string) {
	showPassword bool, resetPassword bool) string {
	passwordID := passwordType
	passwordID := passwordType
	password := GetPasswordFromPreference(preference,passwordType)
	password := GetPasswordFromPreference(preference, passwordType)
	if password != "" {
	if password != "" {
		return password
		return password
	}
	}
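A sketch of the lookup order these comments describe: build a per-storage password ID, consult the environment, and only then fall back to the keyring or an interactive prompt. The DUPLICACY_* environment-variable naming below is an assumption for illustration; only the passwordID construction comes from this hunk.

package main

import (
	"fmt"
	"os"
	"strings"
)

// lookupPassword mirrors the ID construction above. The environment
// variable naming scheme is assumed, not taken from this diff.
func lookupPassword(preferenceName string, passwordType string) string {
	passwordID := passwordType
	if preferenceName != "default" {
		passwordID = preferenceName + "_" + passwordID // e.g. "remote_password"
	}
	if value, found := os.LookupEnv("DUPLICACY_" + strings.ToUpper(passwordID)); found {
		return value
	}
	return "" // the real code would try the keyring, then prompt the user
}

func main() {
	os.Setenv("DUPLICACY_REMOTE_PASSWORD", "correct horse")
	fmt.Println(lookupPassword("remote", "password")) // "correct horse"
}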
@@ -331,7 +331,7 @@ func matchPattern(text string, pattern string) bool {
// either '+' or '-', whereas '-' indicates exclusion and '+' indicates inclusion. Wildcards like '*' and '?' may
// either '+' or '-', whereas '-' indicates exclusion and '+' indicates inclusion. Wildcards like '*' and '?' may
// appear in the patterns. In case no matching pattern is found, the file will be excluded if all patterns are
// appear in the patterns. In case no matching pattern is found, the file will be excluded if all patterns are
// include patterns, and included otherwise.
// include patterns, and included otherwise.
func MatchPath(filePath string, patterns [] string) (included bool) {
func MatchPath(filePath string, patterns []string) (included bool) {

	var re *regexp.Regexp = nil
	var re *regexp.Regexp = nil
	var found bool
	var found bool
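The comment block fully specifies the decision rule: the first matching pattern wins, and an unmatched path is excluded only when every pattern is an include pattern. A self-contained sketch of that rule, with a deliberately simplified wildcard matcher standing in for the real matchPattern:

package main

import "fmt"

// matchPattern here is a simplified matcher for illustration: '*' matches
// any run of characters (possibly empty) and '?' matches exactly one; the
// real matchPattern in this file is more elaborate.
func matchPattern(text string, pattern string) bool {
	if pattern == "" {
		return text == ""
	}
	if pattern[0] == '*' {
		for i := 0; i <= len(text); i++ {
			if matchPattern(text[i:], pattern[1:]) {
				return true
			}
		}
		return false
	}
	if text == "" {
		return false
	}
	if pattern[0] == '?' || pattern[0] == text[0] {
		return matchPattern(text[1:], pattern[1:])
	}
	return false
}

// matchPath applies the documented rule: the first matching pattern
// decides, and an unmatched path is excluded only when every pattern
// is an include ('+') pattern.
func matchPath(filePath string, patterns []string) (included bool) {
	allIncludes := true
	for _, pattern := range patterns {
		include := pattern[0] == '+'
		if !include {
			allIncludes = false
		}
		if matchPattern(filePath, pattern[1:]) {
			return include
		}
	}
	return !allIncludes
}

func main() {
	patterns := []string{"-*.tmp", "+src/*"}
	fmt.Println(matchPath("src/main.go", patterns)) // true: "+src/*" matches
	fmt.Println(matchPath("scratch.tmp", patterns)) // false: "-*.tmp" matches
	fmt.Println(matchPath("README", patterns))      // true: no match, mixed patterns
}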
@@ -386,52 +386,52 @@ func joinPath(components ...string) string {
	return combinedPath
	return combinedPath
}
}

func PrettyNumber(number int64) (string) {
func PrettyNumber(number int64) string {

	G := int64(1024 * 1024 * 1024)
	G := int64(1024 * 1024 * 1024)
	M := int64(1024 * 1024)
	M := int64(1024 * 1024)
	K := int64(1024)
	K := int64(1024)

	if number > 1000 * G {
	if number > 1000*G {
		return fmt.Sprintf("%dG", number / G)
		return fmt.Sprintf("%dG", number/G)
	} else if number > G {
	} else if number > G {
		return fmt.Sprintf("%d,%03dM", number / (1000 * M), (number / M) % 1000)
		return fmt.Sprintf("%d,%03dM", number/(1000*M), (number/M)%1000)
	} else if number > M {
	} else if number > M {
		return fmt.Sprintf("%d,%03dK", number / (1000 * K), (number / K) % 1000)
		return fmt.Sprintf("%d,%03dK", number/(1000*K), (number/K)%1000)
	} else if number > K {
	} else if number > K {
		return fmt.Sprintf("%dK", number / K)
		return fmt.Sprintf("%dK", number/K)
	} else {
	} else {
		return fmt.Sprintf("%d", number)
		return fmt.Sprintf("%d", number)
	}
	}
}
}

func PrettySize(size int64) (string) {
func PrettySize(size int64) string {
	if size > 1024 * 1024 {
	if size > 1024*1024 {
		return fmt.Sprintf("%.2fM", float64(size) / (1024.0 * 1024.0))
		return fmt.Sprintf("%.2fM", float64(size)/(1024.0*1024.0))
	} else if size > 1024 {
	} else if size > 1024 {
		return fmt.Sprintf("%.0fK", float64(size) / 1024.0)
		return fmt.Sprintf("%.0fK", float64(size)/1024.0)
	} else {
	} else {
		return fmt.Sprintf("%d", size)
		return fmt.Sprintf("%d", size)
	}
	}
}
}

func PrettyTime(seconds int64) (string) {
func PrettyTime(seconds int64) string {

	day := int64(3600 * 24)
	day := int64(3600 * 24)

	if seconds > day * 2 {
	if seconds > day*2 {
		return fmt.Sprintf("%d days %02d:%02d:%02d",
		return fmt.Sprintf("%d days %02d:%02d:%02d",
			seconds / day, (seconds % day) / 3600, (seconds % 3600) / 60, seconds % 60)
			seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > day {
	} else if seconds > day {
		return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds % day) / 3600, (seconds % 3600) / 60, seconds % 60)
		return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > 0 {
	} else if seconds > 0 {
		return fmt.Sprintf("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60)
		return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
	} else {
	} else {
		return "n/a"
		return "n/a"
	}
	}
}
}
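These formatters are plain integer arithmetic, so their behavior is easy to pin down with examples. The snippet below copies PrettyTime verbatim from this hunk and exercises one value per branch:

package main

import "fmt"

// PrettyTime is copied from the diff above; the sample values in main
// cover the multi-day, single-day, and sub-day branches.
func PrettyTime(seconds int64) string {
	day := int64(3600 * 24)

	if seconds > day*2 {
		return fmt.Sprintf("%d days %02d:%02d:%02d",
			seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > day {
		return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > 0 {
		return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
	} else {
		return "n/a"
	}
}

func main() {
	fmt.Println(PrettyTime(3*86400 + 3661)) // "3 days 01:01:01"
	fmt.Println(PrettyTime(86400 + 59))     // "1 day 00:00:59"
	fmt.Println(PrettyTime(3661))           // "01:01:01"
}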

func AtoSize(sizeString string) (int) {
func AtoSize(sizeString string) int {
	sizeString = strings.ToLower(sizeString)
	sizeString = strings.ToLower(sizeString)

	sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
	sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
@@ -451,7 +451,7 @@ func AtoSize(sizeString string) (int) {
	return size
	return size
}
}

func MinInt(x, y int) (int) {
func MinInt(x, y int) int {
	if x < y {
	if x < y {
		return x
		return x
	}
	}
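Only AtoSize's regex is visible in these hunks: the capture groups split a string such as "64k" into digits and an optional unit suffix. The sketch below fills in plausible multiplier handling; the switch on the unit is an assumption, not shown in the diff.

package main

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// atoSize illustrates what AtoSize plausibly does with its regex; the
// lowercase conversion and the regex come from the diff, the multiplier
// logic is assumed.
func atoSize(sizeString string) int {
	sizeString = strings.ToLower(sizeString)
	sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
	matched := sizeRegex.FindStringSubmatch(sizeString)
	if matched == nil {
		return 0
	}
	size, _ := strconv.Atoi(matched[1])
	switch matched[2] {
	case "m":
		size *= 1024 * 1024
	case "k":
		size *= 1024
	}
	return size
}

func main() {
	fmt.Println(atoSize("4M"))  // 4194304
	fmt.Println(atoSize("64k")) // 65536
	fmt.Println(atoSize("97"))  // 97
}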
@@ -7,10 +7,10 @@
package duplicacy
package duplicacy

import (
import (
	"os"
	"bytes"
	"bytes"
	"syscall"
	"os"
	"path/filepath"
	"path/filepath"
	"syscall"

	"github.com/gilbertchen/xattr"
	"github.com/gilbertchen/xattr"
)
)
@@ -31,7 +31,7 @@ func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
	}
	}
}
}

func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
	stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
	stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
	if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
	if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
		if entry.UID != -1 && entry.GID != -1 {
		if entry.UID != -1 && entry.GID != -1 {
@@ -5,15 +5,14 @@
package duplicacy
package duplicacy

import (
import (
	"bytes"
	"io"
	"io"
	"io/ioutil"
	"io/ioutil"
	"time"
	"time"
	"bytes"

	crypto_rand "crypto/rand"
	crypto_rand "crypto/rand"

	"testing"
	"testing"

)
)

func TestMatchPattern(t *testing.T) {
func TestMatchPattern(t *testing.T) {
@@ -21,70 +20,70 @@ func TestMatchPattern(t *testing.T) {
	// Test cases were copied from Matching Wildcards: An Empirical Way to Tame an Algorithm
	// Test cases were copied from Matching Wildcards: An Empirical Way to Tame an Algorithm
	// By Kirk J. Krauss, October 07, 2014
	// By Kirk J. Krauss, October 07, 2014

	DATA := [] struct {
	DATA := []struct {
		text string
		text string
		pattern string
		pattern string
		matched bool
		matched bool
	} {
	}{
		// Cases with repeating character sequences.
		// Cases with repeating character sequences.
		{ "abcccd", "*ccd", true },
		{"abcccd", "*ccd", true},
		{ "mississipissippi", "*issip*ss*", true },
		{"mississipissippi", "*issip*ss*", true},
		{ "xxxx*zzzzzzzzy*f", "xxxx*zzy*fffff", false },
		{"xxxx*zzzzzzzzy*f", "xxxx*zzy*fffff", false},
		{ "xxxx*zzzzzzzzy*f", "xxx*zzy*f", true },
		{"xxxx*zzzzzzzzy*f", "xxx*zzy*f", true},
		{ "xxxxzzzzzzzzyf", "xxxx*zzy*fffff", false },
		{"xxxxzzzzzzzzyf", "xxxx*zzy*fffff", false},
		{ "xxxxzzzzzzzzyf", "xxxx*zzy*f", true },
		{"xxxxzzzzzzzzyf", "xxxx*zzy*f", true},
		{ "xyxyxyzyxyz", "xy*z*xyz", true },
		{"xyxyxyzyxyz", "xy*z*xyz", true},
		{ "mississippi", "*sip*", true },
		{"mississippi", "*sip*", true},
		{ "xyxyxyxyz", "xy*xyz", true },
		{"xyxyxyxyz", "xy*xyz", true},
		{ "mississippi", "mi*sip*", true },
		{"mississippi", "mi*sip*", true},
		{ "ababac", "*abac*", true },
		{"ababac", "*abac*", true},
		{ "ababac", "*abac*", true },
		{"ababac", "*abac*", true},
		{ "aaazz", "a*zz*", true },
		{"aaazz", "a*zz*", true},
		{ "a12b12", "*12*23", false },
		{"a12b12", "*12*23", false},
		{ "a12b12", "a12b", false },
		{"a12b12", "a12b", false},
		{ "a12b12", "*12*12*", true },
		{"a12b12", "*12*12*", true},

		// More double wildcard scenarios.
		// More double wildcard scenarios.
		{ "XYXYXYZYXYz", "XY*Z*XYz", true },
		{"XYXYXYZYXYz", "XY*Z*XYz", true},
		{ "missisSIPpi", "*SIP*", true },
		{"missisSIPpi", "*SIP*", true},
		{ "mississipPI", "*issip*PI", true },
		{"mississipPI", "*issip*PI", true},
		{ "xyxyxyxyz", "xy*xyz", true },
		{"xyxyxyxyz", "xy*xyz", true},
		{ "miSsissippi", "mi*sip*", true },
		{"miSsissippi", "mi*sip*", true},
		{ "miSsissippi", "mi*Sip*", false },
		{"miSsissippi", "mi*Sip*", false},
		{ "abAbac", "*Abac*", true },
		{"abAbac", "*Abac*", true},
		{ "abAbac", "*Abac*", true },
		{"abAbac", "*Abac*", true},
		{ "aAazz", "a*zz*", true },
		{"aAazz", "a*zz*", true},
		{ "A12b12", "*12*23", false },
		{"A12b12", "*12*23", false},
		{ "a12B12", "*12*12*", true },
		{"a12B12", "*12*12*", true},
		{ "oWn", "*oWn*", true },
		{"oWn", "*oWn*", true},

		// Completely tame (no wildcards) cases.
		// Completely tame (no wildcards) cases.
		{ "bLah", "bLah", true },
		{"bLah", "bLah", true},
		{ "bLah", "bLaH", false },
		{"bLah", "bLaH", false},

		// Simple mixed wildcard tests suggested by IBMer Marlin Deckert.
		// Simple mixed wildcard tests suggested by IBMer Marlin Deckert.
		{ "a", "*?", true },
		{"a", "*?", true},
		{ "ab", "*?", true },
		{"ab", "*?", true},
		{ "abc", "*?", true },
		{"abc", "*?", true},

		// More mixed wildcard tests including coverage for false positives.
		// More mixed wildcard tests including coverage for false positives.
		{ "a", "??", false },
		{"a", "??", false},
		{ "ab", "?*?", true },
		{"ab", "?*?", true},
		{ "ab", "*?*?*", true },
		{"ab", "*?*?*", true},
		{ "abc", "?*?*?", true },
		{"abc", "?*?*?", true},
		{ "abc", "?*?*&?", false },
		{"abc", "?*?*&?", false},
		{ "abcd", "?b*??", true },
		{"abcd", "?b*??", true},
		{ "abcd", "?a*??", false },
		{"abcd", "?a*??", false},
		{ "abcd", "?*?c?", true },
		{"abcd", "?*?c?", true},
		{ "abcd", "?*?d?", false },
		{"abcd", "?*?d?", false},
		{ "abcde", "?*b*?*d*?", true },
		{"abcde", "?*b*?*d*?", true},

		// Single-character-match cases.
		// Single-character-match cases.
		{ "bLah", "bL?h", true },
		{"bLah", "bL?h", true},
		{ "bLaaa", "bLa?", false },
		{"bLaaa", "bLa?", false},
		{ "bLah", "bLa?", true },
		{"bLah", "bLa?", true},
		{ "bLaH", "?Lah", false },
		{"bLaH", "?Lah", false},
		{ "bLaH", "?LaH", true },
		{"bLaH", "?LaH", true},
	}
	}

	for _, data := range DATA {
	for _, data := range DATA {
@@ -96,7 +95,7 @@ func TestMatchPattern(t *testing.T) {
}
}

func TestRateLimit(t *testing.T) {
func TestRateLimit(t *testing.T) {
	content := make([]byte, 100 * 1024)
	content := make([]byte, 100*1024)
	_, err := crypto_rand.Read(content)
	_, err := crypto_rand.Read(content)
	if err != nil {
	if err != nil {
		t.Errorf("Error generating random content: %v", err)
		t.Errorf("Error generating random content: %v", err)
@@ -5,8 +5,8 @@
package duplicacy
package duplicacy

import (
import (
	"os"
	"fmt"
	"fmt"
	"os"
	"syscall"
	"syscall"
	"unsafe"
	"unsafe"
)
)
@@ -36,6 +36,7 @@ type reparseDataBuffer struct {
	// GenericReparseBuffer
	// GenericReparseBuffer
	reparseBuffer byte
	reparseBuffer byte
}
}

const (
const (
	FSCTL_GET_REPARSE_POINT = 0x900A8
	FSCTL_GET_REPARSE_POINT = 0x900A8
	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
@@ -75,17 +76,17 @@ func Readlink(path string) (isRegular bool, s string, err error) {
		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		if data.PrintNameLength > 0 {
		if data.PrintNameLength > 0 {
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength + data.PrintNameOffset)/2])
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
		} else {
		} else {
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength + data.SubstituteNameOffset)/2])
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
		}
		}
	case IO_REPARSE_TAG_MOUNT_POINT:
	case IO_REPARSE_TAG_MOUNT_POINT:
		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		if data.PrintNameLength > 0 {
		if data.PrintNameLength > 0 {
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength + data.PrintNameOffset)/2])
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
		} else {
		} else {
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength + data.SubstituteNameOffset)/2])
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
		}
		}
	case IO_REPARSE_TAG_DEDUP:
	case IO_REPARSE_TAG_DEDUP:
		return true, "", nil
		return true, "", nil
@@ -103,7 +104,7 @@ func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
	entry.GID = -1
	entry.GID = -1
}
}

func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
	return true
	return true
}
}