1
0
mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Compare commits

..

29 Commits

Author SHA1 Message Date
Gilbert Chen
839be6094f Remove unused import 2017-06-20 16:37:47 -04:00
Gilbert Chen
84a4c86ca7 Bump version to 2.0.3 2017-06-20 14:39:04 -04:00
Gilbert Chen
651d82e511 Check directory existence again when failing to create it to avoid erroring out on race condition 2017-06-20 14:38:09 -04:00
gilbertchen
169d6db544 Create README.md 2017-06-15 16:22:09 -04:00
gilbertchen
25684942b3 Merge pull request #78 from stefandz/patch-2
another tiny typo
2017-06-15 10:49:59 -04:00
gilbertchen
746431d5e0 Merge pull request #77 from stefandz/patch-1
Update GUIDE.md
2017-06-15 10:49:17 -04:00
Gilbert Chen
28da4d15e2 Fixed #76: must create a new chunk for uploading in the copy operation 2017-06-15 10:48:24 -04:00
stefandz
d36e80a5eb another tiny typo 2017-06-15 15:40:28 +01:00
stefandz
fe1de10f22 Update GUIDE.md
Tiny typo
2017-06-15 11:29:52 +01:00
gilbertchen
112d5b22e5 Replace goamz with aws-sdk-g 2017-06-13 15:16:24 -04:00
gilbertchen
3da8830592 Fix typos 2017-06-13 13:29:01 -04:00
gilbertchen
04b01fa87d Merge pull request #73 from sdaros/master
Fix typo
2017-06-13 13:17:05 -04:00
Stefano Da Ros
4b60859054 Fix typo
The file should be titled "known_hosts" instead.
2017-06-13 18:53:59 +02:00
Gilbert Chen
7e5fc0972d Make LICENSE a Markdown file for better viewing 2017-06-13 12:37:05 -04:00
Gilbert Chen
c9951d6036 Move LICENSE to the top directory 2017-06-13 12:36:02 -04:00
Gilbert Chen
92b3594e89 Add a LICENSE file 2017-06-13 12:35:06 -04:00
Gilbert Chen
2424a2eeed Switch from goamz to aws-sdk-go for the S3 storage backend 2017-06-13 12:27:01 -04:00
gilbertchen
2ace6c74e1 Merge pull request #71 from ech1965/pref-dir
add -pref-dir command line option for init subcommand
2017-06-13 11:58:07 -04:00
Etienne Charlier
2fcc4d44b9 Merge branch 'master' of https://github.com/gilbertchen/duplicacy into pref-dir 2017-06-12 19:28:52 +02:00
gilbertchen
3f45b0a15a Update README.md 2017-06-11 14:09:39 -04:00
gilbertchen
2d69f64c20 Create README.md 2017-06-11 14:08:13 -04:00
Gilbert Chen
7a1a541c98 Rename main directory for better support of go get 2017-06-11 14:02:43 -04:00
Etienne Charlier
7aa0eca47c Fix typo 2017-06-11 14:10:14 +02:00
Etienne Charlier
aa909c0c15 Update documentation 2017-06-11 13:48:11 +02:00
Etienne Charlier
9e1740c1d6 Fix merge error 2017-06-10 17:14:58 +02:00
Etienne Charlier
ae34347741 merge version 2.0.2 2017-06-10 17:12:44 +02:00
Etienne Charlier
1361b553ac Remove logging statement; refactor test scripts 2017-06-08 22:21:57 +02:00
Etienne Charlier
c688c501d3 Refactor variable names and revert shadow copy path computation 2017-06-07 21:02:55 +02:00
Etienne Charlier
c88e148d59 First steps -pref-dir 2017-06-05 23:16:11 +02:00
20 changed files with 439 additions and 119 deletions

3
.gitignore vendored Normal file
View File

@@ -0,0 +1,3 @@
.idea
duplicacy_main

View File

@@ -7,7 +7,7 @@ Duplicacy is based on the following open source project:
|https://github.com/bkaradzic/go-lz4 | BSD-2-Clause |
|https://github.com/Azure/azure-sdk-for-go | Apache-2.0 |
|https://github.com/tj/go-dropbox | MIT |
|https://github.com/goamz/goamz | LGPL-3.0 with static compilation exceptions |
|https://github.com/aws/aws-sdk-go | Apache-2.0 |
|https://github.com/howeyc/gopass | ISC |
|https://github.com/tmc/keyring | ISC |
|https://github.com/pcwizz/xattr | BSD-2-Clause |

View File

@@ -16,6 +16,7 @@ OPTIONS:
-chunk-size, -c 4M the average size of chunks
-max-chunk-size, -max 16M the maximum size of chunks (defaults to chunk-size * 4)
-min-chunk-size, -min 1M the minimum size of chunks (defaults to chunk-size / 4)
-pref-dir <preference directory path> Specify alternate location for .duplicacy preferences directory
```
The *init* command first connects to the storage specified by the storage URL. If the storage has already been
@@ -33,6 +34,8 @@ The -e option controls whether or not encryption will be enabled for the storage
The three chunk size parameters are passed to the variable-size chunking algorithm. Their values are important to the overall performance, especially for cloud storages. If the chunk size is too small, a lot of overhead will be in sending requests and receiving responses. If the chunk size is too large, the effect of deduplication will be less obvious as more data will need to be transferred with each chunk.
The -pref-dir controls the location of the preferences directory. If not specified, a directory named .duplicacy is created in the repository. If specified, it must point to a non-existing directory. The directory is created and a .duplicacy file is created in the repository. The .duplicacy file contains the absolute path name to the preferences directory.
Once a storage has been initialized with these parameters, these parameters cannot be modified any more.
#### Backup
@@ -499,4 +502,4 @@ Note that the passwords stored in the environment variable and the preference ne
## Scripts
You can instruct Duplicay to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.
You can instruct Duplicacy to run a script before or after executing a command. For example, if you create a bash script with the name *pre-prune* under the *.duplicacy/scripts* directory, this bash script will be run before the *prune* command starts. A script named *post-prune* will be run after the *prune* command finishes. This rule applies to all commands except *init*.

20
LICENSE.md Normal file
View File

@@ -0,0 +1,20 @@
Copyright © 2017 Acrosync LLC
Licensor: Acrosync LLC
Software: Duplicacy
Use Limitation: 5 users
License Grant. Licensor hereby grants to each recipient of the Software (“you”) a non-exclusive, non-transferable, royalty-free and fully-paid-up license, under all of the Licensors copyright and patent rights, to use, copy, distribute, prepare derivative works of, publicly perform and display the Software, subject to the Use Limitation and the conditions set forth below.
Use Limitation. The license granted above allows use by up to the number of users per entity set forth above (the “Use Limitation”). For determining the number of users, “you” includes all affiliates, meaning legal entities controlling, controlled by, or under common control with you. If you exceed the Use Limitation, your use is subject to payment of Licensors then-current list price for licenses.
Conditions. Redistribution in source code or other forms must include a copy of this license document to be provided in a reasonable manner. Any redistribution of the Software is only allowed subject to this license.
Trademarks. This license does not grant you any right in the trademarks, service marks, brand names or logos of Licensor.
DISCLAIMER. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OR CONDITION, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. LICENSORS HEREBY DISCLAIM ALL LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE.
Termination. If you violate the terms of this license, your rights will terminate automatically and will not be reinstated without the prior written consent of Licensor. Any such termination will not affect the right of others who may have received copies of the Software from you.

View File

@@ -4,6 +4,8 @@ Duplicacy is a new generation cross-platform cloud backup tool based on the idea
The repository hosts source code, design documents, and binary releases of the command line version. There is also a Duplicacy GUI frontend built for Windows and Mac OS X available from https://duplicacy.com.
There is a special edition of Duplicacy developed for VMware vSphere (ESXi) named [Vertical Backup](https://www.verticalbackup.com) that can back up virtual machine files on ESXi to local drives, network or cloud storages.
## Features
Duplicacy currently supports major cloud storage providers (Amazon S3, Google Cloud Storage, Microsoft Azure, Dropbox, Backblaze, Google Drive, Microsoft OneDrive, and Hubic) and offers all essential features of a modern backup tool:
@@ -26,16 +28,13 @@ The [design document](https://github.com/gilbertchen/duplicacy-cli/blob/master/D
## Getting Started
Duplicacy is written in Go. You can build the executable by running the following commands:
Duplicacy is written in Go. You can run the following command to build the executable (which will be created under `$GOPATH/bin`):
```
git clone https://github.com/gilbertchen/duplicacy.git
cd duplicacy
go get ./...
go build main/duplicacy_main.go
go get -u github.com/gilbertchen/duplicacy/...
```
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the version suitable for your platform. Installation is not needed.
You can also visit the [releases page](https://github.com/gilbertchen/duplicacy-cli/releases/latest) to download the pre-built binary suitable for your platform.
Once you have the Duplicacy executable on your path, you can change to the directory that you want to back up (called *repository*) and run the *init* command:
@@ -155,7 +154,7 @@ You'll need to input an access key and a secret key to access your Amazon S3 sto
Storage URL: gcs://bucket/path/to/storage
```
Starting from version 2.0.0, a new Google Cloud Storage backend is added which is implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Dupliacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
Starting from version 2.0.0, a new Google Cloud Storage backend is added which is implemented using the [official Google client library](https://godoc.org/cloud.google.com/go/storage). You must first obtain a credential file by [authorizing](https://duplicacy.com/gcp_start) Duplicacy to access your Google Cloud Storage account or by [downloading](https://console.cloud.google.com/projectselector/iam-admin/serviceaccounts) a service account credential file.
You can also use the s3 protocol to access Google Cloud Storage. To do this, you must enable the [s3 interoperability](https://cloud.google.com/storage/docs/migrating#migration-simple) in your Google Cloud Storage settings and set the storage url as `s3://storage.googleapis.com/bucket/path/to/storage`.
@@ -175,7 +174,7 @@ Storage URL: b2://bucket
You'll need to input the account id and application key.
Backblaze's B2 storage is not only the least expensive (at 0.5 cent per GB per month), but also the fastest. We have been working closely with their developers to leverage the full potentials provided by the B2 API in order to maximumize the transfer speed.
Backblaze's B2 storage is not only the least expensive (at 0.5 cent per GB per month), but also the fastest. We have been working closely with their developers to leverage the full potentials provided by the B2 API in order to maximize the transfer speed.
#### Google Drive

View File

@@ -18,6 +18,7 @@ import (
"github.com/gilbertchen/cli"
"github.com/gilbertchen/duplicacy/src"
"io/ioutil"
)
const (
@@ -36,14 +37,14 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
}
for {
stat, err := os.Stat(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY))
stat, err := os.Stat(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)) //TOKEEP
if err != nil && !os.IsNotExist(err) {
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the information about the directory %s: %v",
repository, err)
return "", nil
}
if stat != nil && stat.IsDir() {
if stat != nil && (stat.IsDir() || stat.Mode().IsRegular()) {
break
}
@@ -54,10 +55,10 @@ func getRepositoryPreference(context *cli.Context, storageName string) (reposito
}
repository = parent
}
duplicacy.LoadPreferences(repository)
duplicacy.SetKeyringFile(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "keyring"))
preferencePath := duplicacy.GetDuplicacyPreferencePath(repository)
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
if storageName == "" {
storageName = context.String("storage")
@@ -142,8 +143,9 @@ func runScript(context *cli.Context, repository string, storageName string, phas
if !ScriptEnabled {
return false
}
scriptDir, _ := filepath.Abs(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "scripts"))
preferencePath := duplicacy.GetDuplicacyPreferencePath(repository)
scriptDir, _ := filepath.Abs(path.Join(preferencePath, "scripts"))
scriptName := phase + "-" + context.Command.Name
script := path.Join(scriptDir, scriptName)
@@ -174,14 +176,14 @@ func runScript(context *cli.Context, repository string, storageName string, phas
}
func initRepository(context *cli.Context) {
configRespository(context, true)
configRepository(context, true)
}
func addStorage(context *cli.Context) {
configRespository(context, false)
configRepository(context, false)
}
func configRespository(context *cli.Context, init bool) {
func configRepository(context *cli.Context, init bool) {
setGlobalOptions(context)
defer duplicacy.CatchLogException()
@@ -220,21 +222,37 @@ func configRespository(context *cli.Context, init bool) {
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to retrieve the current working directory: %v", err)
return
}
duplicacyDirectory := path.Join(repository, duplicacy.DUPLICACY_DIRECTORY)
if stat, _ := os.Stat(path.Join(duplicacyDirectory, "preferences")); stat != nil {
preferencePath := context.String("pref-dir")
if preferencePath == "" {
preferencePath = path.Join(repository, duplicacy.DUPLICACY_DIRECTORY) // TOKEEP
}
if stat, _ := os.Stat(path.Join(preferencePath, "preferences")); stat != nil {
duplicacy.LOG_ERROR("REPOSITORY_INIT", "The repository %s has already been initialized", repository)
return
}
err = os.Mkdir(duplicacyDirectory, 0744)
err = os.Mkdir(preferencePath, 0744)
if err != nil && !os.IsExist(err) {
duplicacy.LOG_ERROR("REPOSITORY_INIT", "Failed to create the directory %s: %v",
duplicacy.DUPLICACY_DIRECTORY, err)
preferencePath, err)
return
}
duplicacy.SetKeyringFile(path.Join(duplicacyDirectory, "keyring"))
if context.String("pref-dir") != "" {
// out of tree preference file
// write real path into .duplicacy file inside repository
duplicacyFileName := path.Join(repository, duplicacy.DUPLICACY_FILE)
d1 := []byte(preferencePath)
err = ioutil.WriteFile(duplicacyFileName, d1, 0644)
if err != nil {
duplicacy.LOG_ERROR("REPOSITORY_PATH", "Failed to write %s file inside repository %v", duplicacyFileName, err)
return
}
}
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
} else {
repository, _ = getRepositoryPreference(context, "")
@@ -547,7 +565,6 @@ func changePassword(context *cli.Context) {
duplicacy.LOG_INFO("STORAGE_SET", "The password for storage %s has been changed", preference.StorageURL)
}
func backupRepository(context *cli.Context) {
setGlobalOptions(context)
defer duplicacy.CatchLogException()
@@ -1071,7 +1088,8 @@ func infoStorage(context *cli.Context) {
repository := context.String("repository")
if repository != "" {
duplicacy.SetKeyringFile(path.Join(repository, duplicacy.DUPLICACY_DIRECTORY, "keyring"))
preferencePath := duplicacy.GetDuplicacyPreferencePath(repository)
duplicacy.SetKeyringFile(path.Join(preferencePath, "keyring"))
}
isEncrypted := context.Bool("e")
@@ -1132,6 +1150,11 @@ func main() {
Usage: "the minimum size of chunks (defaults to chunk-size / 4)",
Argument: "1M",
},
cli.StringFlag{
Name: "pref-dir",
Usage: "Specify alternate location for .duplicacy preferences directory (absolute or relative to current directory)",
Argument: "<preferences directory path>",
},
},
Usage: "Initialize the storage if necessary and the current directory as the repository",
ArgsUsage: "<snapshot id> <storage url>",
@@ -1658,7 +1681,7 @@ func main() {
app.Name = "duplicacy"
app.HelpName = "duplicacy"
app.Usage = "A new generation cloud backup tool based on lock-free deduplication"
app.Version = "2.0.2"
app.Version = "2.0.3"
err := app.Run(os.Args)
if err != nil {
os.Exit(2)

17
integration_tests/test.sh Executable file
View File

@@ -0,0 +1,17 @@
#!/bin/bash
. ./test_functions.sh
fixture
init_repo_pref_dir
backup
add_file file3
backup
add_file file4
backup
add_file file5
restore
check

View File

@@ -0,0 +1,122 @@
#!/bin/bash
get_abs_filename() {
# $1 : relative filename
echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
}
pushd () {
command pushd "$@" > /dev/null
}
popd () {
command popd "$@" > /dev/null
}
# Functions used to create integration tests suite
DUPLICACY=$(get_abs_filename ../duplicacy_main)
# Base directory where test repositories will be created
TEST_ZONE=$HOME/DUPLICACY_TEST_ZONE
# Test Repository
TEST_REPO=$TEST_ZONE/TEST_REPO
# Storage for test ( For now, only local path storage is supported by test suite)
TEST_STORAGE=$TEST_ZONE/TEST_STORAGE
# Extra storage for copy operation
SECONDARY_STORAGE=$TEST_ZONE/SECONDARY_STORAGE
# Preference directory ( for testing the -pref-dir option)
DUPLICACY_PREF_DIR=$TEST_ZONE/TEST_DUPLICACY_PREF_DIR
# Scratch pad for testing restore
TEST_RESTORE_POINT=$TEST_ZONE/RESTORE_POINT
# Make sure $TEST_ZONE is in a known state
function fixture()
{
# clean TEST_RESTORE_POINT
rm -rf $TEST_RESTORE_POINT
mkdir -p $TEST_RESTORE_POINT
# clean TEST_STORAGE
rm -rf $TEST_STORAGE
mkdir -p $TEST_STORAGE
# clean SECONDARY_STORAGE
rm -rf $SECONDARY_STORAGE
mkdir -p $SECONDARY_STORAGE
# clean TEST_DOT_DUPLICACY
rm -rf $DUPLICACY_PREF_DIR
mkdir -p $DUPLICACY_PREF_DIR
# Create test repository
rm -rf ${TEST_REPO}
mkdir -p ${TEST_REPO}
pushd ${TEST_REPO}
echo "file1" > file1
mkdir dir1
echo "file2" > dir1/file2
popd
}
function init_repo()
{
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
${DUPLICACY} backup
popd
}
function init_repo_pref_dir()
{
pushd ${TEST_REPO}
${DUPLICACY} init -pref-dir "${DUPLICACY_PREF_DIR}" integration-tests ${TEST_STORAGE}
${DUPLICACY} add -copy default secondary integration-tests $SECONDARY_STORAGE
${DUPLICACY} backup
popd
}
function add_file()
{
FILE_NAME=$1
pushd ${TEST_REPO}
dd if=/dev/urandom of=${FILE_NAME} bs=1000 count=20000
popd
}
function backup()
{
pushd ${TEST_REPO}
${DUPLICACY} backup
${DUPLICACY} copy -from default -to secondary
popd
}
function restore()
{
pushd ${TEST_REPO}
${DUPLICACY} restore -r 2 -delete
popd
}
function check()
{
pushd ${TEST_REPO}
${DUPLICACY} check -files
${DUPLICACY} check -storage secondary -files
popd
}

View File

@@ -0,0 +1,17 @@
#!/bin/bash
. ./test_functions.sh
fixture
pushd ${TEST_REPO}
${DUPLICACY} init integration-tests $TEST_STORAGE -c 1k
add_file file3
add_file file4
${DUPLICACY} backup -threads 16
${DUPLICACY} check --files -stats
popd

View File

@@ -71,8 +71,9 @@ func CreateBackupManager(snapshotID string, storage Storage, top string, passwor
// SetupSnapshotCache creates the snapshot cache, which is merely a local storage under the default .duplicacy
// directory
func (manager *BackupManager) SetupSnapshotCache(top string, storageName string) bool {
cacheDir := path.Join(top, DUPLICACY_DIRECTORY, "cache", storageName)
preferencePath := GetDuplicacyPreferencePath(top)
cacheDir := path.Join(preferencePath, "cache", storageName)
storage, err := CreateFileStorage(cacheDir, 1)
if err != nil {
@@ -600,6 +601,7 @@ func (manager *BackupManager) Restore(top string, revision int, inPlace bool, qu
}
}
// TODO: verify how restore behaves when the repository was created using -pref-dir
err = os.Mkdir(path.Join(top, DUPLICACY_DIRECTORY), 0744)
if err != nil && !os.IsExist(err) {
LOG_ERROR("RESTORE_MKDIR", "Failed to create the preference directory: %v", err)
@@ -978,8 +980,9 @@ func (manager *BackupManager) RestoreFile(chunkDownloader *ChunkDownloader, chun
var existingFile, newFile *os.File
var err error
temporaryPath := path.Join(top, DUPLICACY_DIRECTORY, "temporary")
preferencePath := GetDuplicacyPreferencePath(top)
temporaryPath := path.Join(preferencePath, "temporary")
fullPath := joinPath(top, entry.Path)
defer func() {
@@ -1334,6 +1337,7 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
} else {
LOG_INFO("SNAPSHOT_COPY", "Copied chunk %s (%d/%d)", chunk.GetID(), chunkIndex, len(chunks))
}
otherManager.config.PutChunk(chunk)
})
chunkUploader.Start()
@@ -1347,7 +1351,10 @@ func (manager *BackupManager) CopySnapshots(otherManager *BackupManager, snapsho
i := chunkDownloader.AddChunk(chunkHash)
chunk := chunkDownloader.WaitForChunk(i)
chunkUploader.StartChunk(chunk, chunkIndex)
newChunk := otherManager.config.GetChunk()
newChunk.Reset(true)
newChunk.Write(chunk.GetBytes())
chunkUploader.StartChunk(newChunk, chunkIndex)
}
chunkDownloader.Stop()

View File

@@ -22,6 +22,7 @@ import (
// This is the hidden directory in the repository for storing various files.
var DUPLICACY_DIRECTORY = ".duplicacy"
var DUPLICACY_FILE = ".duplicacy"
// Regex for matching 'StartChunk:StartOffset:EndChunk:EndOffset'
var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)

View File

@@ -158,7 +158,11 @@ func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil
err = os.Mkdir(subDir, 0744)
if err != nil {
return "", false, 0, err
// The directory may have been created by other threads so check it again.
stat, _ := os.Stat(subDir)
if stat == nil || !stat.IsDir() {
return "", false, 0, err
}
}
dir = subDir

View File

@@ -9,6 +9,7 @@ import (
"path"
"io/ioutil"
"reflect"
"os"
)
// Preference stores options for each storage.
@@ -25,9 +26,52 @@ type Preference struct {
var Preferences [] Preference
func LoadPreferences(repository string) (bool) {
// Compute .duplicacy directory path name:
// - if .duplicacy is a directory -> compute absolute path name and return it
// - if .duplicacy is a file -> assumed this file contains the real path name of .duplicacy
// - if pointed directory does not exits... return error
func GetDuplicacyPreferencePath( repository string) (preferencePath string){
preferencePath = path.Join(repository, DUPLICACY_DIRECTORY) //TOKEEP
stat, err := os.Stat(preferencePath)
if err != nil && !os.IsNotExist(err) {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to retrieve the information about the directory %s: %v",
repository, err)
return ""
}
if stat != nil && stat.IsDir() {
// $repository/.duplicacy exists and is a directory --> we found the .duplicacy directory
return path.Clean(preferencePath)
}
if stat != nil && stat.Mode().IsRegular() {
b, err := ioutil.ReadFile(preferencePath) // just pass the file name
if err != nil {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to read file %s: %v",
preferencePath, err)
return ""
}
dotDuplicacyContent := string(b) // convert content to a 'string'
stat, err := os.Stat(dotDuplicacyContent)
if err != nil && !os.IsNotExist(err) {
LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to retrieve the information about the directory %s: %v",
repository, err)
return ""
}
if stat != nil && stat.IsDir() {
// If expression read from .duplicacy file is a directory --> we found the .duplicacy directory
return path.Clean(dotDuplicacyContent)
}
}
return ""
}
description, err := ioutil.ReadFile(path.Join(repository, DUPLICACY_DIRECTORY, "preferences"))
func LoadPreferences(repository string) (bool) {
preferencePath := GetDuplicacyPreferencePath(repository)
description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
if err != nil {
LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
return false
@@ -53,8 +97,9 @@ func SavePreferences(repository string) (bool) {
LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
return false
}
preferenceFile := path.Join(repository, DUPLICACY_DIRECTORY, "/preferences")
preferencePath := GetDuplicacyPreferencePath(repository)
preferenceFile := path.Join(preferencePath, "/preferences")
err = ioutil.WriteFile(preferenceFile, description, 0644)
if err != nil {
LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)

View File

@@ -5,57 +5,67 @@
package duplicacy
import (
"time"
"github.com/gilbertchen/goamz/aws"
"github.com/gilbertchen/goamz/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
type S3Storage struct {
RateLimitedStorage
buckets []*s3.Bucket
client *s3.S3
bucket string
storageDir string
numberOfThreads int
}
// CreateS3Storage creates a amazon s3 storage object.
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
accessKey string, secretKey string, threads int) (storage *S3Storage, err error) {
var region aws.Region
token := ""
auth := credentials.NewStaticCredentials(accessKey, secretKey, token)
if endpoint == "" {
if regionName == "" {
regionName = "us-east-1"
if regionName == "" && endpoint == "" {
defaultRegionConfig := &aws.Config {
Region: aws.String("us-east-1"),
Credentials: auth,
}
region = aws.Regions[regionName]
} else {
region = aws.Region { Name: regionName, S3Endpoint:"https://" + endpoint }
}
s3Client := s3.New(session.New(defaultRegionConfig))
auth := aws.Auth{ AccessKey: accessKey, SecretKey: secretKey }
response, err := s3Client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucketName)})
var buckets []*s3.Bucket
for i := 0; i < threads; i++ {
s3Client := s3.New(auth, region)
s3Client.AttemptStrategy = aws.AttemptStrategy{
Min: 8,
Total: 300 * time.Second,
Delay: 1000 * time.Millisecond,
if err != nil {
return nil, err
}
regionName = "us-east-1"
if response.LocationConstraint != nil {
regionName = *response.LocationConstraint
}
bucket := s3Client.Bucket(bucketName)
buckets = append(buckets, bucket)
}
config := &aws.Config {
Region: aws.String(regionName),
Credentials: auth,
Endpoint: aws.String(endpoint),
}
if len(storageDir) > 0 && storageDir[len(storageDir) - 1] != '/' {
storageDir += "/"
}
storage = &S3Storage {
buckets: buckets,
client: s3.New(session.New(config)),
bucket: bucketName,
storageDir: storageDir,
numberOfThreads: threads,
}
return storage, nil
}
@@ -65,67 +75,82 @@ func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string
dir += "/"
}
dirLength := len(storage.storageDir + dir)
if dir == "snapshots/" {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "/", "", 100)
dir = storage.storageDir + dir
input := s3.ListObjectsInput {
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
Delimiter: aws.String("/"),
MaxKeys: aws.Int64(1000),
}
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}
for _, subDir := range results.CommonPrefixes {
files = append(files, subDir[dirLength:])
for _, subDir := range output.CommonPrefixes {
files = append(files, (*subDir.Prefix)[len(dir):])
}
return files, nil, nil
} else if dir == "chunks/" {
} else {
dir = storage.storageDir + dir
marker := ""
for {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", marker, 1000)
input := s3.ListObjectsInput {
Bucket: aws.String(storage.bucket),
Prefix: aws.String(dir),
MaxKeys: aws.Int64(1000),
Marker: aws.String(marker),
}
output, err := storage.client.ListObjects(&input)
if err != nil {
return nil, nil, err
}
for _, object := range results.Contents {
files = append(files, object.Key[dirLength:])
sizes = append(sizes, object.Size)
for _, object := range output.Contents {
files = append(files, (*object.Key)[len(dir):])
sizes = append(sizes, *object.Size)
}
if !results.IsTruncated {
if !*output.IsTruncated {
break
}
marker = results.Contents[len(results.Contents) - 1].Key
marker = *output.Contents[len(output.Contents) - 1].Key
}
return files, sizes, nil
}
} else {
results, err := storage.buckets[threadIndex].List(storage.storageDir + dir, "", "", 1000)
if err != nil {
return nil, nil, err
}
for _, object := range results.Contents {
files = append(files, object.Key[dirLength:])
}
return files, nil, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
input := &s3.DeleteObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
_, err = storage.client.DeleteObject(input)
return err
}
// MoveFile renames the file.
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {
options := s3.CopyOptions { ContentType: "application/duplicacy" }
_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir + to, s3.Private, options, storage.buckets[threadIndex].Name + "/" + storage.storageDir + from)
if err != nil {
return nil
input := &s3.CopyObjectInput {
Bucket: aws.String(storage.bucket),
CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
Key: aws.String(storage.storageDir + to),
}
_, err = storage.client.CopyObject(input)
if err != nil {
return err
}
return storage.DeleteFile(threadIndex, from)
}
// CreateDirectory creates a new directory.
@@ -136,19 +161,24 @@ func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err erro
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
response, err := storage.buckets[threadIndex].Head(storage.storageDir + filePath, nil)
input := &s3.HeadObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.HeadObject(input)
if err != nil {
if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
}
if response.StatusCode == 403 || response.StatusCode == 404 {
if output == nil || output.ContentLength == nil {
return false, false, 0, nil
} else {
return true, false, response.ContentLength, nil
return true, false, *output.ContentLength, nil
}
}
@@ -174,14 +204,19 @@ func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bo
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
input := &s3.GetObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.GetObject(input)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / len(storage.buckets))
defer output.Body.Close()
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.bucket))
return err
}
@@ -189,9 +224,16 @@ func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
options := s3.Options { }
reader := CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.buckets))
return storage.buckets[threadIndex].PutReader(storage.storageDir + filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
input := &s3.PutObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
ACL: aws.String(s3.ObjectCannedACLPrivate),
Body: CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.bucket)),
ContentType: aws.String("application/duplicacy"),
}
_, err = storage.client.PutObject(input)
return err
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when

View File

@@ -215,7 +215,11 @@ func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil
err = storage.client.Mkdir(subDir)
if err != nil {
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
// The directory may have been created by other threads so check it again.
stat, _ := storage.client.Stat(subDir)
if stat == nil || !stat.IsDir() {
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
}
}
dir = subDir

View File

@@ -9,7 +9,6 @@ import (
"unsafe"
"time"
"os"
"path"
"runtime"
ole "github.com/gilbertchen/go-ole"
@@ -509,8 +508,9 @@ func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)
snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)
shadowLink = path.Join(top, DUPLICACY_DIRECTORY) + "\\shadow"
preferencePath := GetDuplicacyPreferencePath(top)
shadowLink = preferencePath + "\\shadow"
os.Remove(shadowLink)
err = os.Symlink(snapshotPath + "\\", shadowLink)
if err != nil {

View File

@@ -67,7 +67,9 @@ func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, ski
}
var patterns []string
patternFile, err := ioutil.ReadFile(path.Join(top, DUPLICACY_DIRECTORY, "filters"))
preferencePath := GetDuplicacyPreferencePath(top)
patternFile, err := ioutil.ReadFile(path.Join(preferencePath, "filters"))
if err == nil {
for _, pattern := range strings.Split(string(patternFile), "\n") {
pattern = strings.TrimSpace(pattern)

View File

@@ -1510,8 +1510,9 @@ func (manager *SnapshotManager) PruneSnapshots(top string, selfID string, snapsh
if len(revisionsToBeDeleted) > 0 && (len(tags) > 0 || len(retentions) > 0) {
LOG_WARN("DELETE_OPTIONS", "Tags or retention policy will be ignored if at least one revision is specified")
}
logDir := path.Join(top, DUPLICACY_DIRECTORY, "logs")
preferencePath := GetDuplicacyPreferencePath(top)
logDir := path.Join(preferencePath, "logs")
os.Mkdir(logDir, 0700)
logFileName := path.Join(logDir, time.Now().Format("prune-log-20060102-150405"))
logFile, err := os.OpenFile(logFileName, os.O_WRONLY | os.O_CREATE | os.O_TRUNC, 0600)

View File

@@ -80,9 +80,9 @@ func checkHostKey(repository string, hostname string, remote net.Addr, key ssh.P
if len(repository) == 0 {
return nil
}
duplicacyDirectory := path.Join(repository, DUPLICACY_DIRECTORY)
hostFile := path.Join(duplicacyDirectory, "knowns_hosts")
preferencePath := GetDuplicacyPreferencePath(repository)
hostFile := path.Join(preferencePath, "known_hosts")
file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
if err != nil {
return err

View File

@@ -48,6 +48,16 @@ func (reader *RateLimitedReader) Reset() {
reader.Next = 0
}
func (reader *RateLimitedReader) Seek(offset int64, whence int) (int64, error) {
if whence == io.SeekStart {
reader.Next = int(offset)
} else if whence == io.SeekCurrent {
reader.Next += int(offset)
} else {
reader.Next = len(reader.Content) - int(offset)
}
return int64(reader.Next), nil
}
func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {