mirror of https://github.com/gilbertchen/duplicacy synced 2025-12-06 00:03:38 +00:00

Run goimports on all source files

Gilbert Chen
2017-09-20 23:07:43 -04:00
parent 978212fd75
commit 923cd0aa63
49 changed files with 13570 additions and 13631 deletions



@@ -5,448 +5,448 @@
package duplicacy
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"mime/multipart"
"net/http"
"sync"
"time"
"golang.org/x/oauth2"
)
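// ACDError is the error type returned by the Amazon Cloud Drive API.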
type ACDError struct {
Status int
Message string `json:"message"`
}
func (err ACDError) Error() string {
return fmt.Sprintf("%d %s", err.Status, err.Message)
}
var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"
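// ACDClient maintains the OAuth2 token and the endpoint URLs needed to access Amazon Cloud Drive.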
type ACDClient struct {
HTTPClient *http.Client
TokenFile string
Token *oauth2.Token
TokenLock *sync.Mutex
ContentURL string
MetadataURL string
TestMode bool
}
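// NewACDClient creates an ACD client from the OAuth2 token stored in 'tokenFile'.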
func NewACDClient(tokenFile string) (*ACDClient, error) {
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
return nil, err
}
token := new(oauth2.Token)
if err := json.Unmarshal(description, token); err != nil {
return nil, err
}
client := &ACDClient{
HTTPClient: http.DefaultClient,
TokenFile: tokenFile,
Token: token,
TokenLock: &sync.Mutex{},
}
client.GetEndpoint()
return client, nil
}
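// call sends a request to 'url', refreshing the access token on 401 responses and retrying with randomized exponential backoff on 429 and 500 responses.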
func (client *ACDClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {
LOG_DEBUG("ACD_CALL", "Calling %s", url)
var response *http.Response
backoff := 1
for i := 0; i < 8; i++ {
var inputReader io.Reader
switch input.(type) {
default:
jsonInput, err := json.Marshal(input)
if err != nil {
return nil, 0, err
}
inputReader = bytes.NewReader(jsonInput)
case []byte:
inputReader = bytes.NewReader(input.([]byte))
case int:
inputReader = bytes.NewReader([]byte(""))
case *bytes.Buffer:
inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
case *RateLimitedReader:
input.(*RateLimitedReader).Reset()
inputReader = input.(*RateLimitedReader)
}
request, err := http.NewRequest(method, url, inputReader)
if err != nil {
return nil, 0, err
}
if reader, ok := inputReader.(*RateLimitedReader); ok {
request.ContentLength = reader.Length()
}
if url != ACDRefreshTokenURL {
client.TokenLock.Lock()
request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
client.TokenLock.Unlock()
}
if contentType != "" {
request.Header.Set("Content-Type", contentType)
}
response, err = client.HTTPClient.Do(request)
if err != nil {
return nil, 0, err
}
if response.StatusCode < 400 {
return response.Body, response.ContentLength, nil
}
if response.StatusCode == 404 {
buffer := new(bytes.Buffer)
buffer.ReadFrom(response.Body)
response.Body.Close()
return nil, 0, ACDError{Status: response.StatusCode, Message: buffer.String()}
}
if response.StatusCode == 400 {
defer response.Body.Close()
e := &ACDError{
Status: response.StatusCode,
}
if err := json.NewDecoder(response.Body).Decode(e); err == nil {
return nil, 0, e
} else {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Bad input parameter"}
}
}
response.Body.Close()
if response.StatusCode == 401 {
if url == ACDRefreshTokenURL {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Unauthorized"}
}
err = client.RefreshToken()
if err != nil {
return nil, 0, err
}
continue
} else if response.StatusCode == 403 {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Forbidden"}
} else if response.StatusCode == 404 {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Resource not found"}
} else if response.StatusCode == 409 {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Conflict"}
} else if response.StatusCode == 411 {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Length required"}
} else if response.StatusCode == 412 {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Precondition failed"}
} else if response.StatusCode == 429 || response.StatusCode == 500 {
reason := "Too many requests"
if response.StatusCode == 500 {
reason = "Internal server error"
}
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ACD_RETRY", "%s; retry after %d milliseconds", reason, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
backoff *= 2
continue
} else if response.StatusCode == 503 {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Service unavailable"}
} else {
return nil, 0, ACDError{Status: response.StatusCode, Message: "Unknown error"}
}
}
return nil, 0, fmt.Errorf("Maximum number of retries reached")
}
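// RefreshToken renews the access token and writes the updated token back to the token file.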
func (client *ACDClient) RefreshToken() (err error) {
client.TokenLock.Lock()
defer client.TokenLock.Unlock()
readCloser, _, err := client.call(ACDRefreshTokenURL, "POST", client.Token, "")
if err != nil {
return err
}
defer readCloser.Close()
if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
return err
}
description, err := json.Marshal(client.Token)
if err != nil {
return err
}
err = ioutil.WriteFile(client.TokenFile, description, 0644)
if err != nil {
return err
}
return nil
}
type ACDGetEndpointOutput struct {
CustomerExists bool `json:"customerExists"`
ContentURL string `json:"contentUrl"`
MetadataURL string `json:"metadataUrl"`
}
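// GetEndpoint retrieves the content and metadata URLs for the current account.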
func (client *ACDClient) GetEndpoint() (err error) {
readCloser, _, err := client.call("https://drive.amazonaws.com/drive/v1/account/endpoint", "GET", 0, "")
if err != nil {
return err
}
defer readCloser.Close()
output := &ACDGetEndpointOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return err
}
client.ContentURL = output.ContentURL
client.MetadataURL = output.MetadataURL
return nil
}
type ACDEntry struct {
Name string `json:"name"`
ID string `json:"id"`
Size int64 `json:"size"`
Kind string `json:"kind"`
}
type ACDListEntriesOutput struct {
Count int `json:"count"`
NextToken string `json:"nextToken"`
Entries []ACDEntry `json:"data"`
}
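// ListEntries returns all files (if 'listFiles' is true) or folders under the node 'parentID'.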
func (client *ACDClient) ListEntries(parentID string, listFiles bool) ([]ACDEntry, error) {
startToken := ""
entries := []ACDEntry{}
for {
url := client.MetadataURL + "nodes/" + parentID + "/children?filters="
if listFiles {
url += "kind:FILE"
} else {
url += "kind:FOLDER"
}
if startToken != "" {
url += "&startToken=" + startToken
}
if client.TestMode {
url += "&limit=8"
}
readCloser, _, err := client.call(url, "GET", 0, "")
if err != nil {
return nil, err
}
defer readCloser.Close()
output := &ACDListEntriesOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return nil, err
}
entries = append(entries, output.Entries...)
startToken = output.NextToken
if startToken == "" {
break
}
}
return entries, nil
}
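// ListByName finds the child of 'parentID' named 'name' and returns its node id, whether it is a folder, and its size; an empty 'parentID' looks up the root node.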
func (client *ACDClient) ListByName(parentID string, name string) (string, bool, int64, error) {
url := client.MetadataURL + "nodes"
if parentID == "" {
url += "?filters=Kind:FOLDER+AND+isRoot:true"
} else {
url += "/" + parentID + "/children?filters=name:" + name
}
readCloser, _, err := client.call(url, "GET", 0, "")
if err != nil {
return "", false, 0, err
}
defer readCloser.Close()
output := &ACDListEntriesOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return "", false, 0, err
}
if len(output.Entries) == 0 {
return "", false, 0, nil
}
return output.Entries[0].ID, output.Entries[0].Kind == "FOLDER", output.Entries[0].Size, nil
}
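// DownloadFile returns a reader for the content of the node 'fileID'.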
func (client *ACDClient) DownloadFile(fileID string) (io.ReadCloser, int64, error) {
url := client.ContentURL + "nodes/" + fileID + "/content"
return client.call(url, "GET", 0, "")
}
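// UploadFile uploads 'content' as a file named 'name' under 'parentID' via a multipart request, rate-limited if 'rateLimit' is positive.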
func (client *ACDClient) UploadFile(parentID string, name string, content []byte, rateLimit int) (fileID string, err error) {
url := client.ContentURL + "nodes?suppress=deduplication"
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
metadata := make(map[string]interface{})
metadata["name"] = name
metadata["kind"] = "FILE"
metadata["parents"] = []string{parentID}
metadataJSON, err := json.Marshal(metadata)
if err != nil {
return "", err
}
err = writer.WriteField("metadata", string(metadataJSON))
if err != nil {
return "", err
}
part, err := writer.CreateFormFile("content", name)
if err != nil {
return "", err
}
_, err = part.Write(content)
if err != nil {
return "", err
}
writer.Close()
var input interface{}
input = body
if rateLimit > 0 {
input = CreateRateLimitedReader(body.Bytes(), rateLimit)
}
readCloser, _, err := client.call(url, "POST", input, writer.FormDataContentType())
if err != nil {
return "", err
}
defer readCloser.Close()
entry := ACDEntry{}
if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
return "", err
}
return entry.ID, nil
}
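// DeleteFile moves the node 'fileID' to the trash.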
func (client *ACDClient) DeleteFile(fileID string) error {
url := client.MetadataURL + "trash/" + fileID
readCloser, _, err := client.call(url, "PUT", 0, "")
if err != nil {
return err
}
readCloser.Close()
return nil
}
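// MoveFile moves the node 'fileID' from 'fromParentID' to 'toParentID'.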
func (client *ACDClient) MoveFile(fileID string, fromParentID string, toParentID string) error {
url := client.MetadataURL + "nodes/" + toParentID + "/children"
parameters := make(map[string]string)
parameters["fromParent"] = fromParentID
parameters["childId"] = fileID
readCloser, _, err := client.call(url, "POST", parameters, "")
if err != nil {
return err
}
readCloser.Close()
return nil
}
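// CreateDirectory creates a directory named 'name' under 'parentID' and returns its node id.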
func (client *ACDClient) CreateDirectory(parentID string, name string) (string, error) {
url := client.MetadataURL + "nodes"
parameters := make(map[string]interface{})
parameters["name"] = name
parameters["kind"] = "FOLDER"
parameters["parents"] = []string{parentID}
readCloser, _, err := client.call(url, "POST", parameters, "")
if err != nil {
return "", err
}
defer readCloser.Close()
entry := ACDEntry{}
if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
return "", err
}
return entry.ID, nil
}


@@ -5,149 +5,149 @@
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"testing"
crypto_rand "crypto/rand"
"math/rand"
)
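// TestACDClient uploads a set of randomly generated files, moves them, verifies their hashes after downloading, and finally deletes them.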
func TestACDClient(t *testing.T) {
acdClient, err := NewACDClient("acd-token.json")
if err != nil {
t.Errorf("Failed to create the ACD client: %v", err)
return
}
acdClient.TestMode = true
rootID, _, _, err := acdClient.ListByName("", "")
if err != nil {
t.Errorf("Failed to get the root node: %v", err)
return
}
if rootID == "" {
t.Errorf("No root node")
return
}
testID, _, _, err := acdClient.ListByName(rootID, "test")
if err != nil {
t.Errorf("Failed to list the test directory: %v", err)
return
}
if testID == "" {
testID, err = acdClient.CreateDirectory(rootID, "test")
if err != nil {
t.Errorf("Failed to create the test directory: %v", err)
return
}
}
test1ID, _, _, err := acdClient.ListByName(testID, "test1")
if err != nil {
t.Errorf("Failed to list the test1 directory: %v", err)
return
}
if test1ID == "" {
test1ID, err = acdClient.CreateDirectory(testID, "test1")
if err != nil {
t.Errorf("Failed to create the test1 directory: %v", err)
return
}
}
test2ID, _, _, err := acdClient.ListByName(testID, "test2")
if err != nil {
t.Errorf("Failed to list the test2 directory: %v", err)
return
}
if test2ID == "" {
test2ID, err = acdClient.CreateDirectory(testID, "test2")
if err != nil {
t.Errorf("Failed to create the test2 directory: %v", err)
return
}
}
fmt.Printf("test1: %s, test2: %s\n", test1ID, test2ID)
numberOfFiles := 20
maxFileSize := 64 * 1024
for i := 0; i < numberOfFiles; i++ {
content := make([]byte, rand.Int()%maxFileSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hasher := sha256.New()
hasher.Write(content)
filename := hex.EncodeToString(hasher.Sum(nil))
fmt.Printf("file: %s\n", filename)
_, err = acdClient.UploadFile(test1ID, filename, content, 100)
if err != nil {
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
t.Errorf("Failed to upload the file %s: %v", filename, err)
return
}
}
}
entries, err := acdClient.ListEntries(test1ID, true)
if err != nil {
t.Errorf("Error listing randomly generated files: %v", err)
return
}
for _, entry := range entries {
err = acdClient.MoveFile(entry.ID, test1ID, test2ID)
if err != nil {
t.Errorf("Failed to move %s: %v", entry.Name, err)
return
}
}
entries, err = acdClient.ListEntries(test2ID, true)
if err != nil {
t.Errorf("Error listing randomly generated files: %v", err)
return
}
for _, entry := range entries {
readCloser, _, err := acdClient.DownloadFile(entry.ID)
if err != nil {
t.Errorf("Error downloading file %s: %v", entry.Name, err)
return
}
hasher := sha256.New()
io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Name {
t.Errorf("File %s, hash %s", entry.Name, hash)
}
readCloser.Close()
}
for _, entry := range entries {
err = acdClient.DeleteFile(entry.ID)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
return
}
}
}


@@ -5,400 +5,398 @@
package duplicacy
import (
"fmt"
"path"
"strings"
"sync"
)
type ACDStorage struct {
RateLimitedStorage
client *ACDClient
idCache map[string]string
idCacheLock *sync.Mutex
numberOfThreads int
}
// CreateACDStorage creates an ACD storage object.
func CreateACDStorage(tokenFile string, storagePath string, threads int) (storage *ACDStorage, err error) {
client, err := NewACDClient(tokenFile)
if err != nil {
return nil, err
}
storage = &ACDStorage{
client: client,
idCache: make(map[string]string),
idCacheLock: &sync.Mutex{},
numberOfThreads: threads,
}
storagePathID, _, _, err := storage.getIDFromPath(0, storagePath)
if err != nil {
return nil, err
}
storage.idCache[""] = storagePathID
for _, dir := range []string{"chunks", "fossils", "snapshots"} {
dirID, isDir, _, err := client.ListByName(storagePathID, dir)
if err != nil {
return nil, err
}
if dirID == "" {
dirID, err = client.CreateDirectory(storagePathID, dir)
if err != nil {
return nil, err
}
} else if !isDir {
return nil, fmt.Errorf("%s/%s is not a directory", storagePath, dir)
}
storage.idCache[dir] = dirID
}
return storage, nil
}
func (storage *ACDStorage) getPathID(path string) string {
storage.idCacheLock.Lock()
pathID := storage.idCache[path]
storage.idCacheLock.Unlock()
return pathID
}
func (storage *ACDStorage) findPathID(path string) (string, bool) {
storage.idCacheLock.Lock()
pathID, ok := storage.idCache[path]
storage.idCacheLock.Unlock()
return pathID, ok
}
func (storage *ACDStorage) savePathID(path string, pathID string) {
storage.idCacheLock.Lock()
storage.idCache[path] = pathID
storage.idCacheLock.Unlock()
}
func (storage *ACDStorage) deletePathID(path string) {
storage.idCacheLock.Lock()
delete(storage.idCache, path)
storage.idCacheLock.Unlock()
}
func (storage *ACDStorage) convertFilePath(filePath string) string {
if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
}
return filePath
}
func (storage *ACDStorage) getIDFromPath(threadIndex int, path string) (fileID string, isDir bool, size int64, err error) {
parentID, ok := storage.findPathID("")
if !ok {
parentID, isDir, size, err = storage.client.ListByName("", "")
if err != nil {
return "", false, 0, err
}
}
names := strings.Split(path, "/")
for i, name := range names {
parentID, isDir, _, err = storage.client.ListByName(parentID, name)
if err != nil {
return "", false, 0, err
}
if parentID == "" {
if i == len(names)-1 {
return "", false, 0, nil
} else {
return "", false, 0, fmt.Errorf("File path '%s' does not exist", path)
}
}
if i != len(names)-1 && !isDir {
return "", false, 0, fmt.Errorf("Invalid path %s", path)
}
}
return parentID, isDir, size, err
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
var err error
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
if dir == "snapshots" {
entries, err := storage.client.ListEntries(storage.getPathID(dir), false)
if err != nil {
return nil, nil, err
}
subDirs := []string{}
for _, entry := range entries {
storage.savePathID(entry.Name, entry.ID)
subDirs = append(subDirs, entry.Name+"/")
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") {
name := dir[len("snapshots/"):]
pathID, ok := storage.findPathID(dir)
if !ok {
pathID, _, _, err = storage.client.ListByName(storage.getPathID("snapshots"), name)
if err != nil {
return nil, nil, err
}
if pathID == "" {
return nil, nil, nil
}
}
entries, err := storage.client.ListEntries(pathID, true)
if err != nil {
return nil, nil, err
}
files := []string{}
for _, entry := range entries {
storage.savePathID(dir+"/"+entry.Name, entry.ID)
files = append(files, entry.Name)
}
return files, nil, nil
} else {
files := []string{}
sizes := []int64{}
for _, parent := range []string{"chunks", "fossils"} {
entries, err := storage.client.ListEntries(storage.getPathID(parent), true)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
name := entry.Name
if parent == "fossils" {
name += ".fsl"
}
storage.savePathID(parent+"/"+entry.Name, entry.ID)
files = append(files, name)
sizes = append(sizes, entry.Size)
}
}
return files, sizes, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *ACDStorage) DeleteFile(threadIndex int, filePath string) (err error) {
filePath = storage.convertFilePath(filePath)
fileID, ok := storage.findPathID(filePath)
if !ok {
fileID, _, _, err = storage.getIDFromPath(threadIndex, filePath)
if err != nil {
return err
}
if fileID == "" {
LOG_TRACE("ACD_STORAGE", "File %s has disappeared before deletion", filePath)
return nil
}
storage.savePathID(filePath, fileID)
}
err = storage.client.DeleteFile(fileID)
if e, ok := err.(ACDError); ok && e.Status == 409 {
LOG_DEBUG("ACD_DELETE", "Ignore 409 conflict error")
return nil
}
return err
}
// MoveFile renames the file.
func (storage *ACDStorage) MoveFile(threadIndex int, from string, to string) (err error) {
from = storage.convertFilePath(from)
to = storage.convertFilePath(to)
fileID, ok := storage.findPathID(from)
if !ok {
return fmt.Errorf("Attempting to rename file %s with unknown id", from)
}
fromParentID := storage.getPathID("chunks")
toParentID := storage.getPathID("fossils")
if strings.HasPrefix(from, "fossils") {
fromParentID, toParentID = toParentID, fromParentID
}
err = storage.client.MoveFile(fileID, fromParentID, toParentID)
if err != nil {
if e, ok := err.(ACDError); ok && e.Status == 409 {
LOG_DEBUG("ACD_MOVE", "Ignore 409 conflict error")
} else {
return err
}
}
storage.savePathID(to, storage.getPathID(from))
storage.deletePathID(from)
return nil
}
// CreateDirectory creates a new directory.
func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err error) {
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
if dir == "chunks" || dir == "snapshots" {
return nil
}
if strings.HasPrefix(dir, "snapshots/") {
name := dir[len("snapshots/"):]
dirID, err := storage.client.CreateDirectory(storage.getPathID("snapshots"), name)
if err != nil {
if e, ok := err.(ACDError); ok && e.Status == 409 {
return nil
} else {
return err
}
}
storage.savePathID(dir, dirID)
return nil
}
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *ACDStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
filePath = filePath[:len(filePath)-1]
}
filePath = storage.convertFilePath(filePath)
fileID := ""
fileID, isDir, size, err = storage.getIDFromPath(threadIndex, filePath)
if err != nil {
return false, false, 0, err
}
if fileID == "" {
return false, false, 0, nil
}
return true, isDir, size, nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *ACDStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
parentID := ""
filePath = "chunks/" + chunkID
realPath := filePath
if isFossil {
parentID = storage.getPathID("fossils")
filePath += ".fsl"
realPath = "fossils/" + chunkID + ".fsl"
} else {
parentID = storage.getPathID("chunks")
}
fileID := ""
fileID, _, size, err = storage.client.ListByName(parentID, chunkID)
if fileID != "" {
storage.savePathID(realPath, fileID)
}
return filePath, fileID != "", size, err
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
fileID, ok := storage.findPathID(filePath)
if !ok {
fileID, _, _, err = storage.getIDFromPath(threadIndex, filePath)
if err != nil {
return err
}
if fileID == "" {
return fmt.Errorf("File path '%s' does not exist", filePath)
}
storage.savePathID(filePath, fileID)
}
readCloser, _, err := storage.client.DownloadFile(fileID)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
parent := path.Dir(filePath)
if parent == "." {
parent = ""
}
parentID, ok := storage.findPathID(parent)
if !ok {
parentID, _, _, err = storage.getIDFromPath(threadIndex, parent)
if err != nil {
return err
}
if parentID == "" {
return fmt.Errorf("File path '%s' does not exist", parent)
}
storage.savePathID(parent, parentID)
}
fileID, err := storage.client.UploadFile(parentID, path.Base(filePath), content, storage.UploadRateLimit/storage.numberOfThreads)
if err == nil {
storage.savePathID(filePath, fileID)
return nil
}
if e, ok := err.(ACDError); ok && e.Status == 409 {
LOG_TRACE("ACD_UPLOAD", "File %s already exists", filePath)
return nil
} else {
return err
}
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *ACDStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *ACDStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *ACDStorage) IsStrongConsistent() bool { return true }
// If the storage supports fast listing of file names.
func (storage *ACDStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *ACDStorage) EnableTestMode() {}


@@ -5,200 +5,200 @@
package duplicacy
import (
"fmt"
"strings"
"github.com/gilbertchen/azure-sdk-for-go/storage"
)
type AzureStorage struct {
RateLimitedStorage
containers []*storage.Container
}
func CreateAzureStorage(accountName string, accountKey string,
containerName string, threads int) (azureStorage *AzureStorage, err error) {
var containers []*storage.Container
for i := 0; i < threads; i++ {
client, err := storage.NewBasicClient(accountName, accountKey)
if err != nil {
return nil, err
}
blobService := client.GetBlobService()
container := blobService.GetContainerReference(containerName)
containers = append(containers, container)
}
exist, err := containers[0].Exists()
if err != nil {
return nil, err
}
if !exist {
return nil, fmt.Errorf("container %s does not exist", containerName)
}
azureStorage = &AzureStorage{
containers: containers,
}
return
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
type ListBlobsParameters struct {
Prefix string
Delimiter string
Marker string
Include string
MaxResults uint
Timeout uint
}
if len(dir) > 0 && dir[len(dir)-1] != '/' {
dir += "/"
}
dirLength := len(dir)
parameters := storage.ListBlobsParameters{
Prefix: dir,
Delimiter: "",
}
subDirs := make(map[string]bool)
for {
results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
if err != nil {
return nil, nil, err
}
if dir == "snapshots/" {
for _, blob := range results.Blobs {
name := strings.Split(blob.Name[dirLength:], "/")[0]
subDirs[name+"/"] = true
}
} else {
for _, blob := range results.Blobs {
files = append(files, blob.Name[dirLength:])
sizes = append(sizes, blob.Properties.ContentLength)
}
}
if results.NextMarker == "" {
break
}
parameters.Marker = results.NextMarker
}
if dir == "snapshots/" {
for subDir, _ := range subDirs {
files = append(files, subDir)
}
}
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
_, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
return err
}
// MoveFile renames the file.
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
source := storage.containers[threadIndex].GetBlobReference(from)
destination := storage.containers[threadIndex].GetBlobReference(to)
err = destination.Copy(source.GetURL(), nil)
if err != nil {
return err
}
return storage.DeleteFile(threadIndex, from)
}
// CreateDirectory creates a new directory.
func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err error) {
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
blob := storage.containers[threadIndex].GetBlobReference(filePath)
err = blob.GetProperties(nil)
if err != nil {
if strings.Contains(err.Error(), "404") {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
return true, false, blob.Properties.ContentLength, nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *AzureStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
if err != nil {
return "", false, 0, err
} else {
return filePath, exist, size, err
}
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.containers))
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
blob := storage.containers[threadIndex].GetBlobReference(filePath)
return blob.CreateBlockBlobFromReader(reader, nil)
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *AzureStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *AzureStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *AzureStorage) IsStrongConsistent() bool { return true }
// If the storage supports fast listing of file names.
func (storage *AzureStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *AzureStorage) EnableTestMode() {}


@@ -5,516 +5,511 @@
package duplicacy
import (
"bytes"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"strconv"
"strings"
"time"
)
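// B2Error is the error type returned by the Backblaze B2 API.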
type B2Error struct {
Status int
Code string
Message string
}
func (err *B2Error) Error() string {
return fmt.Sprintf("%d %s", err.Status, err.Message)
}
type B2UploadArgument struct {
URL string
Token string
}
var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account"
type B2Client struct {
HTTPClient *http.Client
AccountID string
ApplicationKey string
AuthorizationToken string
APIURL string
DownloadURL string
BucketName string
BucketID string
UploadURL string
UploadToken string
TestMode bool
}
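// NewB2Client creates a B2 client with the given account id and application key.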
func NewB2Client(accountID string, applicationKey string) *B2Client {
client := &B2Client{
HTTPClient: http.DefaultClient,
AccountID: accountID,
ApplicationKey: applicationKey,
}
return client
}
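// retry sleeps before the next attempt, honoring the Retry-After response header when present and otherwise doubling 'backoff'.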
func (client *B2Client) retry(backoff int, response *http.Response) int {
if response != nil {
if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
retryAfter, _ := strconv.Atoi(backoffList[0])
if retryAfter >= 1 {
time.Sleep(time.Duration(retryAfter) * time.Second)
return 0
}
}
}
if backoff == 0 {
backoff = 1
} else {
backoff *= 2
}
time.Sleep(time.Duration(backoff) * time.Second)
return backoff
}
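// call sends a request to 'url', re-authorizing the account on 401 responses and backing off on 408, 429, and 5xx responses.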
func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int64, error) {
var response *http.Response
backoff := 0
for i := 0; i < 8; i++ {
var inputReader *bytes.Reader
method := "POST"
switch input.(type) {
default:
jsonInput, err := json.Marshal(input)
if err != nil {
return nil, 0, err
}
inputReader = bytes.NewReader(jsonInput)
case []byte:
inputReader = bytes.NewReader(input.([]byte))
case int:
method = "GET"
inputReader = bytes.NewReader([]byte(""))
}
request, err := http.NewRequest(method, url, inputReader)
if err != nil {
return nil, 0, err
}
if url == B2AuthorizationURL {
request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.AccountID+":"+client.ApplicationKey)))
} else {
request.Header.Set("Authorization", client.AuthorizationToken)
}
if client.TestMode {
r := rand.Float32()
if r < 0.5 {
request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
} else {
request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
}
}
response, err = client.HTTPClient.Do(request)
if err != nil {
if url != B2AuthorizationURL {
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned an error: %v", url, err)
backoff = client.retry(backoff, response)
continue
}
return nil, 0, err
}
if response.StatusCode < 300 {
return response.Body, response.ContentLength, nil
}
LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
io.Copy(ioutil.Discard, response.Body)
response.Body.Close()
if response.StatusCode == 401 {
if url == B2AuthorizationURL {
return nil, 0, fmt.Errorf("Authorization failure")
}
client.AuthorizeAccount()
continue
} else if response.StatusCode == 403 {
if !client.TestMode {
return nil, 0, fmt.Errorf("B2 cap exceeded")
}
continue
} else if response.StatusCode == 429 || response.StatusCode == 408 {
backoff = client.retry(backoff, response)
continue
} else if response.StatusCode >= 500 && response.StatusCode <= 599 {
backoff = client.retry(backoff, response)
continue
} else {
LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
backoff = client.retry(backoff, response)
continue
}
defer response.Body.Close()
e := &B2Error{}
if err := json.NewDecoder(response.Body).Decode(e); err != nil {
return nil, 0, err
}
return nil, 0, e
}
return nil, 0, fmt.Errorf("Maximum backoff reached")
}
type B2AuthorizeAccountOutput struct {
AccountID string
AuthorizationToken string
APIURL string
DownloadURL string
}
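// AuthorizeAccount logs in to B2 and saves the authorization token along with the API and download URLs.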
func (client *B2Client) AuthorizeAccount() (err error) {
readCloser, _, err := client.call(B2AuthorizationURL, make(map[string]string))
if err != nil {
return err
}
defer readCloser.Close()
output := &B2AuthorizeAccountOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return err
}
client.AuthorizationToken = output.AuthorizationToken
client.APIURL = output.APIURL
client.DownloadURL = output.DownloadURL
client.AuthorizationToken = output.AuthorizationToken
client.APIURL = output.APIURL
client.DownloadURL = output.DownloadURL
return nil
return nil
}
type ListBucketOutput struct {
	AccountID  string
BucketID string
BucketName string
BucketType string
}
func (client *B2Client) FindBucket(bucketName string) (err error) {
	input := make(map[string]string)
	input["accountId"] = client.AccountID

	url := client.APIURL + "/b2api/v1/b2_list_buckets"

	readCloser, _, err := client.call(url, input)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	output := make(map[string][]ListBucketOutput, 0)

	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
		return err
	}

	for _, bucket := range output["buckets"] {
		if bucket.BucketName == bucketName {
			client.BucketName = bucket.BucketName
			client.BucketID = bucket.BucketID
			break
		}
	}

	if client.BucketID == "" {
		return fmt.Errorf("Bucket %s not found", bucketName)
	}
return nil
}
type B2Entry struct {
FileID string
FileName string
Action string
Size int64
UploadTimestamp int64
}
type B2ListFileNamesOutput struct {
Files []*B2Entry
NextFileName string
NextFileId string
}
func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {
maxFileCount := 1000
if singleFile {
if includeVersions {
maxFileCount = 4
if client.TestMode {
maxFileCount = 1
}
} else {
maxFileCount = 1
}
} else if client.TestMode {
maxFileCount = 10
}
	input := make(map[string]interface{})
	input["bucketId"] = client.BucketID
	input["startFileName"] = startFileName
	input["maxFileCount"] = maxFileCount

	for {
		url := client.APIURL + "/b2api/v1/b2_list_file_names"
		if includeVersions {
			url = client.APIURL + "/b2api/v1/b2_list_file_versions"
		}
		readCloser, _, err := client.call(url, input)
		if err != nil {
			return nil, err
		}

		defer readCloser.Close()

		output := B2ListFileNamesOutput{}

		if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
			return nil, err
		}

		ioutil.ReadAll(readCloser)

		if startFileName == "" {
			files = append(files, output.Files...)
		} else {
			for _, file := range output.Files {
				if singleFile {
					if file.FileName == startFileName {
						files = append(files, file)
						if !includeVersions {
							output.NextFileName = ""
							break
						}
					} else {
						output.NextFileName = ""
						break
					}
				} else {
					if strings.HasPrefix(file.FileName, startFileName) {
						files = append(files, file)
					} else {
						output.NextFileName = ""
						break
					}
				}
			}
		}

		if len(output.NextFileName) == 0 {
			break
		}

		input["startFileName"] = output.NextFileName
		if includeVersions {
			input["startFileId"] = output.NextFileId
		}
	}

	return files, nil
}
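// Usage sketch for ListFileNames (illustrative only): check whether the most
// recent version of a single file is a "hide" marker. 'client' is assumed to
// be an authorized *B2Client whose BucketID was resolved by FindBucket.
//
//	entries, err := client.ListFileNames("chunks/0123abcd", true, true)
//	if err == nil && len(entries) > 0 && entries[0].Action == "hide" {
//		// the newest version is a hide marker, i.e. the chunk is a fossil
//	}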
func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {
	input := make(map[string]string)
	input["fileName"] = fileName
	input["fileId"] = fileID

	url := client.APIURL + "/b2api/v1/b2_delete_file_version"
	readCloser, _, err := client.call(url, input)
	if err != nil {
		return err
	}

	readCloser.Close()
	return nil
}
type B2HideFileOutput struct {
FileID string
}
func (client *B2Client) HideFile(fileName string) (fileID string, err error) {
	input := make(map[string]string)
	input["bucketId"] = client.BucketID
	input["fileName"] = fileName

	url := client.APIURL + "/b2api/v1/b2_hide_file"
	readCloser, _, err := client.call(url, input)
	if err != nil {
		return "", err
	}

	defer readCloser.Close()

	output := &B2HideFileOutput{}

	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
		return "", err
	}

	readCloser.Close()
	return output.FileID, nil
}
func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, error) {
	url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath

	return client.call(url, 0)
}
type B2GetUploadArgumentOutput struct {
	BucketID           string
	UploadURL          string
	AuthorizationToken string
}
func (client *B2Client) getUploadURL() error {
	input := make(map[string]string)
	input["bucketId"] = client.BucketID

	url := client.APIURL + "/b2api/v1/b2_get_upload_url"
	readCloser, _, err := client.call(url, input)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	output := &B2GetUploadArgumentOutput{}

	if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
		return err
	}

	client.UploadURL = output.UploadURL
	client.UploadToken = output.AuthorizationToken

	return nil
}
func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {
	hasher := sha1.New()
	hasher.Write(content)
	hash := hex.EncodeToString(hasher.Sum(nil))

	headers := make(map[string]string)
	headers["X-Bz-File-Name"] = filePath
	headers["Content-Type"] = "application/octet-stream"
	headers["X-Bz-Content-Sha1"] = hash
	var response *http.Response

	backoff := 0
	for i := 0; i < 8; i++ {

		if client.UploadURL == "" || client.UploadToken == "" {
			err = client.getUploadURL()
			if err != nil {
				return err
			}
		}

		request, err := http.NewRequest("POST", client.UploadURL, CreateRateLimitedReader(content, rateLimit))
		if err != nil {
			return err
		}
		request.ContentLength = int64(len(content))

		request.Header.Set("Authorization", client.UploadToken)
		request.Header.Set("X-Bz-File-Name", filePath)
		request.Header.Set("Content-Type", "application/octet-stream")
		request.Header.Set("X-Bz-Content-Sha1", hash)

		for key, value := range headers {
			request.Header.Set(key, value)
		}

		if client.TestMode {
			r := rand.Float32()
			if r < 0.8 {
				request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
			} else if r < 0.9 {
				request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
			} else {
				request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
			}
		}

		response, err = client.HTTPClient.Do(request)
		if err != nil {
			LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned an error: %v", client.UploadURL, err)
			backoff = client.retry(backoff, response)
			client.UploadURL = ""
			client.UploadToken = ""
			continue
		}

		io.Copy(ioutil.Discard, response.Body)
		response.Body.Close()

		if response.StatusCode < 300 {
			return nil
		}

		LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
if response.StatusCode == 401 {
LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorizatoin required")
client.UploadURL = ""
client.UploadToken = ""
continue
} else if response.StatusCode == 403 {
if !client.TestMode {
return fmt.Errorf("B2 cap exceeded")
}
continue
} else {
LOG_INFO("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
backoff = client.retry(backoff, response)
client.UploadURL = ""
client.UploadToken = ""
}
}
return fmt.Errorf("Maximum backoff reached")
return fmt.Errorf("Maximum backoff reached")
}
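// Usage sketch (illustrative only): upload a small blob through the retrying
// uploader above. The rateLimit argument is forwarded to
// CreateRateLimitedReader; its exact unit is an assumption here, so treat
// this as a sketch rather than a reference.
//
//	content := []byte("snapshot metadata")
//	if err := client.UploadFile("snapshots/host1/1", content, 100); err != nil {
//		// cap-exceeded and expired-token cases were already retried above
//	}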

View File

@@ -5,129 +5,129 @@
package duplicacy
import (
"testing"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"testing"
crypto_rand "crypto/rand"
"math/rand"
"io"
"io/ioutil"
crypto_rand "crypto/rand"
"io"
"io/ioutil"
"math/rand"
)
func createB2ClientForTest(t *testing.T) (*B2Client, string) {
	config, err := ioutil.ReadFile("test_storage.conf")
	if err != nil {
		t.Errorf("Failed to read config file: %v", err)
		return nil, ""
	}

	storages := make(map[string]map[string]string)

	err = json.Unmarshal(config, &storages)
	if err != nil {
		t.Errorf("Failed to parse config file: %v", err)
		return nil, ""
	}

	b2, found := storages["b2"]
	if !found {
		t.Errorf("Failed to find b2 config")
		return nil, ""
	}

	return NewB2Client(b2["account"], b2["key"]), b2["bucket"]
}
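// Expected shape of test_storage.conf, inferred from the
// map[string]map[string]string decode above (an assumption, not a documented
// format):
//
//	{
//	    "b2": {"account": "...", "key": "...", "bucket": "..."}
//	}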
func TestB2Client(t *testing.T) {
	b2Client, bucket := createB2ClientForTest(t)
	if b2Client == nil {
		return
	}

	b2Client.TestMode = true

	err := b2Client.AuthorizeAccount()
	if err != nil {
		t.Errorf("Failed to authorize the b2 account: %v", err)
		return
	}

	err = b2Client.FindBucket(bucket)
	if err != nil {
		t.Errorf("Failed to find bucket '%s': %v", bucket, err)
		return
	}

	testDirectory := "b2client_test/"

	files, err := b2Client.ListFileNames(testDirectory, false, false)
	if err != nil {
		t.Errorf("Failed to list files: %v", err)
		return
	}

	for _, file := range files {
		err = b2Client.DeleteFile(file.FileName, file.FileID)
		if err != nil {
			t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
		}
	}

	maxSize := 10000
	for i := 0; i < 20; i++ {
		size := rand.Int()%maxSize + 1
		content := make([]byte, size)
		_, err := crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		hash := sha256.Sum256(content)
		name := hex.EncodeToString(hash[:])

		err = b2Client.UploadFile(testDirectory+name, content, 100)
		if err != nil {
			t.Errorf("Error uploading file '%s': %v", name, err)
			return
		}
	}

	files, err = b2Client.ListFileNames(testDirectory, false, false)
	if err != nil {
		t.Errorf("Failed to list files: %v", err)
		return
	}

	for _, file := range files {
		readCloser, _, err := b2Client.DownloadFile(file.FileName)
		if err != nil {
			t.Errorf("Error downloading file '%s': %v", file.FileName, err)
			return
		}

		defer readCloser.Close()

		hasher := sha256.New()
		_, err = io.Copy(hasher, readCloser)

		hash := hex.EncodeToString(hasher.Sum(nil))

		if testDirectory+hash != file.FileName {
			t.Errorf("File %s has hash %s", file.FileName, hash)
		}
	}

	for _, file := range files {
		err = b2Client.DeleteFile(file.FileName, file.FileID)
		if err != nil {
			t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
		}
	}
}

View File

@@ -5,251 +5,251 @@
package duplicacy
import (
"strings"
"strings"
)
type B2Storage struct {
	RateLimitedStorage

	clients []*B2Client
}
// CreateB2Storage creates a B2 storage object.
func CreateB2Storage(accountID string, applicationKey string, bucket string, threads int) (storage *B2Storage, err error) {
	var clients []*B2Client

	for i := 0; i < threads; i++ {
		client := NewB2Client(accountID, applicationKey)

		err = client.AuthorizeAccount()
		if err != nil {
			return nil, err
		}

		err = client.FindBucket(bucket)
		if err != nil {
			return nil, err
		}

		clients = append(clients, client)
	}

	storage = &B2Storage{
		clients: clients,
	}
	return storage, nil
}
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}
	length := len(dir) + 1

	includeVersions := false
	if dir == "chunks" {
		includeVersions = true
	}

	entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)
	if err != nil {
		return nil, nil, err
	}

	if dir == "snapshots" {

		subDirs := make(map[string]bool)

		for _, entry := range entries {
			name := entry.FileName[length:]
			subDir := strings.Split(name, "/")[0]
			subDirs[subDir+"/"] = true
		}

		for subDir, _ := range subDirs {
			files = append(files, subDir)
		}
	} else if dir == "chunks" {
		lastFile := ""
		for _, entry := range entries {
			if entry.FileName == lastFile {
				continue
			}
			lastFile = entry.FileName
			if entry.Action == "hide" {
				files = append(files, entry.FileName[length:]+".fsl")
			} else {
				files = append(files, entry.FileName[length:])
			}
			sizes = append(sizes, entry.Size)
		}
	} else {
		for _, entry := range entries {
			files = append(files, entry.FileName[length:])
		}
	}

	return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err error) {
if strings.HasSuffix(filePath, ".fsl") {
filePath = filePath[:len(filePath) - len(".fsl")]
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
if err != nil {
return err
}
if strings.HasSuffix(filePath, ".fsl") {
filePath = filePath[:len(filePath)-len(".fsl")]
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
if err != nil {
return err
}
toBeDeleted := false
toBeDeleted := false
for _, entry := range entries {
if entry.FileName != filePath || (!toBeDeleted && entry.Action != "hide" ) {
continue
}
for _, entry := range entries {
if entry.FileName != filePath || (!toBeDeleted && entry.Action != "hide") {
continue
}
toBeDeleted = true
toBeDeleted = true
err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
if err != nil {
return err
}
}
err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
if err != nil {
return err
}
}
return nil
return nil
} else {
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
if err != nil {
return err
}
} else {
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
if err != nil {
return err
}
if len(entries) == 0 {
return nil
}
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
}
if len(entries) == 0 {
return nil
}
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
}
}
// MoveFile renames the file.
func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err error) {
filePath := ""
filePath := ""
if strings.HasSuffix(from, ".fsl") {
filePath = to
if from != to + ".fsl" {
filePath = ""
}
} else if strings.HasSuffix(to, ".fsl") {
filePath = from
if to != from + ".fsl" {
filePath = ""
}
}
if strings.HasSuffix(from, ".fsl") {
filePath = to
if from != to+".fsl" {
filePath = ""
}
} else if strings.HasSuffix(to, ".fsl") {
filePath = from
if to != from+".fsl" {
filePath = ""
}
}
if filePath == "" {
LOG_FATAL("STORAGE_MOVE", "Moving file '%s' to '%s' is not supported", from, to)
return nil
}
if filePath == "" {
LOG_FATAL("STORAGE_MOVE", "Moving file '%s' to '%s' is not supported", from, to)
return nil
}
if filePath == from {
_, err = storage.clients[threadIndex].HideFile(from)
return err
} else {
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
if err != nil {
return err
}
if len(entries) == 0 || entries[0].FileName != filePath || entries[0].Action != "hide" {
return nil
}
if filePath == from {
_, err = storage.clients[threadIndex].HideFile(from)
return err
} else {
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
if err != nil {
return err
}
if len(entries) == 0 || entries[0].FileName != filePath || entries[0].Action != "hide" {
return nil
}
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
}
return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
}
}
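// MoveFile above deliberately supports only the two fossil transitions
// (illustrative):
//
//	storage.MoveFile(0, "chunks/ab", "chunks/ab.fsl") // fossilize: hide the chunk
//	storage.MoveFile(0, "chunks/ab.fsl", "chunks/ab") // resurrect: drop the hide marker
//
// Any other rename is rejected with the STORAGE_MOVE fatal error.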
// CreateDirectory creates a new directory.
func (storage *B2Storage) CreateDirectory(threadIndex int, dir string) (err error) {
return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	isFossil := false
	if strings.HasSuffix(filePath, ".fsl") {
		isFossil = true
		filePath = filePath[:len(filePath)-len(".fsl")]
	}

	entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
	if err != nil {
		return false, false, 0, err
	}

	if len(entries) == 0 || entries[0].FileName != filePath {
		return false, false, 0, nil
	}

	if isFossil {
		if entries[0].Action == "hide" {
			return true, false, entries[0].Size, nil
		} else {
			return false, false, 0, nil
		}
	}

	return true, false, entries[0].Size, nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *B2Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
return filePath, exist, size, err
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
return filePath, exist, size, err
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
	return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *B2Storage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *B2Storage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *B2Storage) IsStrongConsistent() bool { return true }
// If the storage supports fast listing of files names.
func (storage *B2Storage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *B2Storage) EnableTestMode() {
	for _, client := range storage.clients {
		client.TestMode = true
	}
}
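// Construction sketch for this backend (illustrative only; the real entry
// point that resolves credentials lives elsewhere in duplicacy):
//
//	storage, err := CreateB2Storage(accountID, applicationKey, "my-bucket", 4)
//	if err == nil {
//		storage.EnableTestMode() // tests only: injects X-Bz-Test-Mode failures
//	}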

File diff suppressed because it is too large

View File

@@ -5,339 +5,338 @@
package duplicacy
import (
"os"
"io"
"path"
"testing"
"math/rand"
"encoding/hex"
"time"
"crypto/sha256"
crypto_rand "crypto/rand"
crypto_rand "crypto/rand"
"crypto/sha256"
"encoding/hex"
"io"
"math/rand"
"os"
"path"
"testing"
"time"
"runtime/debug"
"runtime/debug"
)
func createRandomFile(path string, maxSize int) {
	file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
	if err != nil {
		LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
		return
	}

	defer file.Close()

	size := maxSize/2 + rand.Int()%(maxSize/2)

	buffer := make([]byte, 32*1024)
	for size > 0 {
		bytes := size
		if bytes > cap(buffer) {
			bytes = cap(buffer)
		}
		crypto_rand.Read(buffer[:bytes])
		bytes, err = file.Write(buffer[:bytes])
		if err != nil {
			LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
			return
		}
		size -= bytes
	}
}
func modifyFile(path string, portion float32) {
	stat, err := os.Stat(path)
	if err != nil {
		LOG_ERROR("MODIFY_FILE", "Can't stat the file %s: %v", path, err)
		return
	}

	modifiedTime := stat.ModTime()

	file, err := os.OpenFile(path, os.O_WRONLY, 0644)
	if err != nil {
		LOG_ERROR("MODIFY_FILE", "Can't open %s for writing: %v", path, err)
		return
	}

	defer func() {
		if file != nil {
			file.Close()
		}
	}()

	size, err := file.Seek(0, 2)
	if err != nil {
		LOG_ERROR("MODIFY_FILE", "Can't seek to the end of the file %s: %v", path, err)
		return
	}

	length := int(float32(size) * portion)
	start := rand.Int() % (int(size) - length)

	_, err = file.Seek(int64(start), 0)
	if err != nil {
		LOG_ERROR("MODIFY_FILE", "Can't seek to the offset %d: %v", start, err)
		return
	}

	buffer := make([]byte, length)
	crypto_rand.Read(buffer)

	_, err = file.Write(buffer)
	if err != nil {
		LOG_ERROR("MODIFY_FILE", "Failed to write to %s: %v", path, err)
		return
	}

	file.Close()
	file = nil

	// Add 2 seconds to the modified time for the changes to be detectable in quick mode.
	modifiedTime = modifiedTime.Add(time.Second * 2)
	err = os.Chtimes(path, modifiedTime, modifiedTime)

	if err != nil {
		LOG_ERROR("MODIFY_FILE", "Failed to change the modification time of %s: %v", path, err)
		return
	}
}
func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
	stat, err := os.Stat(path)
	if exists {
		if err != nil {
			t.Errorf("%s does not exist: %v", path, err)
		} else if isDir {
			if !stat.Mode().IsDir() {
				t.Errorf("%s is not a directory", path)
			}
		} else {
			if stat.Mode().IsDir() {
				t.Errorf("%s is not a file", path)
			}
		}
	} else {
		if err == nil || !os.IsNotExist(err) {
			t.Errorf("%s may exist: %v", path, err)
		}
	}
}
func truncateFile(path string) {
	file, err := os.OpenFile(path, os.O_WRONLY, 0644)
	if err != nil {
		LOG_ERROR("TRUNCATE_FILE", "Can't open %s for writing: %v", path, err)
		return
	}

	defer file.Close()

	oldSize, err := file.Seek(0, 2)
	if err != nil {
		LOG_ERROR("TRUNCATE_FILE", "Can't seek to the end of the file %s: %v", path, err)
		return
	}

	newSize := rand.Int63() % oldSize

	err = file.Truncate(newSize)
	if err != nil {
		LOG_ERROR("TRUNCATE_FILE", "Can't truncate the file %s to size %d: %v", path, newSize, err)
		return
	}
}
func getFileHash(path string) (hash string) {
	file, err := os.Open(path)
	if err != nil {
		LOG_ERROR("FILE_HASH", "Can't open %s for reading: %v", path, err)
		return ""
	}

	defer file.Close()

	hasher := sha256.New()
	_, err = io.Copy(hasher, file)
	if err != nil {
		LOG_ERROR("FILE_HASH", "Can't read file %s: %v", path, err)
		return ""
	}

	return hex.EncodeToString(hasher.Sum(nil))
}
func TestBackupManager(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	setTestingT(t)
	SetLoggingLevel(INFO)

	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()
	testDir := path.Join(os.TempDir(), "duplicacy_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	os.Mkdir(testDir+"/repository1", 0700)
	os.Mkdir(testDir+"/repository1/dir1", 0700)
	os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
	os.Mkdir(testDir+"/repository2", 0700)
	os.Mkdir(testDir+"/repository2/.duplicacy", 0700)
	maxFileSize := 1000000
	//maxFileSize := 200000

	createRandomFile(testDir+"/repository1/file1", maxFileSize)
	createRandomFile(testDir+"/repository1/file2", maxFileSize)
	createRandomFile(testDir+"/repository1/dir1/file3", maxFileSize)
	threads := 1

	storage, err := loadStorage(testDir+"/storage", threads)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}

	delay := 0
	if _, ok := storage.(*ACDStorage); ok {
		delay = 1
	}
	if _, ok := storage.(*OneDriveStorage); ok {
		delay = 5
	}
password := "duplicacy"
password := "duplicacy"
cleanStorage(storage)
cleanStorage(storage)
	time.Sleep(time.Duration(delay) * time.Second)
	if testFixedChunkSize {
		if !ConfigStorage(storage, 100, 64*1024, 64*1024, 64*1024, password, nil) {
			t.Errorf("Failed to initialize the storage")
		}
	} else {
		if !ConfigStorage(storage, 100, 64*1024, 256*1024, 16*1024, password, nil) {
			t.Errorf("Failed to initialize the storage")
		}
	}

	time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password)
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager := CreateBackupManager("host1", storage, testDir, password)
backupManager.SetupSnapshotCache("default")
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "first", false, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir + "/repository2", threads, /*inPlace=*/false, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
t.Errorf("File %s does not exist", f)
continue
}
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
t.Errorf("File %s does not exist", f)
continue
}
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
modifyFile(testDir+"/repository1/file1", 0.1)
modifyFile(testDir+"/repository1/file2", 0.2)
modifyFile(testDir+"/repository1/dir1/file3", 0.3)
modifyFile(testDir + "/repository1/file1", 0.1)
modifyFile(testDir + "/repository1/file2", 0.2)
modifyFile(testDir + "/repository1/dir1/file3", 0.3)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
/*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)
SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
backupManager.Backup(testDir + "/repository1", /*quickMode=*/true, threads, "second", false, false)
time.Sleep(time.Duration(delay) * time.Second)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir + "/repository2", 2, /*inPlace=*/true, /*quickMode=*/true, threads, /*overwrite=*/true,
/*deleteMode=*/false, /*showStatistics=*/false, /*patterns=*/nil)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
	// Truncate file2 and add a few empty directories
	truncateFile(testDir + "/repository1/file2")
	os.Mkdir(testDir+"/repository1/dir2", 0700)
	os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
	os.Mkdir(testDir+"/repository1/dir4", 0700)

	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
	backupManager.Backup(testDir+"/repository1", /*quickMode=*/ false, threads, "third", false, false)
	time.Sleep(time.Duration(delay) * time.Second)
	// Create some directories and files under repository2 that will be deleted during restore
	os.Mkdir(testDir+"/repository2/dir5", 0700)
	os.Mkdir(testDir+"/repository2/dir5/dir6", 0700)
	os.Mkdir(testDir+"/repository2/dir7", 0700)
	createRandomFile(testDir+"/repository2/file4", 100)
	createRandomFile(testDir+"/repository2/dir5/file5", 100)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
/*deleteMode=*/ true /*showStatistics=*/, false /*patterns=*/, nil)
SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
backupManager.Restore(testDir + "/repository2", 3, /*inPlace=*/true, /*quickMode=*/false, threads, /*overwrite=*/true,
/*deleteMode=*/true, /*showStatistics=*/false, /*patterns=*/nil)
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
	// These files/dirs should not exist because deleteMode == true
	checkExistence(t, testDir+"/repository2/dir5", false, false)
	checkExistence(t, testDir+"/repository2/dir5/dir6", false, false)
	checkExistence(t, testDir+"/repository2/dir7", false, false)
	checkExistence(t, testDir+"/repository2/file4", false, false)
	checkExistence(t, testDir+"/repository2/dir5/file5", false, false)
	// These empty dirs should exist
	checkExistence(t, testDir+"/repository2/dir2", true, true)
	checkExistence(t, testDir+"/repository2/dir2/dir3", true, true)
	checkExistence(t, testDir+"/repository2/dir4", true, true)
	// Remove file2 and dir1/file3 and restore them from revision 3
	os.Remove(testDir + "/repository1/file2")
	os.Remove(testDir + "/repository1/dir1/file3")
	SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
	backupManager.Restore(testDir+"/repository1", 3, /*inPlace=*/ true, /*quickMode=*/ false, threads, /*overwrite=*/ true,
		/*deleteMode=*/ false, /*showStatistics=*/ false, /*patterns=*/ []string{"+file2", "+dir1/file3", "-*"})
for _, f := range []string{"file1", "file2", "dir1/file3"} {
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
for _, f := range []string{ "file1", "file2", "dir1/file3" } {
hash1 := getFileHash(testDir + "/repository1/" + f)
hash2 := getFileHash(testDir + "/repository2/" + f)
if hash1 != hash2 {
t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
}
}
/*buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
fmt.Printf("%s", buf)*/
/*buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
fmt.Printf("%s", buf)*/
}

View File

@@ -5,59 +5,58 @@
package duplicacy
import (
"io"
"fmt"
"hash"
"bytes"
"runtime"
"crypto/cipher"
"crypto/aes"
"crypto/rand"
"encoding/hex"
"compress/zlib"
"github.com/bkaradzic/go-lz4"
"bytes"
"compress/zlib"
"crypto/aes"
"crypto/cipher"
"crypto/rand"
"encoding/hex"
"fmt"
"hash"
"io"
"runtime"
"github.com/bkaradzic/go-lz4"
)
// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
// we maintain a pool of previously used buffers.
var chunkBufferPool chan *bytes.Buffer = make(chan *bytes.Buffer, runtime.NumCPU()*16)
func AllocateChunkBuffer() (buffer *bytes.Buffer) {
	select {
	case buffer = <-chunkBufferPool:
	default:
		buffer = new(bytes.Buffer)
	}
	return buffer
}
func ReleaseChunkBuffer(buffer *bytes.Buffer) {
	select {
	case chunkBufferPool <- buffer:
	default:
		LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk buffer due to a full pool")
	}
}
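// Typical acquire/use/release cycle for the pool above (a sketch; CreateChunk
// below is the real consumer):
//
//	buffer := AllocateChunkBuffer()
//	defer ReleaseChunkBuffer(buffer)
//	buffer.Reset()
//	buffer.Write(data) // reuse the buffer's capacity across chunks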
// Chunk is the object being passed between the chunk maker, the chunk uploader, and chunk downloader. It can be
// read and written like a bytes.Buffer, and provides convenient functions to calculate the hash and id of the chunk.
type Chunk struct {
	buffer *bytes.Buffer // Where the actual data is stored. It may be nil for hash-only chunks, where chunks
	// are only used to compute the hashes

	size int // The size of data stored. This field is needed if buffer is nil

	hasher hash.Hash // Keeps track of the hash of data stored in the buffer. It may be nil, since sometimes
	// it isn't necessary to compute the hash, for instance, when the encrypted data is being
	// read into the primary buffer

	hash []byte // The hash of the chunk data. It is always in the binary format
	id   string // The id of the chunk data (used as the file name for saving the chunk); always in hex format

	config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
	// by the config
}
// Magic word to identify a duplicacy format encrypted file, plus a version number.
@@ -66,205 +65,205 @@ var ENCRYPTION_HEADER = "duplicacy\000"
// CreateChunk creates a new chunk.
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
	var buffer *bytes.Buffer

	if bufferNeeded {
		buffer = AllocateChunkBuffer()
		buffer.Reset()
		if buffer.Cap() < config.MaximumChunkSize {
			buffer.Grow(config.MaximumChunkSize - buffer.Cap())
		}
	}

	return &Chunk{
		buffer: buffer,
		config: config,
	}
}
// GetLength returns the length of available data
func (chunk *Chunk) GetLength() int {
	if chunk.buffer != nil {
		return len(chunk.buffer.Bytes())
	} else {
		return chunk.size
	}
}
// GetBytes returns data available in this chunk
func (chunk *Chunk) GetBytes() []byte {
	return chunk.buffer.Bytes()
}
// Reset makes the chunk reusable by clearing the existing data in the buffers. 'hashNeeded' indicates whether the
// hash of the new data to be read is needed. If the data to be read in is encrypted, there is no need to
// calculate the hash so hashNeeded should be 'false'.
func (chunk *Chunk) Reset(hashNeeded bool) {
	if chunk.buffer != nil {
		chunk.buffer.Reset()
	}
	if hashNeeded {
		chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
	} else {
		chunk.hasher = nil
	}
	chunk.hash = nil
	chunk.id = ""
	chunk.size = 0
}
// Write implements the Writer interface.
func (chunk *Chunk) Write(p []byte) (int, error) {

	// buffer may be nil, when the chunk is used for computing the hash only.
	if chunk.buffer == nil {
		chunk.size += len(p)
	} else {
		chunk.buffer.Write(p)
	}

	// hasher may be nil, when the chunk is used to store encrypted content
	if chunk.hasher != nil {
		chunk.hasher.Write(p)
	}
	return len(p), nil
}
// GetHash returns the chunk hash.
func (chunk *Chunk) GetHash() string {
	if len(chunk.hash) == 0 {
		chunk.hash = chunk.hasher.Sum(nil)
	}

	return string(chunk.hash)
}
// GetID returns the chunk id.
func (chunk *Chunk) GetID() string {
	if len(chunk.id) == 0 {
		if len(chunk.hash) == 0 {
			chunk.hash = chunk.hasher.Sum(nil)
		}

		hasher := chunk.config.NewKeyedHasher(chunk.config.IDKey)
		hasher.Write([]byte(chunk.hash))
		chunk.id = hex.EncodeToString(hasher.Sum(nil))
	}

	return chunk.id
}
func (chunk *Chunk) VerifyID() {
	hasher := chunk.config.NewKeyedHasher(chunk.config.HashKey)
	hasher.Write(chunk.buffer.Bytes())
	hash := hasher.Sum(nil)
	hasher = chunk.config.NewKeyedHasher(chunk.config.IDKey)
	hasher.Write([]byte(hash))
	chunkID := hex.EncodeToString(hasher.Sum(nil))
	if chunkID != chunk.GetID() {
		LOG_ERROR("CHUNK_ID", "The chunk id should be %s instead of %s, length: %d", chunkID, chunk.GetID(), len(chunk.buffer.Bytes()))
	}
}
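// Derivation chain implemented by GetHash/GetID/VerifyID above, as a sketch:
//
//	hash = KeyedHash(HashKey, data)     // chunk.GetHash(), binary
//	id   = hex(KeyedHash(IDKey, hash))  // chunk.GetID(), used as the file name
//
// KeyedHash stands in for config.NewKeyedHasher; the concrete algorithm is
// chosen by the Config object, not fixed here.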
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {
	var aesBlock cipher.Block
	var gcm cipher.AEAD
	var nonce []byte
	var offset int

	encryptedBuffer := AllocateChunkBuffer()
	encryptedBuffer.Reset()
	defer func() {
		ReleaseChunkBuffer(encryptedBuffer)
	}()
	if len(encryptionKey) > 0 {

		key := encryptionKey

		if len(derivationKey) > 0 {
			hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
			hasher.Write(encryptionKey)
			key = hasher.Sum(nil)
		}

		aesBlock, err = aes.NewCipher(key)
		if err != nil {
			return err
		}

		gcm, err = cipher.NewGCM(aesBlock)
		if err != nil {
			return err
		}

		// Start with the magic number and the version number.
		encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))

		// Followed by the nonce
		nonce = make([]byte, gcm.NonceSize())
		_, err := rand.Read(nonce)
		if err != nil {
			return err
		}
		encryptedBuffer.Write(nonce)
		offset = encryptedBuffer.Len()
	}
// offset is either 0 or the length of header + nonce
	if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
		deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
		deflater.Write(chunk.buffer.Bytes())
		deflater.Close()
	} else if chunk.config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
		encryptedBuffer.Write([]byte("LZ4 "))
		// Make sure we have enough space in encryptedBuffer
		availableLength := encryptedBuffer.Cap() - len(encryptedBuffer.Bytes())
		maximumLength := lz4.CompressBound(len(chunk.buffer.Bytes()))
		if availableLength < maximumLength {
			encryptedBuffer.Grow(maximumLength - availableLength)
		}
		written, err := lz4.Encode(encryptedBuffer.Bytes()[offset+4:], chunk.buffer.Bytes())
		if err != nil {
			return fmt.Errorf("LZ4 compression error: %v", err)
		}
		// written is actually encryptedBuffer[offset + 4:], but we need to move the write pointer
		// and this seems to be the only way
		encryptedBuffer.Write(written)
	} else {
		return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
	}
if len(encryptionKey) == 0 {
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
return nil
}
if len(encryptionKey) == 0 {
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
return nil
}
// PKCS7 is used. Compressed chunk sizes leak information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := dataLength % 256
if paddingLength == 0 {
paddingLength = 256
}
// PKCS7 is used. Compressed chunk sizes leak information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := dataLength % 256
if paddingLength == 0 {
paddingLength = 256
}
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
// The encrypted data will be appended to the duplicacy header and the nonce.
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset: offset + dataLength + paddingLength], nil)
// The encrypted data will be appended to the duplicacy header and the nonce.
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
encryptedBuffer.Truncate(len(encryptedBytes))
encryptedBuffer.Truncate(len(encryptedBytes))
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
return nil
return nil
}
@@ -272,111 +271,110 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err error) {
var offset int
var offset int
encryptedBuffer := AllocateChunkBuffer()
encryptedBuffer.Reset()
defer func() {
ReleaseChunkBuffer(encryptedBuffer)
} ()
encryptedBuffer := AllocateChunkBuffer()
encryptedBuffer.Reset()
defer func() {
ReleaseChunkBuffer(encryptedBuffer)
}()
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
if len(encryptionKey) > 0 {
if len(encryptionKey) > 0 {
key := encryptionKey
key := encryptionKey
if len(derivationKey) > 0 {
hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
hasher.Write(encryptionKey)
key = hasher.Sum(nil)
}
if len(derivationKey) > 0 {
hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
hasher.Write(encryptionKey)
key = hasher.Sum(nil)
}
aesBlock, err := aes.NewCipher(key)
if err != nil {
return err
}
aesBlock, err := aes.NewCipher(key)
if err != nil {
return err
}
gcm, err := cipher.NewGCM(aesBlock)
if err != nil {
return err
}
gcm, err := cipher.NewGCM(aesBlock)
if err != nil {
return err
}
headerLength := len(ENCRYPTION_HEADER)
offset = headerLength + gcm.NonceSize()
headerLength := len(ENCRYPTION_HEADER)
offset = headerLength + gcm.NonceSize()
if len(encryptedBuffer.Bytes()) < offset {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
if len(encryptedBuffer.Bytes()) < offset {
return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
}
if string(encryptedBuffer.Bytes()[:headerLength - 1]) != ENCRYPTION_HEADER[:headerLength - 1] {
return fmt.Errorf("The storage doesn't seem to be encrypted")
}
if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
return fmt.Errorf("The storage doesn't seem to be encrypted")
}
if encryptedBuffer.Bytes()[headerLength - 1] != 0 {
return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength - 1])
}
if encryptedBuffer.Bytes()[headerLength-1] != 0 {
return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
}
nonce := encryptedBuffer.Bytes()[headerLength: offset]
nonce := encryptedBuffer.Bytes()[headerLength:offset]
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset:], nil)
decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
encryptedBuffer.Bytes()[offset:], nil)
if err != nil {
return err
}
if err != nil {
return err
}
paddingLength := int(decryptedBytes[len(decryptedBytes) - 1])
if paddingLength == 0 {
paddingLength = 256
}
if len(decryptedBytes) <= paddingLength {
return fmt.Errorf("Incorrect padding length %d out of %d bytes", paddingLength, len(decryptedBytes))
}
paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
if paddingLength == 0 {
paddingLength = 256
}
if len(decryptedBytes) <= paddingLength {
return fmt.Errorf("Incorrect padding length %d out of %d bytes", paddingLength, len(decryptedBytes))
}
for i := 0; i < paddingLength; i++ {
padding := decryptedBytes[len(decryptedBytes) - 1 - i]
if padding != byte(paddingLength) {
return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
decryptedBytes[len(decryptedBytes) - paddingLength:])
}
}
for i := 0; i < paddingLength; i++ {
padding := decryptedBytes[len(decryptedBytes)-1-i]
if padding != byte(paddingLength) {
return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
decryptedBytes[len(decryptedBytes)-paddingLength:])
}
}
encryptedBuffer.Truncate(len(decryptedBytes) - paddingLength)
}
encryptedBuffer.Truncate(len(decryptedBytes) - paddingLength)
}
encryptedBuffer.Read(encryptedBuffer.Bytes()[:offset])
encryptedBuffer.Read(encryptedBuffer.Bytes()[:offset])
compressed := encryptedBuffer.Bytes()
if len(compressed) > 4 && string(compressed[:4]) == "LZ4 " {
chunk.buffer.Reset()
decompressed, err := lz4.Decode(chunk.buffer.Bytes(), encryptedBuffer.Bytes()[4:])
if err != nil {
return err
}
compressed := encryptedBuffer.Bytes()
if len(compressed) > 4 && string(compressed[:4]) == "LZ4 " {
chunk.buffer.Reset()
decompressed, err := lz4.Decode(chunk.buffer.Bytes(), encryptedBuffer.Bytes()[4:])
if err != nil {
return err
}
chunk.buffer.Write(decompressed)
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hasher.Write(decompressed)
chunk.hash = nil
return nil
}
inflater, err := zlib.NewReader(encryptedBuffer)
if err != nil {
return err
}
chunk.buffer.Write(decompressed)
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hasher.Write(decompressed)
chunk.hash = nil
return nil
}
inflater, err := zlib.NewReader(encryptedBuffer)
if err != nil {
return err
}
defer inflater.Close()
defer inflater.Close()
chunk.buffer.Reset()
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hash = nil
chunk.buffer.Reset()
chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
chunk.hash = nil
if _, err = io.Copy(chunk, inflater); err != nil {
return err
}
if _, err = io.Copy(chunk, inflater); err != nil {
return err
}
return nil
return nil
}
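
Decrypt validates the same padding rule that Encrypt applies: the pad is dataLength % 256 bytes of the value byte(paddingLength), with a full 256-byte pad (written as the byte 0) when the length is already a multiple of 256, so every chunk carries at least one padding byte. A small worked sketch of that rule; paddingLength here is a free-standing copy for illustration, not the real code path:

package main

import "fmt"

// paddingLength reproduces the rule from Encrypt: pad by dataLength % 256
// bytes, or by a full 256 bytes when dataLength is a multiple of 256.
// Note this is not textbook PKCS7, which pads to the next block boundary.
func paddingLength(dataLength int) int {
	n := dataLength % 256
	if n == 0 {
		n = 256
	}
	return n
}

func main() {
	for _, size := range []int{1, 255, 256, 257, 1000} {
		pad := paddingLength(size)
		fmt.Printf("data %4d -> pad %3d (byte value %3d) -> padded %4d\n",
			size, pad, byte(pad), size+pad)
	}
}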
View File
@@ -5,69 +5,68 @@
package duplicacy
import (
"testing"
"bytes"
crypto_rand "crypto/rand"
"math/rand"
"bytes"
crypto_rand "crypto/rand"
"math/rand"
"testing"
)
func TestChunk(t *testing.T) {
key := []byte("duplicacydefault")
key := []byte("duplicacydefault")
config := CreateConfig()
config.HashKey = key
config.IDKey = key
config.MinimumChunkSize = 100
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
maxSize := 1000000
config := CreateConfig()
config.HashKey = key
config.IDKey = key
config.MinimumChunkSize = 100
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
maxSize := 1000000
for i := 0; i < 500; i++ {
for i := 0; i < 500; i++ {
size := rand.Int() % maxSize
size := rand.Int() % maxSize
plainData := make([]byte, size)
crypto_rand.Read(plainData)
chunk := CreateChunk(config, true)
chunk.Reset(true)
chunk.Write(plainData)
plainData := make([]byte, size)
crypto_rand.Read(plainData)
chunk := CreateChunk(config, true)
chunk.Reset(true)
chunk.Write(plainData)
hash := chunk.GetHash()
id := chunk.GetID()
hash := chunk.GetHash()
id := chunk.GetID()
err := chunk.Encrypt(key, "")
if err != nil {
t.Errorf("Failed to encrypt the data: %v", err)
continue
}
err := chunk.Encrypt(key, "")
if err != nil {
t.Errorf("Failed to encrypt the data: %v", err)
continue
}
encryptedData := make([]byte, chunk.GetLength())
copy(encryptedData, chunk.GetBytes())
encryptedData := make([]byte, chunk.GetLength())
copy(encryptedData, chunk.GetBytes())
chunk.Reset(false)
chunk.Write(encryptedData)
err = chunk.Decrypt(key, "")
if err != nil {
t.Errorf("Failed to decrypt the data: %v", err)
continue
}
chunk.Reset(false)
chunk.Write(encryptedData)
err = chunk.Decrypt(key, "")
if err != nil {
t.Errorf("Failed to decrypt the data: %v", err)
continue
}
decryptedData := chunk.GetBytes()
decryptedData := chunk.GetBytes()
if hash != chunk.GetHash() {
t.Errorf("Original hash: %x, decrypted hash: %x", hash, chunk.GetHash())
}
if hash != chunk.GetHash() {
t.Errorf("Original hash: %x, decrypted hash: %x", hash, chunk.GetHash())
}
if id != chunk.GetID() {
t.Errorf("Original id: %s, decrypted hash: %s", id, chunk.GetID())
}
if id != chunk.GetID() {
t.Errorf("Original id: %s, decrypted hash: %s", id, chunk.GetID())
}
if bytes.Compare(plainData, decryptedData) != 0 {
t.Logf("orginal length: %d, decrypted length: %d", len(plainData), len(decryptedData))
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
}
if bytes.Compare(plainData, decryptedData) != 0 {
t.Logf("orginal length: %d, decrypted length: %d", len(plainData), len(decryptedData))
t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
}
}
}
}
View File
@@ -5,24 +5,24 @@
package duplicacy
import (
"sync/atomic"
"time"
"sync/atomic"
"time"
)
// ChunkDownloadTask encapsulates the information needed to download a chunk.
type ChunkDownloadTask struct {
chunk *Chunk // The chunk that will be downloaded; initially nil
chunkIndex int // The index of this chunk in the chunk list
chunkHash string // The chunk hash
chunkLength int // The length of the chunk; may be zero
needed bool // Whether this chunk can be skipped if a local copy exists
isDownloading bool // 'true' means the chunk has been downloaded or is being downloaded
chunk *Chunk // The chunk that will be downloaded; initially nil
chunkIndex int // The index of this chunk in the chunk list
chunkHash string // The chunk hash
chunkLength int // The length of the chunk; may be zero
needed bool // Whether this chunk can be skipped if a local copy exists
isDownloading bool // 'true' means the chunk has been downloaded or is being downloaded
}
// ChunkDownloadCompletion represents the notification sent when a chunk has been downloaded.
type ChunkDownloadCompletion struct {
chunkIndex int // The index of this chunk in the chunk list
chunk *Chunk // The chunk that has been downloaded
chunkIndex int // The index of this chunk in the chunk list
chunk *Chunk // The chunk that has been downloaded
}
// ChunkDownloader is capable of performing multi-threaded downloading. Chunks to be downloaded are first organized
@@ -30,348 +30,347 @@ type ChunkDownloadCompletion struct {
// corresponding ChunkDownloadTask is sent to the downloading goroutine. Once a chunk is downloaded, it will be
// inserted in the completed task list.
type ChunkDownloader struct {
config *Config // Associated config
storage Storage // Download from this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
showStatistics bool // Show a stats log for each chunk if true
threads int // Number of threads
config *Config // Associated config
storage Storage // Download from this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
showStatistics bool // Show a stats log for each chunk if true
threads int // Number of threads
taskList [] ChunkDownloadTask // The list of chunks to be downloaded
completedTasks map[int]bool // Store downloaded chunks
lastChunkIndex int // a monotonically increasing number indicating the last chunk to be downloaded
taskList []ChunkDownloadTask // The list of chunks to be downloaded
completedTasks map[int]bool // Store downloaded chunks
lastChunkIndex int // a monotonically increasing number indicating the last chunk to be downloaded
taskQueue chan ChunkDownloadTask // Downloading goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop the downloading goroutines
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
taskQueue chan ChunkDownloadTask // Downloading goroutines are waiting on this channel for input
stopChannel chan bool // Used to stop the downloading goroutines
completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading
startTime int64 // The time it starts downloading
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
numberOfDownloadedChunks int // The number of chunks that have been downloaded
numberOfDownloadingChunks int // The number of chunks still being downloaded
numberOfActiveChunks int // The number of chunks that are being downloaded or have been downloaded but not yet reclaimed
startTime int64 // The time it starts downloading
totalChunkSize int64 // Total chunk size
downloadedChunkSize int64 // Downloaded chunk size
numberOfDownloadedChunks int // The number of chunks that have been downloaded
numberOfDownloadingChunks int // The number of chunks still being downloaded
numberOfActiveChunks int // The number of chunks that are being downloaded or have been downloaded but not yet reclaimed
}
func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
downloader := &ChunkDownloader {
config: config,
storage: storage,
snapshotCache: snapshotCache,
showStatistics: showStatistics,
threads: threads,
downloader := &ChunkDownloader{
config: config,
storage: storage,
snapshotCache: snapshotCache,
showStatistics: showStatistics,
threads: threads,
taskList: nil,
completedTasks: make(map[int]bool),
lastChunkIndex: 0,
taskList: nil,
completedTasks: make(map[int]bool),
lastChunkIndex: 0,
taskQueue: make(chan ChunkDownloadTask, threads),
stopChannel: make(chan bool),
completionChannel: make(chan ChunkDownloadCompletion),
taskQueue: make(chan ChunkDownloadTask, threads),
stopChannel: make(chan bool),
completionChannel: make(chan ChunkDownloadCompletion),
startTime: time.Now().Unix(),
}
startTime: time.Now().Unix(),
}
// Start the downloading goroutines
for i := 0; i < downloader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <- downloader.taskQueue:
downloader.Download(threadIndex, task)
case <- downloader.stopChannel:
return
}
}
} (i)
}
// Start the downloading goroutines
for i := 0; i < downloader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <-downloader.taskQueue:
downloader.Download(threadIndex, task)
case <-downloader.stopChannel:
return
}
}
}(i)
}
return downloader
return downloader
}
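
The loop above is a standard select-based worker pool: each goroutine blocks until either a task or a stop signal arrives, and Stop later sends one stop signal per worker. The same pattern reduced to a runnable sketch; the task and result types are illustrative stand-ins for ChunkDownloadTask and Download:

package main

import (
	"fmt"
	"sync"
)

func main() {
	taskQueue := make(chan int, 4)
	stopChannel := make(chan bool)
	results := make(chan int)
	var wg sync.WaitGroup

	// Start the workers, each multiplexing the task queue and the stop channel.
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(threadIndex int) {
			defer wg.Done()
			for {
				select {
				case task := <-taskQueue:
					results <- task * task // stand-in for Download
				case <-stopChannel:
					return
				}
			}
		}(i)
	}

	go func() {
		for i := 1; i <= 8; i++ {
			taskQueue <- i
		}
	}()
	for i := 0; i < 8; i++ {
		fmt.Println(<-results)
	}
	for i := 0; i < 4; i++ { // one stop signal per worker, as in Stop()
		stopChannel <- true
	}
	wg.Wait()
}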
// AddFiles adds chunks needed by the specified files to the download list.
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry) {
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry) {
downloader.taskList = nil
lastChunkIndex := -1
maximumChunks := 0
downloader.totalChunkSize = 0
for _, file := range files {
if file.Size == 0 {
continue
}
for i := file.StartChunk; i <= file.EndChunk; i++ {
if lastChunkIndex != i {
task := ChunkDownloadTask {
chunkIndex: len(downloader.taskList),
chunkHash: snapshot.ChunkHashes[i],
chunkLength: snapshot.ChunkLengths[i],
needed: false,
}
downloader.taskList = append(downloader.taskList, task)
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
} else {
downloader.taskList[len(downloader.taskList) - 1].needed = true
}
lastChunkIndex = i
}
file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
file.EndChunk = len(downloader.taskList) - 1
if file.EndChunk - file.StartChunk > maximumChunks {
maximumChunks = file.EndChunk - file.StartChunk
}
}
downloader.taskList = nil
lastChunkIndex := -1
maximumChunks := 0
downloader.totalChunkSize = 0
for _, file := range files {
if file.Size == 0 {
continue
}
for i := file.StartChunk; i <= file.EndChunk; i++ {
if lastChunkIndex != i {
task := ChunkDownloadTask{
chunkIndex: len(downloader.taskList),
chunkHash: snapshot.ChunkHashes[i],
chunkLength: snapshot.ChunkLengths[i],
needed: false,
}
downloader.taskList = append(downloader.taskList, task)
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
} else {
downloader.taskList[len(downloader.taskList)-1].needed = true
}
lastChunkIndex = i
}
file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
file.EndChunk = len(downloader.taskList) - 1
if file.EndChunk-file.StartChunk > maximumChunks {
maximumChunks = file.EndChunk - file.StartChunk
}
}
}
// AddChunk adds a single chunk to the download list.
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
task := ChunkDownloadTask {
chunkIndex: len(downloader.taskList),
chunkHash: chunkHash,
chunkLength: 0,
needed: true,
isDownloading: false,
}
downloader.taskList = append(downloader.taskList, task)
if downloader.numberOfActiveChunks < downloader.threads {
downloader.taskQueue <- task
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.taskList[len(downloader.taskList) - 1].isDownloading = true
}
return len(downloader.taskList) - 1
task := ChunkDownloadTask{
chunkIndex: len(downloader.taskList),
chunkHash: chunkHash,
chunkLength: 0,
needed: true,
isDownloading: false,
}
downloader.taskList = append(downloader.taskList, task)
if downloader.numberOfActiveChunks < downloader.threads {
downloader.taskQueue <- task
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
downloader.taskList[len(downloader.taskList)-1].isDownloading = true
}
return len(downloader.taskList) - 1
}
// Prefetch adds up to 'threads' chunks needed by a file to the download list
func (downloader *ChunkDownloader) Prefetch(file *Entry) {
// Any chunks before the first chunk of this file are no longer needed, so they can be reclaimed.
downloader.Reclaim(file.StartChunk)
// Any chunks before the first chunk of this file are no longer needed, so they can be reclaimed.
downloader.Reclaim(file.StartChunk)
for i := file.StartChunk; i <= file.EndChunk; i++ {
task := &downloader.taskList[i]
if task.needed {
if !task.isDownloading {
if downloader.numberOfActiveChunks >= downloader.threads {
return
}
for i := file.StartChunk; i <= file.EndChunk; i++ {
task := &downloader.taskList[i]
if task.needed {
if !task.isDownloading {
if downloader.numberOfActiveChunks >= downloader.threads {
return
}
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
} else{
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
}
}
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
} else {
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
downloader.config.GetChunkIDFromHash(task.chunkHash))
}
}
}
// Reclaim releases downloaded chunks before the given chunk index back to the chunk pool
func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
if downloader.lastChunkIndex == chunkIndex {
return
}
if downloader.lastChunkIndex == chunkIndex {
return
}
for i, _ := range downloader.completedTasks {
if i < chunkIndex && downloader.taskList[i].chunk != nil {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
delete(downloader.completedTasks, i)
downloader.numberOfActiveChunks--
}
}
for i, _ := range downloader.completedTasks {
if i < chunkIndex && downloader.taskList[i].chunk != nil {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
delete(downloader.completedTasks, i)
downloader.numberOfActiveChunks--
}
}
downloader.lastChunkIndex = chunkIndex
downloader.lastChunkIndex = chunkIndex
}
// WaitForChunk waits until the specified chunk is ready
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
// Reclaim any chunks that are no longer needed
downloader.Reclaim(chunkIndex)
// Reclaim any chunks that are no longer needed
downloader.Reclaim(chunkIndex)
// If we haven't started downloading the specified chunk, download it now
if !downloader.taskList[chunkIndex].isDownloading {
LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
downloader.taskQueue <- downloader.taskList[chunkIndex]
downloader.taskList[chunkIndex].isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
// If we haven't started downloading the specified chunk, download it now
if !downloader.taskList[chunkIndex].isDownloading {
LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
downloader.taskQueue <- downloader.taskList[chunkIndex]
downloader.taskList[chunkIndex].isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
// We also need to look ahead and prefetch as many other chunks as permitted by the number of threads
for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
if downloader.numberOfActiveChunks >= downloader.threads {
break
}
task := &downloader.taskList[i]
if !task.needed {
break
}
// We also need to look ahead and prefetch as many other chunks as permitted by the number of threads
for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
if downloader.numberOfActiveChunks >= downloader.threads {
break
}
task := &downloader.taskList[i]
if !task.needed {
break
}
if !task.isDownloading {
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
}
if !task.isDownloading {
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
downloader.taskQueue <- *task
task.isDownloading = true
downloader.numberOfDownloadingChunks++
downloader.numberOfActiveChunks++
}
}
// Now wait until the chunk to be downloaded appears in the completed tasks
for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
completion := <- downloader.completionChannel
downloader.completedTasks[completion.chunkIndex] = true
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
return downloader.taskList[chunkIndex].chunk
// Now wait until the chunk to be downloaded appears in the completed tasks
for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
completion := <-downloader.completionChannel
downloader.completedTasks[completion.chunkIndex] = true
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
return downloader.taskList[chunkIndex].chunk
}
// Stop terminates all downloading goroutines
func (downloader *ChunkDownloader) Stop() {
for downloader.numberOfDownloadingChunks > 0 {
completion := <- downloader.completionChannel
downloader.completedTasks[completion.chunkIndex] = true
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
for downloader.numberOfDownloadingChunks > 0 {
completion := <-downloader.completionChannel
downloader.completedTasks[completion.chunkIndex] = true
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
downloader.numberOfDownloadedChunks++
downloader.numberOfDownloadingChunks--
}
for i, _ := range downloader.completedTasks {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
downloader.numberOfActiveChunks--
}
for i, _ := range downloader.completedTasks {
downloader.config.PutChunk(downloader.taskList[i].chunk)
downloader.taskList[i].chunk = nil
downloader.numberOfActiveChunks--
}
for i := 0; i < downloader.threads; i++ {
downloader.stopChannel <- true
}
for i := 0; i < downloader.threads; i++ {
downloader.stopChannel <- true
}
}
// Download downloads a chunk from the storage.
func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadTask) bool {
cachedPath := ""
chunk := downloader.config.GetChunk()
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
cachedPath := ""
chunk := downloader.config.GetChunk()
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
var exist bool
var err error
var exist bool
var err error
// Reset the chunk with a hasher -- we're reading from the cache where chunks are not encrypted or compressed
chunk.Reset(true)
// Reset the chunk with a hasher -- we're reading from the cache where chunks are not encrypted or compressed
chunk.Reset(true)
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
} else {
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
} else {
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
downloader.completionChannel <- ChunkDownloadCompletion{ chunk: chunk, chunkIndex:task.chunkIndex }
return false
}
}
}
}
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
return false
}
}
}
}
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
// will be set up during decryption
chunk.Reset(false)
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
// will be set up during decryption
chunk.Reset(false)
// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
// Find the chunk by ID first.
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
// No chunk is found. Have to find it in the fossil pool again.
chunkPath, exist, _, err = downloader.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
// No chunk is found. Have to find it in the fossil pool again.
chunkPath, exist, _, err = downloader.storage.FindChunk(threadIndex, chunkID, true)
if err != nil {
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
return false
}
if !exist {
// A chunk is not found. This is a serious error and hopefully it will never happen.
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return false
}
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
}
if !exist {
// A chunk is not found. This is a serious error and hopefully it will never happen.
if err != nil {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
} else {
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
}
return false
}
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
}
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
return false
}
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
if err != nil {
LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
return false
}
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
return false
}
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
return false
}
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
return false
}
actualChunkID := chunk.GetID()
if actualChunkID != chunkID {
LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
return false
}
if len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err = downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}
}
if len(cachedPath) > 0 {
// Save a copy to the local snapshot cache
err = downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
if err != nil {
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
}
}
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
now := time.Now().Unix()
if now <= downloader.startTime {
now = downloader.startTime + 1
}
speed := downloadedChunkSize / (now - downloader.startTime)
remainingTime := int64(0)
if speed > 0 {
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
}
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
task.chunkIndex+1, chunk.GetLength(),
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
} else {
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
}
now := time.Now().Unix()
if now <= downloader.startTime {
now = downloader.startTime + 1
}
speed := downloadedChunkSize / (now - downloader.startTime)
remainingTime := int64(0)
if speed > 0 {
remainingTime = (downloader.totalChunkSize - downloadedChunkSize) / speed + 1
}
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
task.chunkIndex + 1, chunk.GetLength(),
PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
} else {
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
}
downloader.completionChannel <- ChunkDownloadCompletion{ chunk: chunk, chunkIndex:task.chunkIndex }
return true
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
return true
}
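
Putting the pieces of Download together: it is a read-through cache over the storage, with the fossil pool as a fallback and an integrity check before the cache is populated. A simplified, self-contained sketch of that flow; Cache, Store, and computeID are stand-ins rather than the real interfaces, and the real chunk ID is a keyed hash, not plain SHA-256:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// Cache and Store are simplified stand-ins for the snapshot cache and
// the storage backend.
type Cache map[string][]byte
type Store map[string][]byte

func computeID(data []byte) string {
	sum := sha256.Sum256(data)
	return hex.EncodeToString(sum[:])
}

// fetchChunk sketches Download's read-through flow: snapshot cache first,
// then the storage, then the fossil pool, verifying before caching.
func fetchChunk(cache Cache, storage, fossils Store, chunkID string) ([]byte, error) {
	if data, ok := cache[chunkID]; ok {
		return data, nil // served from the snapshot cache
	}
	data, ok := storage[chunkID]
	if !ok {
		if data, ok = fossils[chunkID]; !ok {
			return nil, fmt.Errorf("chunk %s can't be found", chunkID)
		}
	}
	if computeID(data) != chunkID { // verify before caching
		return nil, fmt.Errorf("chunk %s is corrupted", chunkID)
	}
	cache[chunkID] = data // populate the cache for next time
	return data, nil
}

func main() {
	data := []byte("example chunk")
	id := computeID(data)
	cache, storage, fossils := Cache{}, Store{id: data}, Store{}
	if _, err := fetchChunk(cache, storage, fossils, id); err == nil {
		fmt.Println("fetched and cached", id[:8])
	}
}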
View File
@@ -5,294 +5,293 @@
package duplicacy
import (
"io"
"crypto/sha256"
"encoding/hex"
"encoding/binary"
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"io"
)
// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only uses a circular buffer
// whose size is double the minimum chunk size.
type ChunkMaker struct {
maximumChunkSize int
minimumChunkSize int
bufferCapacity int
maximumChunkSize int
minimumChunkSize int
bufferCapacity int
hashMask uint64
randomTable [256]uint64
hashMask uint64
randomTable [256]uint64
buffer []byte
bufferSize int
bufferStart int
buffer []byte
bufferSize int
bufferStart int
config *Config
config *Config
hashOnly bool
hashOnlyChunk *Chunk
hashOnly bool
hashOnlyChunk *Chunk
}
// CreateChunkMaker creates a chunk maker. 'config.ChunkSeed' is used to generate the character-to-integer table needed by
// buzhash.
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
size := 1
for size * 2 <= config.AverageChunkSize {
size *= 2
}
size := 1
for size*2 <= config.AverageChunkSize {
size *= 2
}
if size != config.AverageChunkSize {
LOG_FATAL("CHUNK_SIZE", "Invalid average chunk size: %d is not a power of 2", config.AverageChunkSize)
return nil
}
if size != config.AverageChunkSize {
LOG_FATAL("CHUNK_SIZE", "Invalid average chunk size: %d is not a power of 2", config.AverageChunkSize)
return nil
}
maker := &ChunkMaker {
hashMask: uint64(config.AverageChunkSize - 1),
maximumChunkSize: config.MaximumChunkSize,
minimumChunkSize: config.MinimumChunkSize,
bufferCapacity: 2 * config.MinimumChunkSize,
config: config,
hashOnly: hashOnly,
}
maker := &ChunkMaker{
hashMask: uint64(config.AverageChunkSize - 1),
maximumChunkSize: config.MaximumChunkSize,
minimumChunkSize: config.MinimumChunkSize,
bufferCapacity: 2 * config.MinimumChunkSize,
config: config,
hashOnly: hashOnly,
}
if hashOnly {
maker.hashOnlyChunk = CreateChunk(config, false)
}
if hashOnly {
maker.hashOnlyChunk = CreateChunk(config, false)
}
randomData := sha256.Sum256(config.ChunkSeed)
randomData := sha256.Sum256(config.ChunkSeed)
for i := 0; i < 64; i++ {
for j := 0; j < 4; j++ {
maker.randomTable[4 * i + j] = binary.LittleEndian.Uint64(randomData[8 * j : 8 * j + 8])
}
randomData = sha256.Sum256(randomData[:])
}
for i := 0; i < 64; i++ {
for j := 0; j < 4; j++ {
maker.randomTable[4*i+j] = binary.LittleEndian.Uint64(randomData[8*j : 8*j+8])
}
randomData = sha256.Sum256(randomData[:])
}
maker.buffer = make([]byte, 2 * config.MinimumChunkSize)
maker.buffer = make([]byte, 2*config.MinimumChunkSize)
return maker
return maker
}
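
The table initialization above stretches one SHA-256 digest of the seed into 256 64-bit entries: each digest supplies four little-endian words, and rehashing the digest yields the next four, for 64 rounds. The same construction in isolation, with a sample seed:

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// buildTable reproduces the seed-to-table expansion used by
// CreateChunkMaker: 64 rounds of SHA-256 chaining, 4 entries per round.
func buildTable(seed []byte) [256]uint64 {
	var table [256]uint64
	digest := sha256.Sum256(seed)
	for i := 0; i < 64; i++ {
		for j := 0; j < 4; j++ {
			table[4*i+j] = binary.LittleEndian.Uint64(digest[8*j : 8*j+8])
		}
		digest = sha256.Sum256(digest[:])
	}
	return table
}

func main() {
	table := buildTable([]byte("duplicacy"))
	fmt.Printf("table[0]=%x table[255]=%x\n", table[0], table[255])
}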
func rotateLeft(value uint64, bits uint) uint64 {
return (value << (bits & 0x3f)) | (value >> (64 - (bits & 0x3f)))
return (value << (bits & 0x3f)) | (value >> (64 - (bits & 0x3f)))
}
func rotateLeftByOne(value uint64) uint64 {
return (value << 1) | (value >> 63)
return (value << 1) | (value >> 63)
}
func (maker *ChunkMaker) buzhashSum(sum uint64, data [] byte) uint64 {
for i := 0; i < len(data); i++ {
sum = rotateLeftByOne(sum) ^ maker.randomTable[data[i]]
}
return sum
func (maker *ChunkMaker) buzhashSum(sum uint64, data []byte) uint64 {
for i := 0; i < len(data); i++ {
sum = rotateLeftByOne(sum) ^ maker.randomTable[data[i]]
}
return sum
}
func (maker *ChunkMaker) buzhashUpdate(sum uint64, out byte, in byte, length int) uint64 {
return rotateLeftByOne(sum) ^ rotateLeft(maker.randomTable[out], uint(length)) ^ maker.randomTable[in]
return rotateLeftByOne(sum) ^ rotateLeft(maker.randomTable[out], uint(length)) ^ maker.randomTable[in]
}
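
buzhashUpdate is the rolling step: rotating the running sum once, cancelling the outgoing byte's table entry (pre-rotated by the window length), and mixing in the incoming byte's entry produces exactly the hash of the shifted window. A self-contained check of that identity, with a made-up table standing in for the seeded one:

package main

import "fmt"

var table [256]uint64

func init() {
	// Illustrative table only; the real one is derived from config.ChunkSeed.
	for i := range table {
		table[i] = uint64(i)*0x9e3779b97f4a7c15 + 1
	}
}

func rotl(v uint64, bits uint) uint64 { return (v << (bits & 0x3f)) | (v >> (64 - (bits & 0x3f))) }

// sum hashes a window from scratch, as buzhashSum does.
func sum(data []byte) uint64 {
	var s uint64
	for _, b := range data {
		s = rotl(s, 1) ^ table[b]
	}
	return s
}

// update slides the window one byte: drop 'out', add 'in', as buzhashUpdate does.
func update(s uint64, out, in byte, length int) uint64 {
	return rotl(s, 1) ^ rotl(table[out], uint(length)) ^ table[in]
}

func main() {
	data := []byte("content-defined chunking with buzhash")
	const window = 8
	s := sum(data[:window])
	for i := window; i < len(data); i++ {
		s = update(s, data[i-window], data[i], window)
		if s != sum(data[i-window+1 : i+1]) { // rolling == from-scratch
			fmt.Println("mismatch at", i)
			return
		}
	}
	fmt.Println("rolling hash matches full rehash at every offset")
}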
// ForEachChunk reads data from 'reader'. If EOF is encountered, it will call 'nextReader' to ask for the next file. If
// 'nextReader' returns false, it will process remaining data in the buffer and then quit. When a chunk is identified,
// it will call 'endOfChunk' to return the chunk and a boolean flag indicating if it is the last chunk.
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
nextReader func(size int64, hash string)(io.Reader, bool)) {
nextReader func(size int64, hash string) (io.Reader, bool)) {
maker.bufferStart = 0
maker.bufferSize = 0
maker.bufferStart = 0
maker.bufferSize = 0
var minimumReached bool
var hashSum uint64
var chunk *Chunk
var minimumReached bool
var hashSum uint64
var chunk *Chunk
fileSize := int64(0)
fileHasher := maker.config.NewFileHasher()
fileSize := int64(0)
fileHasher := maker.config.NewFileHasher()
// Start a new chunk.
startNewChunk := func() {
hashSum = 0
minimumReached = false
if maker.hashOnly {
chunk = maker.hashOnlyChunk
chunk.Reset(true)
} else {
chunk = maker.config.GetChunk()
chunk.Reset(true)
}
}
// Start a new chunk.
startNewChunk := func() {
hashSum = 0
minimumReached = false
if maker.hashOnly {
chunk = maker.hashOnlyChunk
chunk.Reset(true)
} else {
chunk = maker.config.GetChunk()
chunk.Reset(true)
}
}
// Move data from the buffer to the chunk.
fill := func(count int) {
if maker.bufferStart + count < maker.bufferCapacity {
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart + count])
maker.bufferStart += count
maker.bufferSize -= count
} else {
chunk.Write(maker.buffer[maker.bufferStart :])
chunk.Write(maker.buffer[: count - (maker.bufferCapacity - maker.bufferStart)])
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
maker.bufferSize -= count
}
}
// Move data from the buffer to the chunk.
fill := func(count int) {
if maker.bufferStart+count < maker.bufferCapacity {
chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
maker.bufferStart += count
maker.bufferSize -= count
} else {
chunk.Write(maker.buffer[maker.bufferStart:])
chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
maker.bufferSize -= count
}
}
startNewChunk()
startNewChunk()
var err error
var err error
isEOF := false
isEOF := false
if maker.minimumChunkSize == maker.maximumChunkSize {
if maker.minimumChunkSize == maker.maximumChunkSize {
if maker.bufferCapacity < maker.minimumChunkSize {
maker.buffer = make([]byte, maker.minimumChunkSize)
}
if maker.bufferCapacity < maker.minimumChunkSize {
maker.buffer = make([]byte, maker.minimumChunkSize)
}
for {
maker.bufferStart = 0
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
count, err := reader.Read(maker.buffer[maker.bufferStart : maker.minimumChunkSize])
for {
maker.bufferStart = 0
for maker.bufferStart < maker.minimumChunkSize && !isEOF {
count, err := reader.Read(maker.buffer[maker.bufferStart:maker.minimumChunkSize])
if err != nil {
if err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
} else {
isEOF = true
}
}
maker.bufferStart += count
}
if err != nil {
if err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
} else {
isEOF = true
}
}
maker.bufferStart += count
}
fileHasher.Write(maker.buffer[:maker.bufferStart])
fileSize += int64(maker.bufferStart)
chunk.Write(maker.buffer[:maker.bufferStart])
fileHasher.Write(maker.buffer[:maker.bufferStart])
fileSize += int64(maker.bufferStart)
chunk.Write(maker.buffer[:maker.bufferStart])
if isEOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
endOfChunk(chunk, true)
return
} else {
endOfChunk(chunk, false)
startNewChunk()
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
} else {
endOfChunk(chunk, false)
startNewChunk()
}
}
if isEOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
endOfChunk(chunk, true)
return
} else {
endOfChunk(chunk, false)
startNewChunk()
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
} else {
endOfChunk(chunk, false)
startNewChunk()
}
}
}
}
for {
for {
// If the buffer still has some space left and EOF is not seen, read more data.
for maker.bufferSize < maker.bufferCapacity && !isEOF {
start := maker.bufferStart + maker.bufferSize
count := maker.bufferCapacity - start
if start >= maker.bufferCapacity {
start -= maker.bufferCapacity
count = maker.bufferStart - start
}
// If the buffer still has some space left and EOF is not seen, read more data.
for maker.bufferSize < maker.bufferCapacity && !isEOF {
start := maker.bufferStart + maker.bufferSize
count := maker.bufferCapacity - start
if start >= maker.bufferCapacity {
start -= maker.bufferCapacity
count = maker.bufferStart - start
}
count, err = reader.Read(maker.buffer[start : start + count])
count, err = reader.Read(maker.buffer[start : start+count])
if err != nil && err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
}
if err != nil && err != io.EOF {
LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
return
}
maker.bufferSize += count
fileHasher.Write(maker.buffer[start : start + count])
fileSize += int64(count)
maker.bufferSize += count
fileHasher.Write(maker.buffer[start : start+count])
fileSize += int64(count)
// if EOF is seen, try to switch to next file and continue
if err == io.EOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
isEOF = true
} else {
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
}
}
// if EOF is seen, try to switch to next file and continue
if err == io.EOF {
var ok bool
reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
if !ok {
isEOF = true
} else {
fileSize = 0
fileHasher = maker.config.NewFileHasher()
isEOF = false
}
}
}
// Not enough data to meet the minimum chunk size requirement, so just return it as a chunk.
if maker.bufferSize < maker.minimumChunkSize {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
}
// Not enough data to meet the minimum chunk size requirement, so just return it as a chunk.
if maker.bufferSize < maker.minimumChunkSize {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
}
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
if !minimumReached {
// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
if (!minimumReached) {
bytes := maker.minimumChunkSize
bytes := maker.minimumChunkSize
if maker.bufferStart+bytes < maker.bufferCapacity {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
} else {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
hashSum = maker.buzhashSum(hashSum,
maker.buffer[:bytes-(maker.bufferCapacity-maker.bufferStart)])
}
if maker.bufferStart + bytes < maker.bufferCapacity {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart : maker.bufferStart + bytes])
} else {
hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart :])
hashSum = maker.buzhashSum(hashSum,
maker.buffer[: bytes - (maker.bufferCapacity - maker.bufferStart)])
}
if (hashSum & maker.hashMask) == 0 {
// This is a minimum size chunk
fill(bytes)
endOfChunk(chunk, false)
startNewChunk()
continue
}
if (hashSum & maker.hashMask) == 0 {
// This is a minimum size chunk
fill(bytes)
endOfChunk(chunk, false)
startNewChunk()
continue
}
minimumReached = true
}
minimumReached = true
}
// Now check the buzhash of the data in the buffer, shifting one byte at a time.
bytes := maker.bufferSize - maker.minimumChunkSize
isEOC := false
maxSize := maker.maximumChunkSize - chunk.GetLength()
for i := 0; i < maker.bufferSize-maker.minimumChunkSize; i++ {
out := maker.bufferStart + i
if out >= maker.bufferCapacity {
out -= maker.bufferCapacity
}
in := maker.bufferStart + i + maker.minimumChunkSize
if in >= maker.bufferCapacity {
in -= maker.bufferCapacity
}
// Now check the buzhash of the data in the buffer, shifting one byte at a time.
bytes := maker.bufferSize - maker.minimumChunkSize
isEOC := false
maxSize := maker.maximumChunkSize - chunk.GetLength()
for i := 0; i < maker.bufferSize - maker.minimumChunkSize; i++ {
out := maker.bufferStart + i
if out >= maker.bufferCapacity {
out -= maker.bufferCapacity
}
in := maker.bufferStart + i + maker.minimumChunkSize
if in >= maker.bufferCapacity {
in -= maker.bufferCapacity
}
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
if (hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
// A chunk is completed.
bytes = i + 1 + maker.minimumChunkSize
isEOC = true
break
}
}
hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
if (hashSum & maker.hashMask) == 0 || i == maxSize - maker.minimumChunkSize - 1 {
// A chunk is completed.
bytes = i + 1 + maker.minimumChunkSize
isEOC = true
break
}
}
fill(bytes)
fill(bytes)
if isEOC {
if isEOF && maker.bufferSize == 0 {
endOfChunk(chunk, true)
return
}
endOfChunk(chunk, false)
startNewChunk()
continue
}
if isEOC {
if isEOF && maker.bufferSize == 0 {
endOfChunk(chunk, true)
return
}
endOfChunk(chunk, false)
startNewChunk()
continue
}
if isEOF {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
}
}
if isEOF {
fill(maker.bufferSize)
endOfChunk(chunk, true)
return
}
}
}
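
Because AverageChunkSize is forced to be a power of two, hashMask has exactly its low log2(AverageChunkSize) bits set, and the boundary test (hashSum & hashMask) == 0 fires with probability 1/AverageChunkSize per byte once the minimum size is reached, which is what makes the configured average the expected gap between boundaries. A quick simulation of that expectation, illustrative only:

package main

import (
	"fmt"
	"math/rand"
)

// Simulate content-defined boundaries: a boundary is declared whenever
// hash & mask == 0, so with a 1/average chance per position the mean
// gap between boundaries approaches the configured average chunk size.
func main() {
	const average = 4096 // must be a power of two, as CreateChunkMaker enforces
	mask := uint64(average - 1)
	rng := rand.New(rand.NewSource(1))

	boundaries, positions := 0, 10_000_000
	for i := 0; i < positions; i++ {
		if rng.Uint64()&mask == 0 {
			boundaries++
		}
	}
	fmt.Printf("observed average gap: %.0f bytes (target %d)\n",
		float64(positions)/float64(boundaries), average)
}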
View File
@@ -5,128 +5,127 @@
package duplicacy
import (
"testing"
"bytes"
crypto_rand "crypto/rand"
"math/rand"
"io"
"sort"
"bytes"
crypto_rand "crypto/rand"
"io"
"math/rand"
"sort"
"testing"
)
func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
bufferCapacity int) ([]string, int) {
bufferCapacity int) ([]string, int) {
config := CreateConfig()
config := CreateConfig()
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
config.AverageChunkSize = averageChunkSize
config.MaximumChunkSize = maxChunkSize
config.MinimumChunkSize = minChunkSize
config.ChunkSeed = []byte("duplicacy")
config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
config.AverageChunkSize = averageChunkSize
config.MaximumChunkSize = maxChunkSize
config.MinimumChunkSize = minChunkSize
config.ChunkSeed = []byte("duplicacy")
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
maker := CreateChunkMaker(config, false)
maker := CreateChunkMaker(config, false)
var chunks [] string
totalChunkSize := 0
totalFileSize := int64(0)
var chunks []string
totalChunkSize := 0
totalFileSize := int64(0)
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)
buffers := make([] *bytes.Buffer, n)
sizes := make([] int, n)
sizes[0] = 0
for i := 1; i < n; i++ {
same := true
for same {
same = false
sizes[i] = rand.Int() % n
for j := 0; j < i; j++ {
if sizes[i] == sizes[j] {
same = true
break
}
}
}
}
buffers := make([]*bytes.Buffer, n)
sizes := make([]int, n)
sizes[0] = 0
for i := 1; i < n; i++ {
same := true
for same {
same = false
sizes[i] = rand.Int() % n
for j := 0; j < i; j++ {
if sizes[i] == sizes[j] {
same = true
break
}
}
}
}
sort.Sort(sort.IntSlice(sizes))
sort.Sort(sort.IntSlice(sizes))
for i := 0; i < n - 1; i++ {
buffers[i] = bytes.NewBuffer(content[sizes[i] : sizes[i + 1]])
}
buffers[n - 1] = bytes.NewBuffer(content[sizes[n - 1]:])
for i := 0; i < n-1; i++ {
buffers[i] = bytes.NewBuffer(content[sizes[i]:sizes[i+1]])
}
buffers[n-1] = bytes.NewBuffer(content[sizes[n-1]:])
i := 0
i := 0
maker.ForEachChunk(buffers[0],
func (chunk *Chunk, final bool) {
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
chunks = append(chunks, chunk.GetHash())
totalChunkSize += chunk.GetLength()
},
func (size int64, hash string) (io.Reader, bool) {
totalFileSize += size
i++
if i >= len(buffers) {
return nil, false
}
return buffers[i], true
})
maker.ForEachChunk(buffers[0],
func(chunk *Chunk, final bool) {
//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
chunks = append(chunks, chunk.GetHash())
totalChunkSize += chunk.GetLength()
},
func(size int64, hash string) (io.Reader, bool) {
totalFileSize += size
i++
if i >= len(buffers) {
return nil, false
}
return buffers[i], true
})
if (totalFileSize != int64(totalChunkSize)) {
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
}
return chunks, totalChunkSize
if totalFileSize != int64(totalChunkSize) {
LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
}
return chunks, totalChunkSize
}
func TestChunkMaker(t *testing.T) {
//sizes := [...] int { 64 }
sizes := [...]int{64, 256, 1024, 1024 * 10}
//sizes := [...] int { 64 }
sizes := [...] int { 64, 256, 1024, 1024 * 10 }
for _, size := range sizes {
for _, size := range sizes {
content := make([]byte, size)
_, err := crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
continue
}
content := make([]byte, size)
_, err := crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
continue
}
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)
capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
32, 48, 64, 128, 256, 512, 1024, 2048}
capacities := [...]int { 32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
32, 48, 64, 128, 256, 512, 1024, 2048, }
//capacities := [...]int { 32 }
//capacities := [...]int { 32 }
for _, capacity := range capacities {
for _, capacity := range capacities {
for _, n := range [...]int{6, 7, 8, 9, 10} {
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
for _, n := range [...]int { 6, 7, 8, 9, 10 } {
chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)
if totalSize1 != totalSize2 {
t.Errorf("[size %d, capacity %d] total size is %d instead of %d",
size, capacity, totalSize2, totalSize1)
}
if totalSize1 != totalSize2 {
t.Errorf("[size %d, capacity %d] total size is %d instead of %d",
size, capacity, totalSize2, totalSize1)
}
if len(chunkArray1) != len(chunkArray2) {
t.Errorf("[size %d, capacity %d] number of chunks is %d instead of %d",
size, capacity, len(chunkArray2), len(chunkArray1))
} else {
for i := 0; i < len(chunkArray1); i++ {
if chunkArray1[i] != chunkArray2[i] {
t.Errorf("[size %d, capacity %d, chunk %d] chunk is different", size, capacity, i)
}
}
}
}
}
}
if len(chunkArray1) != len(chunkArray2) {
t.Errorf("[size %d, capacity %d] number of chunks is %d instead of %d",
size, capacity, len(chunkArray2), len(chunkArray1))
} else {
for i := 0; i < len(chunkArray1); i++ {
if chunkArray1[i] != chunkArray2[i] {
t.Errorf("[size %d, capacity %d, chunk %d] chunk is different", size, capacity, i)
}
}
}
}
}
}
}
View File
@@ -5,14 +5,14 @@
package duplicacy
import (
"sync/atomic"
"time"
"sync/atomic"
"time"
)
// ChunkUploadTask represents a chunk to be uploaded.
type ChunkUploadTask struct {
chunk * Chunk
chunkIndex int
chunk *Chunk
chunkIndex int
}
// ChunkUploader uploads chunks to the storage using one or more uploading goroutines. Chunks are added
@@ -20,132 +20,132 @@ type ChunkUploadTask struct {
// called when the uploading is completed. Note that ChunkUploader does not release chunks to the
// chunk pool; instead
type ChunkUploader struct {
config *Config // Associated config
storage Storage // Upload to this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for uploading snapshot chunks
threads int // Number of uploading goroutines
taskQueue chan ChunkUploadTask // Uploading goroutines are listening on this channel for upload jobs
stopChannel chan bool // Used to terminate uploading goroutines
config *Config // Associated config
storage Storage // Upload to this storage
snapshotCache *FileStorage // Used as cache if not nil; usually for uploading snapshot chunks
threads int // Number of uploading goroutines
taskQueue chan ChunkUploadTask // Uploading goroutines are listening on this channel for upload jobs
stopChannel chan bool // Used to terminate uploading goroutines
numberOfUploadingTasks int32 // The number of uploading tasks
numberOfUploadingTasks int32 // The number of uploading tasks
// Uploading goroutines call this function after having uploaded chunks
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)
// Uploading goroutines call this function after having uploaded chunks
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)
}
// CreateChunkUploader creates a chunk uploader.
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
uploader := &ChunkUploader {
config: config,
storage: storage,
snapshotCache: snapshotCache,
threads: threads,
taskQueue: make(chan ChunkUploadTask, 1),
stopChannel: make(chan bool),
completionFunc: completionFunc,
}
completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
uploader := &ChunkUploader{
config: config,
storage: storage,
snapshotCache: snapshotCache,
threads: threads,
taskQueue: make(chan ChunkUploadTask, 1),
stopChannel: make(chan bool),
completionFunc: completionFunc,
}
return uploader
return uploader
}
// Start starts the uploading goroutines.
func (uploader *ChunkUploader) Start() {
for i := 0; i < uploader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <- uploader.taskQueue:
uploader.Upload(threadIndex, task)
case <- uploader.stopChannel:
return
}
}
} (i)
}
for i := 0; i < uploader.threads; i++ {
go func(threadIndex int) {
defer CatchLogException()
for {
select {
case task := <-uploader.taskQueue:
uploader.Upload(threadIndex, task)
case <-uploader.stopChannel:
return
}
}
}(i)
}
}
// StartChunk sends a chunk to be uploaded to a waiting uploading goroutine. It may block if all uploading goroutines are busy.
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
uploader.taskQueue <- ChunkUploadTask {
chunk: chunk,
chunkIndex: chunkIndex,
}
atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
uploader.taskQueue <- ChunkUploadTask{
chunk: chunk,
chunkIndex: chunkIndex,
}
}
// Stop stops all uploading goroutines.
func (uploader *ChunkUploader) Stop() {
for atomic.LoadInt32(&uploader.numberOfUploadingTasks) > 0 {
time.Sleep(100 * time.Millisecond)
}
for i := 0; i < uploader.threads; i++ {
uploader.stopChannel <- false
}
for atomic.LoadInt32(&uploader.numberOfUploadingTasks) > 0 {
time.Sleep(100 * time.Millisecond)
}
for i := 0; i < uploader.threads; i++ {
uploader.stopChannel <- false
}
}
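
Note the difference from ChunkDownloader.Stop: the uploader has no completion channel to drain, so it tracks in-flight tasks with an atomic counter and polls until the counter reaches zero before signalling the workers. That drain-then-stop pattern in isolation; the pool type and its methods are illustrative names, not the real API:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// pool mirrors the counters and channels that ChunkUploader uses to
// know when it is safe to stop its workers.
type pool struct {
	inFlight int32
	tasks    chan int
	stop     chan bool
}

func newPool(workers int) *pool {
	p := &pool{tasks: make(chan int, 1), stop: make(chan bool)}
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case t := <-p.tasks:
					fmt.Println("processed task", t)
					atomic.AddInt32(&p.inFlight, -1) // one task finished
				case <-p.stop:
					return
				}
			}
		}()
	}
	return p
}

func (p *pool) submit(t int) {
	atomic.AddInt32(&p.inFlight, 1) // mirrors StartChunk
	p.tasks <- t
}

func (p *pool) shutdown(workers int) {
	for atomic.LoadInt32(&p.inFlight) > 0 { // mirrors Stop's polling loop
		time.Sleep(10 * time.Millisecond)
	}
	for i := 0; i < workers; i++ {
		p.stop <- true
	}
}

func main() {
	p := newPool(2)
	for i := 0; i < 5; i++ {
		p.submit(i)
	}
	p.shutdown(2)
}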
// Upload is called by the uploading goroutines to perform the actual uploading
func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) bool {
chunk := task.chunk
chunkSize := chunk.GetLength()
chunkID := chunk.GetID()
chunk := task.chunk
chunkSize := chunk.GetLength()
chunkID := chunk.GetID()
// For a snapshot chunk, verify that its chunk id is correct
if uploader.snapshotCache != nil {
chunk.VerifyID()
}
// For a snapshot chunk, verify that its chunk id is correct
if uploader.snapshotCache != nil {
chunk.VerifyID()
}
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
// Save a copy to the local snapshot.
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
}
}
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
// Save a copy to the local snapshot.
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
} else if exist {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
} else {
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
}
}
// This returns the path the chunk file should be at.
chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
return false
}
// This returns the path the chunk file should be at.
chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
return false
}
if exist {
// Chunk deduplication by name in effect here.
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
if exist {
// Chunk deduplication by name in effect here.
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return false
}
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return false
}
// Encrypt the chunk only after we know that it must be uploaded.
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
return false
}
// Encrypt the chunk only after we know that it must be uploaded.
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
return false
}
if !uploader.config.dryRun {
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
return false
}
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
} else {
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
}
if !uploader.config.dryRun {
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
if err != nil {
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
return false
}
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
} else {
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
}
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return true
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
return true
}
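// The upload path above, summarized (illustrative):
//
//	VerifyID (snapshot chunks only)
//	  -> save a copy to the local snapshot cache (if the storage needs one)
//	  -> FindChunk: chunk file already exists? -> completionFunc(skipped=true)
//	  -> Encrypt with ChunkKey (only chunks that must actually be sent)
//	  -> UploadFile (skipped under a dry run)
//	  -> completionFunc(skipped=false)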

View File

@@ -5,126 +5,124 @@
package duplicacy
import (
"os"
"time"
"path"
"testing"
"runtime/debug"
"os"
"path"
"runtime/debug"
"testing"
"time"
crypto_rand "crypto/rand"
"math/rand"
crypto_rand "crypto/rand"
"math/rand"
)
func TestUploaderAndDownloader(t *testing.T) {
rand.Seed(time.Now().UnixNano())
setTestingT(t)
SetLoggingLevel(INFO)
rand.Seed(time.Now().UnixNano())
setTestingT(t)
SetLoggingLevel(INFO)
defer func() {
if r := recover(); r != nil {
switch e := r.(type) {
case Exception:
t.Errorf("%s %s", e.LogID, e.Message)
debug.PrintStack()
default:
t.Errorf("%v", e)
debug.PrintStack()
}
}
} ()
defer func() {
if r := recover(); r != nil {
switch e := r.(type) {
case Exception:
t.Errorf("%s %s", e.LogID, e.Message)
debug.PrintStack()
default:
t.Errorf("%v", e)
debug.PrintStack()
}
}
}()
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
t.Logf("storage: %s", testStorageName)
t.Logf("storage: %s", testStorageName)
storage, err := loadStorage(testDir, 1)
if err != nil {
t.Errorf("Failed to create storage: %v", err)
return
}
storage.EnableTestMode()
storage.SetRateLimits(testRateLimit, testRateLimit)
storage, err := loadStorage(testDir, 1)
if err != nil {
t.Errorf("Failed to create storage: %v", err)
return
}
storage.EnableTestMode()
storage.SetRateLimits(testRateLimit, testRateLimit)
for _, dir := range []string { "chunks", "snapshots" } {
err = storage.CreateDirectory(0, dir)
if err != nil {
t.Errorf("Failed to create directory %s: %v", dir, err)
return
}
}
for _, dir := range []string{"chunks", "snapshots"} {
err = storage.CreateDirectory(0, dir)
if err != nil {
t.Errorf("Failed to create directory %s: %v", dir, err)
return
}
}
numberOfChunks := 100
maxChunkSize := 64 * 1024
numberOfChunks := 100
maxChunkSize := 64 * 1024
if testQuickMode {
numberOfChunks = 10
}
if testQuickMode {
numberOfChunks = 10
}
var chunks []*Chunk
var chunks []*Chunk
config := CreateConfig()
config.MinimumChunkSize = 100
config.chunkPool = make(chan *Chunk, numberOfChunks*2)
totalFileSize := 0
config := CreateConfig()
config.MinimumChunkSize = 100
config.chunkPool = make(chan *Chunk, numberOfChunks * 2)
totalFileSize := 0
for i := 0; i < numberOfChunks; i++ {
content := make([]byte, rand.Int()%maxChunkSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
for i := 0; i < numberOfChunks; i++ {
content := make([]byte, rand.Int() % maxChunkSize + 1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
chunk := CreateChunk(config, true)
chunk.Reset(true)
chunk.Write(content)
chunks = append(chunks, chunk)
chunk := CreateChunk(config, true)
chunk.Reset(true)
chunk.Write(content)
chunks = append(chunks, chunk)
t.Logf("Chunk: %s, size: %d", chunk.GetID(), chunk.GetLength())
totalFileSize += chunk.GetLength()
}
t.Logf("Chunk: %s, size: %d", chunk.GetID(), chunk.GetLength())
totalFileSize += chunk.GetLength()
}
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
}
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
}
chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
for i, chunk := range chunks {
chunkUploader.StartChunk(chunk, i)
}
for i, chunk := range chunks {
chunkUploader.StartChunk(chunk, i)
}
chunkUploader.Stop()
chunkUploader.Stop()
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
chunkDownloader.totalChunkSize = int64(totalFileSize)
for _, chunk := range chunks {
chunkDownloader.AddChunk(chunk.GetHash())
}
chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
chunkDownloader.totalChunkSize = int64(totalFileSize)
for i, chunk := range chunks {
downloaded := chunkDownloader.WaitForChunk(i)
if downloaded.GetID() != chunk.GetID() {
t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
}
}
for _, chunk := range chunks {
chunkDownloader.AddChunk(chunk.GetHash())
}
chunkDownloader.Stop()
for i, chunk := range chunks {
downloaded := chunkDownloader.WaitForChunk(i)
if downloaded.GetID() != chunk.GetID() {
t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
}
}
chunkDownloader.Stop()
for _, file := range listChunks(storage) {
err = storage.DeleteFile(0, "chunks/" + file)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", file, err)
return
}
}
for _, file := range listChunks(storage) {
err = storage.DeleteFile(0, "chunks/"+file)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", file, err)
return
}
}
}

View File

@@ -5,20 +5,20 @@
package duplicacy
import (
"encoding/json"
"bytes"
"os"
"fmt"
"hash"
"runtime"
"runtime/debug"
"sync/atomic"
"crypto/rand"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
"bytes"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"hash"
"os"
"runtime"
"runtime/debug"
"sync/atomic"
blake2 "github.com/minio/blake2b-simd"
blake2 "github.com/minio/blake2b-simd"
)
// If encryption is turned off, use this key for HMAC-SHA256 or chunk ID generation etc.
@@ -29,209 +29,209 @@ var DEFAULT_KEY = []byte("duplicacy")
var DEFAULT_COMPRESSION_LEVEL = 100
type Config struct {
CompressionLevel int `json:"compression-level"`
AverageChunkSize int `json:"average-chunk-size"`
MaximumChunkSize int `json:"max-chunk-size"`
MinimumChunkSize int `json:"min-chunk-size"`
CompressionLevel int `json:"compression-level"`
AverageChunkSize int `json:"average-chunk-size"`
MaximumChunkSize int `json:"max-chunk-size"`
MinimumChunkSize int `json:"min-chunk-size"`
ChunkSeed []byte `json:"chunk-seed"`
ChunkSeed []byte `json:"chunk-seed"`
// Use HMAC-SHA256(hashKey, plaintext) as the chunk hash.
// Use HMAC-SHA256(idKey, chunk hash) as the file name of the chunk
// For chunks, use HMAC-SHA256(chunkKey, chunk hash) as the encryption key
// For files, use HMAC-SHA256(fileKey, file path) as the encryption key
// Use HMAC-SHA256(hashKey, plaintext) as the chunk hash.
// Use HMAC-SHA256(idKey, chunk hash) as the file name of the chunk
// For chunks, use HMAC-SHA256(chunkKey, chunk hash) as the encryption key
// For files, use HMAC-SHA256(fileKey, file path) as the encryption key
// the HMAC-SHA256 key of the chunk data
HashKey []byte `json:"-"`
// the HMAC-SHA256 key of the chunk data
HashKey []byte `json:"-"`
// used to generate an id from the chunk hash
IDKey []byte `json:"-"`
// used to generate an id from the chunk hash
IDKey []byte `json:"-"`
// for encrypting a chunk
ChunkKey []byte `json:"-"`
// for encrypting a chunk
ChunkKey []byte `json:"-"`
// for encrypting a non-chunk file
FileKey []byte `json:"-"`
// for encrypting a non-chunk file
FileKey []byte `json:"-"`
chunkPool chan *Chunk `json:"-"`
numberOfChunks int32
dryRun bool
chunkPool chan *Chunk `json:"-"`
numberOfChunks int32
dryRun bool
}
// Create an alias to avoid recursive calls on Config.MarshalJSON
// Create an alias to avoid recursive calls on Config.MarshalJSON
type aliasedConfig Config
type jsonableConfig struct {
*aliasedConfig
ChunkSeed string `json:"chunk-seed"`
HashKey string `json:"hash-key"`
IDKey string `json:"id-key"`
ChunkKey string `json:"chunk-key"`
FileKey string `json:"file-key"`
*aliasedConfig
ChunkSeed string `json:"chunk-seed"`
HashKey string `json:"hash-key"`
IDKey string `json:"id-key"`
ChunkKey string `json:"chunk-key"`
FileKey string `json:"file-key"`
}
func (config *Config) MarshalJSON() ([] byte, error) {
func (config *Config) MarshalJSON() ([]byte, error) {
return json.Marshal(&jsonableConfig {
aliasedConfig: (*aliasedConfig)(config),
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
HashKey: hex.EncodeToString(config.HashKey),
IDKey: hex.EncodeToString(config.IDKey),
ChunkKey: hex.EncodeToString(config.ChunkKey),
FileKey: hex.EncodeToString(config.FileKey),
})
return json.Marshal(&jsonableConfig{
aliasedConfig: (*aliasedConfig)(config),
ChunkSeed: hex.EncodeToString(config.ChunkSeed),
HashKey: hex.EncodeToString(config.HashKey),
IDKey: hex.EncodeToString(config.IDKey),
ChunkKey: hex.EncodeToString(config.ChunkKey),
FileKey: hex.EncodeToString(config.FileKey),
})
}
func (config *Config) UnmarshalJSON(description []byte) (err error) {
aliased := &jsonableConfig {
aliasedConfig: (*aliasedConfig)(config),
}
aliased := &jsonableConfig{
aliasedConfig: (*aliasedConfig)(config),
}
if err = json.Unmarshal(description, &aliased); err != nil {
return err
}
if err = json.Unmarshal(description, &aliased); err != nil {
return err
}
if config.ChunkSeed, err = hex.DecodeString(aliased.ChunkSeed); err != nil {
return fmt.Errorf("Invalid representation of the chunk seed in the config")
}
if config.HashKey, err = hex.DecodeString(aliased.HashKey); err != nil {
return fmt.Errorf("Invalid representation of the hash key in the config")
}
if config.IDKey, err = hex.DecodeString(aliased.IDKey); err != nil {
return fmt.Errorf("Invalid representation of the id key in the config")
}
if config.ChunkKey, err = hex.DecodeString(aliased.ChunkKey); err != nil {
return fmt.Errorf("Invalid representation of the chunk key in the config")
}
if config.FileKey, err = hex.DecodeString(aliased.FileKey); err != nil {
return fmt.Errorf("Invalid representation of the file key in the config")
}
if config.ChunkSeed, err = hex.DecodeString(aliased.ChunkSeed); err != nil {
return fmt.Errorf("Invalid representation of the chunk seed in the config")
}
if config.HashKey, err = hex.DecodeString(aliased.HashKey); err != nil {
return fmt.Errorf("Invalid representation of the hash key in the config")
}
if config.IDKey, err = hex.DecodeString(aliased.IDKey); err != nil {
return fmt.Errorf("Invalid representation of the id key in the config")
}
if config.ChunkKey, err = hex.DecodeString(aliased.ChunkKey); err != nil {
return fmt.Errorf("Invalid representation of the chunk key in the config")
}
if config.FileKey, err = hex.DecodeString(aliased.FileKey); err != nil {
return fmt.Errorf("Invalid representation of the file key in the config")
}
return nil
return nil
}
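// A minimal sketch of the aliasing trick used above (illustrative, with a
// hypothetical type 'inner'): a defined type such as aliasedConfig keeps the
// fields of Config but none of its methods, so the json.Marshal call inside
// MarshalJSON cannot re-enter itself.
//
//	type inner struct{ Value int }
//	type innerAlias inner // sheds inner's methods, including MarshalJSON
//
//	func (v *inner) MarshalJSON() ([]byte, error) {
//		return json.Marshal((*innerAlias)(v)) // no recursion
//	}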
func (config *Config) IsCompatiableWith(otherConfig *Config) bool {
return config.CompressionLevel == otherConfig.CompressionLevel &&
config.AverageChunkSize == otherConfig.AverageChunkSize &&
config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
bytes.Equal(config.HashKey, otherConfig.HashKey)
return config.CompressionLevel == otherConfig.CompressionLevel &&
config.AverageChunkSize == otherConfig.AverageChunkSize &&
config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
bytes.Equal(config.HashKey, otherConfig.HashKey)
}
func (config *Config) Print() {
LOG_INFO("CONFIG_INFO", "Compression level: %d", config.CompressionLevel)
LOG_INFO("CONFIG_INFO", "Average chunk size: %d", config.AverageChunkSize)
LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
LOG_INFO("CONFIG_INFO", "Compression level: %d", config.CompressionLevel)
LOG_INFO("CONFIG_INFO", "Average chunk size: %d", config.AverageChunkSize)
LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
}
func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, mininumChunkSize int,
isEncrypted bool, copyFrom *Config) (config *Config) {
isEncrypted bool, copyFrom *Config) (config *Config) {
config = &Config {
CompressionLevel: compressionLevel,
AverageChunkSize: averageChunkSize,
MaximumChunkSize: maximumChunkSize,
MinimumChunkSize: mininumChunkSize,
}
config = &Config{
CompressionLevel: compressionLevel,
AverageChunkSize: averageChunkSize,
MaximumChunkSize: maximumChunkSize,
MinimumChunkSize: mininumChunkSize,
}
if isEncrypted {
// Randomly generate keys
keys := make([]byte, 32 * 5)
_, err := rand.Read(keys)
if err != nil {
LOG_ERROR("CONFIG_KEY", "Failed to generate random keys: %v", err)
return nil
}
if isEncrypted {
// Randomly generate keys
keys := make([]byte, 32*5)
_, err := rand.Read(keys)
if err != nil {
LOG_ERROR("CONFIG_KEY", "Failed to generate random keys: %v", err)
return nil
}
config.ChunkSeed = keys[:32]
config.HashKey = keys[32:64]
config.IDKey = keys[64:96]
config.ChunkKey = keys[96:128]
config.FileKey = keys[128:]
} else {
config.ChunkSeed = DEFAULT_KEY
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
}
config.ChunkSeed = keys[:32]
config.HashKey = keys[32:64]
config.IDKey = keys[64:96]
config.ChunkKey = keys[96:128]
config.FileKey = keys[128:]
} else {
config.ChunkSeed = DEFAULT_KEY
config.HashKey = DEFAULT_KEY
config.IDKey = DEFAULT_KEY
}
if copyFrom != nil {
config.CompressionLevel = copyFrom.CompressionLevel
if copyFrom != nil {
config.CompressionLevel = copyFrom.CompressionLevel
config.AverageChunkSize = copyFrom.AverageChunkSize
config.MaximumChunkSize = copyFrom.MaximumChunkSize
config.MinimumChunkSize = copyFrom.MinimumChunkSize
config.AverageChunkSize = copyFrom.AverageChunkSize
config.MaximumChunkSize = copyFrom.MaximumChunkSize
config.MinimumChunkSize = copyFrom.MinimumChunkSize
config.ChunkSeed = copyFrom.ChunkSeed
config.HashKey = copyFrom.HashKey
}
config.ChunkSeed = copyFrom.ChunkSeed
config.HashKey = copyFrom.HashKey
}
config.chunkPool = make(chan *Chunk, runtime.NumCPU() * 16)
config.chunkPool = make(chan *Chunk, runtime.NumCPU()*16)
return config
return config
}
func CreateConfig() (config *Config) {
return &Config {
HashKey: DEFAULT_KEY,
IDKey: DEFAULT_KEY,
CompressionLevel: DEFAULT_COMPRESSION_LEVEL,
chunkPool: make(chan *Chunk, runtime.NumCPU() * 16),
}
return &Config{
HashKey: DEFAULT_KEY,
IDKey: DEFAULT_KEY,
CompressionLevel: DEFAULT_COMPRESSION_LEVEL,
chunkPool: make(chan *Chunk, runtime.NumCPU()*16),
}
}
func (config *Config) GetChunk() (chunk *Chunk) {
select {
case chunk = <- config.chunkPool :
default:
numberOfChunks := atomic.AddInt32(&config.numberOfChunks, 1)
if numberOfChunks >= int32(runtime.NumCPU() * 16) {
LOG_WARN("CONFIG_CHUNK", "%d chunks have been allocated", numberOfChunks)
if _, found := os.LookupEnv("DUPLICACY_CHUNK_DEBUG"); found {
debug.PrintStack()
}
}
chunk = CreateChunk(config, true)
}
return chunk
select {
case chunk = <-config.chunkPool:
default:
numberOfChunks := atomic.AddInt32(&config.numberOfChunks, 1)
if numberOfChunks >= int32(runtime.NumCPU()*16) {
LOG_WARN("CONFIG_CHUNK", "%d chunks have been allocated", numberOfChunks)
if _, found := os.LookupEnv("DUPLICACY_CHUNK_DEBUG"); found {
debug.PrintStack()
}
}
chunk = CreateChunk(config, true)
}
return chunk
}
func (config *Config) PutChunk(chunk *Chunk){
func (config *Config) PutChunk(chunk *Chunk) {
if chunk == nil {
return
}
if chunk == nil {
return
}
select {
case config.chunkPool <- chunk:
default:
LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk due to a full pool")
}
select {
case config.chunkPool <- chunk:
default:
LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk due to a full pool")
}
}
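// Illustrative borrow/return pattern for the pool above (not part of this
// commit); 'data' is a placeholder byte slice:
//
//	chunk := config.GetChunk()   // reuse a pooled chunk or allocate a new one
//	defer config.PutChunk(chunk) // return it (discarded if the pool is full)
//	chunk.Reset(true)
//	chunk.Write(data)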
func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
hasher, err := blake2.New(&blake2.Config{ Size: 32, Key:key })
if err != nil {
LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
}
return hasher
} else {
return hmac.New(sha256.New, key)
}
if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
if err != nil {
LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
}
return hasher
} else {
return hmac.New(sha256.New, key)
}
}
var SkipFileHash = false
func init() {
if value, found := os.LookupEnv("DUPLICACY_SKIP_FILE_HASH"); found && value != "" && value != "0" {
SkipFileHash = true
}
if value, found := os.LookupEnv("DUPLICACY_SKIP_FILE_HASH"); found && value != "" && value != "0" {
SkipFileHash = true
}
}
// Implement a dummy hasher to be used when SkipFileHash is true.
@@ -239,190 +239,189 @@ type DummyHasher struct {
}
func (hasher *DummyHasher) Write(p []byte) (int, error) {
return len(p), nil
return len(p), nil
}
func (hasher *DummyHasher) Sum(b []byte) []byte {
return []byte("")
return []byte("")
}
func (hasher *DummyHasher) Reset() {
}
func (hasher *DummyHasher) Size() int {
return 0
return 0
}
func (hasher *DummyHasher) BlockSize() int {
return 0
return 0
}
func (config *Config) NewFileHasher() hash.Hash {
if SkipFileHash {
return &DummyHasher {}
} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
hasher, _ := blake2.New(&blake2.Config{ Size: 32 })
return hasher
} else {
return sha256.New()
}
if SkipFileHash {
return &DummyHasher{}
} else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
hasher, _ := blake2.New(&blake2.Config{Size: 32})
return hasher
} else {
return sha256.New()
}
}
// Calculate the file hash using the corresponding hasher
func (config *Config) ComputeFileHash(path string, buffer []byte) string {
file, err := os.Open(path)
if err != nil {
return ""
}
file, err := os.Open(path)
if err != nil {
return ""
}
hasher := config.NewFileHasher()
defer file.Close()
hasher := config.NewFileHasher()
defer file.Close()
count := 1
for count > 0 {
count, err = file.Read(buffer)
hasher.Write(buffer[:count])
}
count := 1
for count > 0 {
count, err = file.Read(buffer)
hasher.Write(buffer[:count])
}
return hex.EncodeToString(hasher.Sum(nil))
return hex.EncodeToString(hasher.Sum(nil))
}
// GetChunkIDFromHash creates a chunk id from the chunk hash. The chunk id will be used as the name of the chunk
// file, so it is publicly exposed. The chunk hash is the HMAC-SHA256 of what is contained in the chunk and should
// never be exposed.
func (config *Config) GetChunkIDFromHash(hash string) string {
hasher := config.NewKeyedHasher(config.IDKey)
hasher.Write([]byte(hash))
return hex.EncodeToString(hasher.Sum(nil))
hasher := config.NewKeyedHasher(config.IDKey)
hasher.Write([]byte(hash))
return hex.EncodeToString(hasher.Sum(nil))
}
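// A short illustration of the naming scheme described by the key comments in
// Config (assumes an initialized config and chunk):
//
//	hash := chunk.GetHash()               // HMAC-SHA256(HashKey, plaintext); kept secret
//	id := config.GetChunkIDFromHash(hash) // HMAC-SHA256(IDKey, hash), hex-encoded
//	// 'id' is what FindChunk and UploadFile use as the on-storage file
//	// name, so the chunk hash itself is never exposed to the storage.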
func DownloadConfig(storage Storage, password string) (config *Config, isEncrypted bool, err error) {
// Although the default key is passed to the function call, the key is not actually used since there is no need to
// calculate the hash or id of the config file.
configFile := CreateChunk(CreateConfig(), true)
// Although the default key is passed to the function call, the key is not actually used since there is no need to
// calculate the hash or id of the config file.
configFile := CreateChunk(CreateConfig(), true)
exist, _, _, err := storage.GetFileInfo(0, "config")
if err != nil {
return nil, false, err
}
exist, _, _, err := storage.GetFileInfo(0, "config")
if err != nil {
return nil, false, err
}
if !exist {
return nil, false, nil
}
if !exist {
return nil, false, nil
}
err = storage.DownloadFile(0, "config", configFile)
if err != nil {
return nil, false, err
}
err = storage.DownloadFile(0, "config", configFile)
if err != nil {
return nil, false, err
}
var masterKey []byte
var masterKey []byte
if len(password) > 0 {
masterKey = GenerateKeyFromPassword(password)
if len(password) > 0 {
masterKey = GenerateKeyFromPassword(password)
// Decrypt the config file. masterKey == nil means no encryption.
err = configFile.Decrypt(masterKey, "")
if err != nil {
return nil, false, fmt.Errorf("Failed to retrieve the config file: %v", err)
}
}
// Decrypt the config file. masterKey == nil means no encryption.
err = configFile.Decrypt(masterKey, "")
if err != nil {
return nil, false, fmt.Errorf("Failed to retrieve the config file: %v", err)
}
}
config = CreateConfig()
config = CreateConfig()
err = json.Unmarshal(configFile.GetBytes(), config)
err = json.Unmarshal(configFile.GetBytes(), config)
if err != nil {
if bytes.Equal(configFile.GetBytes()[:9], []byte("duplicacy")) {
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
} else {
return nil, false, fmt.Errorf("Failed to parse the config file: %v", err)
}
}
if err != nil {
if bytes.Equal(configFile.GetBytes()[:9], []byte("duplicacy")) {
return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
} else {
return nil, false, fmt.Errorf("Failed to parse the config file: %v", err)
}
}
return config, false, nil
return config, false, nil
}
func UploadConfig(storage Storage, config *Config, password string) (bool) {
func UploadConfig(storage Storage, config *Config, password string) bool {
// This is the key to encrypt the config file.
var masterKey []byte
// This is the key to encrypt the config file.
var masterKey []byte
if len(password) > 0 {
if len(password) > 0 {
if len(password) < 8 {
LOG_ERROR("CONFIG_PASSWORD", "The password must be at least 8 characters")
return false
}
if len(password) < 8 {
LOG_ERROR("CONFIG_PASSWORD", "The password must be at least 8 characters")
return false
}
masterKey = GenerateKeyFromPassword(password)
}
masterKey = GenerateKeyFromPassword(password)
}
description, err := json.MarshalIndent(config, "", " ")
if err != nil {
LOG_ERROR("CONFIG_MARSHAL", "Failed to marshal the config: %v", err)
return false
}
description, err := json.MarshalIndent(config, "", " ")
if err != nil {
LOG_ERROR("CONFIG_MARSHAL", "Failed to marshal the config: %v", err)
return false
}
// Although the default key is passed to the function call, the key is not actually used since there is no need to
// calculate the hash or id of the config file.
chunk := CreateChunk(CreateConfig(), true)
chunk.Write(description)
// Although the default key is passed to the function call, the key is not actually used since there is no need to
// calculate the hash or id of the config file.
chunk := CreateChunk(CreateConfig(), true)
chunk.Write(description)
if len(password) > 0 {
// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
err = chunk.Encrypt(masterKey, "")
if len(password) > 0 {
// Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
err = chunk.Encrypt(masterKey, "")
if err != nil {
LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
return false
}
}
if err != nil {
LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
return false
}
}
err = storage.UploadFile(0, "config", chunk.GetBytes())
if err != nil {
LOG_ERROR("CONFIG_INIT", "Failed to configure the storage: %v", err)
return false
}
err = storage.UploadFile(0, "config", chunk.GetBytes())
if err != nil {
LOG_ERROR("CONFIG_INIT", "Failed to configure the storage: %v", err)
return false
}
if IsTracing() {
config.Print()
}
if IsTracing() {
config.Print()
}
for _, subDir := range []string {"chunks", "snapshots"} {
err = storage.CreateDirectory(0, subDir)
if err != nil {
LOG_ERROR("CONFIG_MKDIR", "Failed to create storage subdirectory: %v", err)
}
}
for _, subDir := range []string{"chunks", "snapshots"} {
err = storage.CreateDirectory(0, subDir)
if err != nil {
LOG_ERROR("CONFIG_MKDIR", "Failed to create storage subdirectory: %v", err)
}
}
return true
return true
}
// ConfigStorage makes the general storage space available for storing duplicacy format snapshots. In essence,
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
// is enabled.
func ConfigStorage(storage Storage, compressionLevel int, averageChunkSize int, maximumChunkSize int,
minimumChunkSize int, password string, copyFrom *Config) bool {
minimumChunkSize int, password string, copyFrom *Config) bool {
exist, _, _, err := storage.GetFileInfo(0, "config")
if err != nil {
LOG_ERROR("CONFIG_INIT", "Failed to check if there is an existing config file: %v", err)
return false
}
exist, _, _, err := storage.GetFileInfo(0, "config")
if err != nil {
LOG_ERROR("CONFIG_INIT", "Failed to check if there is an existing config file: %v", err)
return false
}
if exist {
LOG_INFO("CONFIG_EXIST", "The storage has already been configured")
return false
}
if exist {
LOG_INFO("CONFIG_EXIST", "The storage has already been configured")
return false
}
config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
copyFrom)
if config == nil {
return false
}
config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
copyFrom)
if config == nil {
return false
}
return UploadConfig(storage, config, password)
return UploadConfig(storage, config, password)
}
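// Illustrative end-to-end flow tying the functions above together; the chunk
// size arguments are hypothetical:
//
//	if ConfigStorage(storage, 100, 4*1024*1024, 16*1024*1024, 1*1024*1024, password, nil) {
//		config, isEncrypted, err := DownloadConfig(storage, password)
//		// isEncrypted is only true when the file exists but looks encrypted
//		// and cannot be parsed; otherwise config carries the chunking
//		// parameters and keys just uploaded.
//		_, _, _ = config, isEncrypted, err
//	}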

View File

@@ -5,292 +5,293 @@
package duplicacy
import (
"fmt"
"path"
"strings"
"github.com/gilbertchen/go-dropbox"
"fmt"
"path"
"strings"
"github.com/gilbertchen/go-dropbox"
)
type DropboxStorage struct {
RateLimitedStorage
RateLimitedStorage
clients []*dropbox.Files
storageDir string
clients []*dropbox.Files
storageDir string
}
// CreateDropboxStorage creates a dropbox storage object.
func CreateDropboxStorage(accessToken string, storageDir string, threads int) (storage *DropboxStorage, err error) {
var clients []*dropbox.Files
for i := 0; i < threads; i++ {
client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
clients = append(clients, client)
}
var clients []*dropbox.Files
for i := 0; i < threads; i++ {
client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
clients = append(clients, client)
}
if storageDir == "" || storageDir[0] != '/' {
storageDir = "/" + storageDir
}
if storageDir == "" || storageDir[0] != '/' {
storageDir = "/" + storageDir
}
if len(storageDir) > 1 && storageDir[len(storageDir) - 1] == '/' {
storageDir = storageDir[:len(storageDir) - 1]
}
if len(storageDir) > 1 && storageDir[len(storageDir)-1] == '/' {
storageDir = storageDir[:len(storageDir)-1]
}
storage = &DropboxStorage {
clients: clients,
storageDir: storageDir,
}
storage = &DropboxStorage{
clients: clients,
storageDir: storageDir,
}
err = storage.CreateDirectory(0, "")
if err != nil {
return nil, fmt.Errorf("Can't create storage directory: %v", err)
}
err = storage.CreateDirectory(0, "")
if err != nil {
return nil, fmt.Errorf("Can't create storage directory: %v", err)
}
return storage, nil
return storage, nil
}
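// The leading/trailing slash fix-ups above are repeated in most methods of
// this storage; a hypothetical helper capturing the normalization used in
// CreateDropboxStorage would look like:
//
//	func normalizeDropboxPath(p string) string {
//		if p == "" || p[0] != '/' {
//			p = "/" + p
//		}
//		if len(p) > 1 && p[len(p)-1] == '/' {
//			p = p[:len(p)-1]
//		}
//		return p
//	}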
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
if dir != "" && dir[0] != '/' {
dir = "/" + dir
}
if dir != "" && dir[0] != '/' {
dir = "/" + dir
}
if len(dir) > 1 && dir[len(dir) - 1] == '/' {
dir = dir[:len(dir) - 1]
}
if len(dir) > 1 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
input := &dropbox.ListFolderInput {
Path : storage.storageDir + dir,
Recursive : false,
IncludeMediaInfo: false,
IncludeDeleted: false,
}
input := &dropbox.ListFolderInput{
Path: storage.storageDir + dir,
Recursive: false,
IncludeMediaInfo: false,
IncludeDeleted: false,
}
output, err := storage.clients[threadIndex].ListFolder(input)
output, err := storage.clients[threadIndex].ListFolder(input)
for {
for {
if err != nil {
return nil, nil, err
}
if err != nil {
return nil, nil, err
}
for _, entry := range output.Entries {
name := entry.Name
if entry.Tag == "folder" {
name += "/"
}
files = append(files, name)
sizes = append(sizes, int64(entry.Size))
}
for _, entry := range output.Entries {
name := entry.Name
if entry.Tag == "folder" {
name += "/"
}
files = append(files, name)
sizes = append(sizes, int64(entry.Size))
}
if output.HasMore {
output, err = storage.clients[threadIndex].ListFolderContinue(
&dropbox.ListFolderContinueInput { Cursor: output.Cursor, })
if output.HasMore {
output, err = storage.clients[threadIndex].ListFolderContinue(
&dropbox.ListFolderContinueInput{Cursor: output.Cursor})
} else {
break
}
} else {
break
}
}
}
return files, sizes, nil
return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *DropboxStorage) DeleteFile(threadIndex int, filePath string) (err error) {
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
input := &dropbox.DeleteInput {
Path: storage.storageDir + filePath,
}
_, err = storage.clients[threadIndex].Delete(input)
if err != nil {
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path_lookup/not_found/") {
return nil
}
}
input := &dropbox.DeleteInput{
Path: storage.storageDir + filePath,
}
_, err = storage.clients[threadIndex].Delete(input)
if err != nil {
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path_lookup/not_found/") {
return nil
}
}
return err
return err
}
// MoveFile renames the file.
func (storage *DropboxStorage) MoveFile(threadIndex int, from string, to string) (err error) {
if from != "" && from[0] != '/' {
from = "/" + from
}
if to != "" && to[0] != '/' {
to = "/" + to
}
input := &dropbox.MoveInput {
FromPath: storage.storageDir + from,
ToPath: storage.storageDir + to,
}
_, err = storage.clients[threadIndex].Move(input)
return err
if from != "" && from[0] != '/' {
from = "/" + from
}
if to != "" && to[0] != '/' {
to = "/" + to
}
input := &dropbox.MoveInput{
FromPath: storage.storageDir + from,
ToPath: storage.storageDir + to,
}
_, err = storage.clients[threadIndex].Move(input)
return err
}
// CreateDirectory creates a new directory.
func (storage *DropboxStorage) CreateDirectory(threadIndex int, dir string) (err error) {
if dir != "" && dir[0] != '/' {
dir = "/" + dir
}
if dir != "" && dir[0] != '/' {
dir = "/" + dir
}
if len(dir) > 1 && dir[len(dir) - 1] == '/' {
dir = dir[:len(dir) - 1]
}
if len(dir) > 1 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
input := &dropbox.CreateFolderInput {
Path : storage.storageDir + dir,
}
input := &dropbox.CreateFolderInput{
Path: storage.storageDir + dir,
}
_, err = storage.clients[threadIndex].CreateFolder(input)
if err != nil {
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/conflict/") {
return nil
}
}
return err
_, err = storage.clients[threadIndex].CreateFolder(input)
if err != nil {
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/conflict/") {
return nil
}
}
return err
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *DropboxStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
input := &dropbox.GetMetadataInput {
Path: storage.storageDir + filePath,
IncludeMediaInfo: false,
}
input := &dropbox.GetMetadataInput{
Path: storage.storageDir + filePath,
IncludeMediaInfo: false,
}
output, err := storage.clients[threadIndex].GetMetadata(input)
if err != nil {
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/not_found/") {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
output, err := storage.clients[threadIndex].GetMetadata(input)
if err != nil {
if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/not_found/") {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
return true, output.Tag == "folder", int64(output.Size), nil
return true, output.Tag == "folder", int64(output.Size), nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *DropboxStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
dir := "/chunks"
dir := "/chunks"
suffix := ""
if isFossil {
suffix = ".fsl"
}
suffix := ""
if isFossil {
suffix = ".fsl"
}
// The minimum level of directories to dive into before searching for the chunk file.
minimumLevel := 1
// The minimum level of directories to dive into before searching for the chunk file.
minimumLevel := 1
for level := 0; level * 2 < len(chunkID); level ++ {
if level >= minimumLevel {
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
var size int64
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
if err != nil {
return "", false, 0, err
}
if exist {
return filePath, exist, size, nil
}
}
for level := 0; level*2 < len(chunkID); level++ {
if level >= minimumLevel {
filePath = path.Join(dir, chunkID[2*level:]) + suffix
var size int64
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
if err != nil {
return "", false, 0, err
}
if exist {
return filePath, exist, size, nil
}
}
// Find the subdirectory the chunk file may reside.
subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
exist, _, _, err = storage.GetFileInfo(threadIndex, subDir)
if err != nil {
return "", false, 0, err
}
// Find the subdirectory the chunk file may reside.
subDir := path.Join(dir, chunkID[2*level:2*level+2])
exist, _, _, err = storage.GetFileInfo(threadIndex, subDir)
if err != nil {
return "", false, 0, err
}
if exist {
dir = subDir
continue
}
if exist {
dir = subDir
continue
}
if level < minimumLevel {
// Create the subdirectory if it doesn't exist.
err = storage.CreateDirectory(threadIndex, subDir)
if err != nil {
return "", false, 0, err
}
if level < minimumLevel {
// Create the subdirectory if it doesn't exist.
err = storage.CreateDirectory(threadIndex, subDir)
if err != nil {
return "", false, 0, err
}
dir = subDir
continue
}
dir = subDir
continue
}
// The chunk must be under this subdirectory but it doesn't exist.
return path.Join(dir, chunkID[2 * level:])[1:] + suffix, false, 0, nil
// The chunk must be under this subdirectory but it doesn't exist.
return path.Join(dir, chunkID[2*level:])[1:] + suffix, false, 0, nil
}
}
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
chunkID)
return "", false, 0, nil
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
chunkID)
return "", false, 0, nil
}
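// A worked example of the directory walk above (illustrative, chunk ID
// shortened): with minimumLevel == 1 and chunkID "abcdef", level 0 always
// descends into "/chunks/ab" (creating it when missing); level 1 first
// probes the file "/chunks/ab/cdef", and only if the subdirectory
// "/chunks/ab/cd" already exists does the search continue one level deeper;
// otherwise "chunks/ab/cdef" is reported as the (missing) chunk path.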
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
input := &dropbox.DownloadInput {
Path: storage.storageDir + filePath,
}
input := &dropbox.DownloadInput{
Path: storage.storageDir + filePath,
}
output, err := storage.clients[threadIndex].Download(input)
if err != nil {
return err
}
output, err := storage.clients[threadIndex].Download(input)
if err != nil {
return err
}
defer output.Body.Close()
defer output.Body.Close()
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.clients))
return err
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
if filePath != "" && filePath[0] != '/' {
filePath = "/" + filePath
}
input := &dropbox.UploadInput {
Path: storage.storageDir + filePath,
Mode: dropbox.WriteModeOverwrite,
AutoRename: false,
Mute: true,
Reader: CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients)),
}
input := &dropbox.UploadInput{
Path: storage.storageDir + filePath,
Mode: dropbox.WriteModeOverwrite,
AutoRename: false,
Mute: true,
Reader: CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.clients)),
}
_, err = storage.clients[threadIndex].Upload(input)
return err
_, err = storage.clients[threadIndex].Upload(input)
return err
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *DropboxStorage) IsCacheNeeded() (bool) { return true }
func (storage *DropboxStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *DropboxStorage) IsMoveFileImplemented() (bool) { return true }
func (storage *DropboxStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *DropboxStorage) IsStrongConsistent() (bool) { return false }
func (storage *DropboxStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of files names.
func (storage *DropboxStorage) IsFastListing() (bool) { return false }
func (storage *DropboxStorage) IsFastListing() bool { return false }
// Enable the test mode.
func (storage *DropboxStorage) EnableTestMode() {}

View File

@@ -4,22 +4,20 @@
package duplicacy
import (
"os"
"fmt"
"path/filepath"
"io/ioutil"
"sort"
"regexp"
"strconv"
"time"
"encoding/json"
"encoding/base64"
"strings"
"runtime"
"encoding/base64"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
)
// This is the hidden directory in the repository for storing various files.
var DUPLICACY_DIRECTORY = ".duplicacy"
var DUPLICACY_FILE = ".duplicacy"
@@ -29,546 +27,545 @@ var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)
// Entry encapsulates information about a file or directory.
type Entry struct {
Path string
Size int64
Time int64
Mode uint32
Link string
Hash string
Path string
Size int64
Time int64
Mode uint32
Link string
Hash string
UID int
GID int
UID int
GID int
StartChunk int
StartOffset int
EndChunk int
EndOffset int
StartChunk int
StartOffset int
EndChunk int
EndOffset int
Attributes map[string][]byte
Attributes map[string][]byte
}
// CreateEntry creates an entry from file properties.
func CreateEntry(path string, size int64, time int64, mode uint32) *Entry {
if len(path) > 0 && path[len(path) - 1] != '/' && (mode & uint32(os.ModeDir)) != 0 {
path += "/"
}
if len(path) > 0 && path[len(path)-1] != '/' && (mode&uint32(os.ModeDir)) != 0 {
path += "/"
}
return &Entry {
Path : path,
Size : size,
Time : time,
Mode : mode,
return &Entry{
Path: path,
Size: size,
Time: time,
Mode: mode,
UID : -1,
GID : -1,
}
UID: -1,
GID: -1,
}
}
// CreateEntryFromFileInfo creates an entry from a 'FileInfo' object.
func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {
path := directory + fileInfo.Name()
path := directory + fileInfo.Name()
mode := fileInfo.Mode()
mode := fileInfo.Mode()
if mode & os.ModeDir != 0 && mode & os.ModeSymlink != 0 {
mode ^= os.ModeDir
}
if mode&os.ModeDir != 0 && mode&os.ModeSymlink != 0 {
mode ^= os.ModeDir
}
if path[len(path) - 1] != '/' && mode & os.ModeDir != 0 {
path += "/"
}
if path[len(path)-1] != '/' && mode&os.ModeDir != 0 {
path += "/"
}
entry := &Entry {
Path: path,
Size: fileInfo.Size(),
Time: fileInfo.ModTime().Unix(),
Mode: uint32(mode),
}
entry := &Entry{
Path: path,
Size: fileInfo.Size(),
Time: fileInfo.ModTime().Unix(),
Mode: uint32(mode),
}
GetOwner(entry, &fileInfo)
GetOwner(entry, &fileInfo)
return entry
return entry
}
// CreateEntryFromJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {
var object map[string]interface {}
var object map[string]interface{}
err = json.Unmarshal(description, &object)
if err != nil {
return err
}
err = json.Unmarshal(description, &object)
if err != nil {
return err
}
var value interface {}
var ok bool
var value interface{}
var ok bool
if value, ok = object["name"]; ok {
pathInBase64, ok := value.(string)
if !ok {
return fmt.Errorf("Name is not a string for a file in the snapshot")
}
path, err := base64.StdEncoding.DecodeString(pathInBase64)
if err != nil {
return fmt.Errorf("Invalid name '%s' in the snapshot", pathInBase64)
}
entry.Path = string(path)
} else if value, ok = object["path"]; !ok {
return fmt.Errorf("Path is not specified for a file in the snapshot")
} else if entry.Path, ok = value.(string); !ok {
return fmt.Errorf("Path is not a string for a file in the snapshot")
}
if value, ok = object["name"]; ok {
pathInBase64, ok := value.(string)
if !ok {
return fmt.Errorf("Name is not a string for a file in the snapshot")
}
path, err := base64.StdEncoding.DecodeString(pathInBase64)
if err != nil {
return fmt.Errorf("Invalid name '%s' in the snapshot", pathInBase64)
}
entry.Path = string(path)
} else if value, ok = object["path"]; !ok {
return fmt.Errorf("Path is not specified for a file in the snapshot")
} else if entry.Path, ok = value.(string); !ok {
return fmt.Errorf("Path is not a string for a file in the snapshot")
}
if value, ok = object["size"]; !ok {
return fmt.Errorf("Size is not specified for file '%s' in the snapshot", entry.Path)
} else if _, ok = value.(float64); !ok {
return fmt.Errorf("Size is not a valid integer for file '%s' in the snapshot", entry.Path)
}
entry.Size = int64(value.(float64))
if value, ok = object["size"]; !ok {
return fmt.Errorf("Size is not specified for file '%s' in the snapshot", entry.Path)
} else if _, ok = value.(float64); !ok {
return fmt.Errorf("Size is not a valid integer for file '%s' in the snapshot", entry.Path)
}
entry.Size = int64(value.(float64))
if value, ok = object["time"]; !ok {
return fmt.Errorf("Time is not specified for file '%s' in the snapshot", entry.Path)
} else if _, ok = value.(float64); !ok {
return fmt.Errorf("Time is not a valid integer for file '%s' in the snapshot", entry.Path)
}
entry.Time = int64(value.(float64))
if value, ok = object["time"]; !ok {
return fmt.Errorf("Time is not specified for file '%s' in the snapshot", entry.Path)
} else if _, ok = value.(float64); !ok {
return fmt.Errorf("Time is not a valid integer for file '%s' in the snapshot", entry.Path)
}
entry.Time = int64(value.(float64))
if value, ok = object["mode"]; !ok {
return fmt.Errorf("float64 is not specified for file '%s' in the snapshot", entry.Path)
} else if _, ok = value.(float64); !ok {
return fmt.Errorf("Mode is not a valid integer for file '%s' in the snapshot", entry.Path)
}
entry.Mode = uint32(value.(float64))
if value, ok = object["mode"]; !ok {
return fmt.Errorf("float64 is not specified for file '%s' in the snapshot", entry.Path)
} else if _, ok = value.(float64); !ok {
return fmt.Errorf("Mode is not a valid integer for file '%s' in the snapshot", entry.Path)
}
entry.Mode = uint32(value.(float64))
if value, ok = object["hash"]; !ok {
return fmt.Errorf("Hash is not specified for file '%s' in the snapshot", entry.Path)
} else if entry.Hash, ok = value.(string); !ok {
return fmt.Errorf("Hash is not a string for file '%s' in the snapshot", entry.Path)
}
if value, ok = object["hash"]; !ok {
return fmt.Errorf("Hash is not specified for file '%s' in the snapshot", entry.Path)
} else if entry.Hash, ok = value.(string); !ok {
return fmt.Errorf("Hash is not a string for file '%s' in the snapshot", entry.Path)
}
if value, ok = object["link"]; ok {
var link string
if link, ok = value.(string); !ok {
return fmt.Errorf("Symlink is not a valid string for file '%s' in the snapshot", entry.Path)
}
entry.Link = link
}
if value, ok = object["link"]; ok {
var link string
if link, ok = value.(string); !ok {
return fmt.Errorf("Symlink is not a valid string for file '%s' in the snapshot", entry.Path)
}
entry.Link = link
}
entry.UID = -1
if value, ok = object["uid"]; ok {
if _, ok = value.(float64); ok {
entry.UID = int(value.(float64))
}
}
entry.UID = -1
if value, ok = object["uid"]; ok {
if _, ok = value.(float64); ok {
entry.UID = int(value.(float64))
}
}
entry.GID = -1
if value, ok = object["gid"]; ok {
if _, ok = value.(float64); ok {
entry.GID = int(value.(float64))
}
}
entry.GID = -1
if value, ok = object["gid"]; ok {
if _, ok = value.(float64); ok {
entry.GID = int(value.(float64))
}
}
if value, ok = object["attributes"]; ok {
if attributes, ok := value.(map[string]interface {}); !ok {
return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
} else {
entry.Attributes = make(map[string][]byte)
for name, object := range attributes {
if object == nil {
entry.Attributes[name] = []byte("")
} else if attributeInBase64, ok := object.(string); !ok {
return fmt.Errorf("Attribute '%s' is invalid for file '%s' in the snapshot", name, entry.Path)
} else if attribute, err := base64.StdEncoding.DecodeString(attributeInBase64); err != nil {
return fmt.Errorf("Failed to decode attribute '%s' for file '%s' in the snapshot: %v",
name, entry.Path, err)
} else {
entry.Attributes[name] = attribute
}
}
}
}
if value, ok = object["attributes"]; ok {
if attributes, ok := value.(map[string]interface{}); !ok {
return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
} else {
entry.Attributes = make(map[string][]byte)
for name, object := range attributes {
if object == nil {
entry.Attributes[name] = []byte("")
} else if attributeInBase64, ok := object.(string); !ok {
return fmt.Errorf("Attribute '%s' is invalid for file '%s' in the snapshot", name, entry.Path)
} else if attribute, err := base64.StdEncoding.DecodeString(attributeInBase64); err != nil {
return fmt.Errorf("Failed to decode attribute '%s' for file '%s' in the snapshot: %v",
name, entry.Path, err)
} else {
entry.Attributes[name] = attribute
}
}
}
}
if entry.IsFile() && entry.Size > 0 {
if value, ok = object["content"]; !ok {
return fmt.Errorf("Content is not specified for file '%s' in the snapshot", entry.Path)
}
if entry.IsFile() && entry.Size > 0 {
if value, ok = object["content"]; !ok {
return fmt.Errorf("Content is not specified for file '%s' in the snapshot", entry.Path)
}
if content, ok := value.(string); !ok {
return fmt.Errorf("Content is invalid for file '%s' in the snapshot", entry.Path)
} else {
if content, ok := value.(string); !ok {
return fmt.Errorf("Content is invalid for file '%s' in the snapshot", entry.Path)
} else {
matched := contentRegex.FindStringSubmatch(content)
if matched == nil {
return fmt.Errorf("Content is specified in a wrong format for file '%s' in the snapshot", entry.Path)
}
matched := contentRegex.FindStringSubmatch(content)
if matched == nil {
return fmt.Errorf("Content is specified in a wrong format for file '%s' in the snapshot", entry.Path)
}
entry.StartChunk, _ = strconv.Atoi(matched[1])
entry.StartOffset, _ = strconv.Atoi(matched[2])
entry.EndChunk, _ = strconv.Atoi(matched[3])
entry.EndOffset, _ = strconv.Atoi(matched[4])
}
}
entry.StartChunk, _ = strconv.Atoi(matched[1])
entry.StartOffset, _ = strconv.Atoi(matched[2])
entry.EndChunk, _ = strconv.Atoi(matched[3])
entry.EndOffset, _ = strconv.Atoi(matched[4])
}
}
return nil
return nil
}
func (entry *Entry) convertToObject(encodeName bool) map[string]interface{} {
object := make(map[string]interface{})
object := make(map[string]interface{})
if encodeName {
object["name"] = base64.StdEncoding.EncodeToString([]byte(entry.Path))
} else {
object["path"] = entry.Path
}
object["size"] = entry.Size
object["time"] = entry.Time
object["mode"] = entry.Mode
object["hash"] = entry.Hash
if encodeName {
object["name"] = base64.StdEncoding.EncodeToString([]byte(entry.Path))
} else {
object["path"] = entry.Path
}
object["size"] = entry.Size
object["time"] = entry.Time
object["mode"] = entry.Mode
object["hash"] = entry.Hash
if entry.IsLink() {
object["link"] = entry.Link
}
if entry.IsLink() {
object["link"] = entry.Link
}
if entry.IsFile() && entry.Size > 0 {
object["content"] = fmt.Sprintf("%d:%d:%d:%d",
entry.StartChunk, entry.StartOffset, entry.EndChunk, entry.EndOffset)
}
if entry.IsFile() && entry.Size > 0 {
object["content"] = fmt.Sprintf("%d:%d:%d:%d",
entry.StartChunk, entry.StartOffset, entry.EndChunk, entry.EndOffset)
}
if entry.UID != -1 && entry.GID != -1 {
object["uid"] = entry.UID
object["gid"] = entry.GID
}
if entry.UID != -1 && entry.GID != -1 {
object["uid"] = entry.UID
object["gid"] = entry.GID
}
if len(entry.Attributes) > 0 {
object["attributes"] = entry.Attributes
}
if len(entry.Attributes) > 0 {
object["attributes"] = entry.Attributes
}
return object
return object
}
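// The "content" value built above is the compact span format later parsed by
// contentRegex; for example (hypothetical offsets):
//
//	"3:128:7:4096" // the file starts at offset 128 in chunk 3
//	               // and ends at offset 4096 in chunk 7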
// MarshalJSON returns the json description of an entry.
func (entry *Entry) MarshalJSON() ([] byte, error) {
func (entry *Entry) MarshalJSON() ([]byte, error) {
object := entry.convertToObject(true)
description, err := json.Marshal(object)
return description, err
object := entry.convertToObject(true)
description, err := json.Marshal(object)
return description, err
}
func (entry *Entry) IsFile() bool {
return entry.Mode & uint32(os.ModeType) == 0
return entry.Mode&uint32(os.ModeType) == 0
}
func (entry *Entry) IsDir() bool {
return entry.Mode & uint32(os.ModeDir) != 0
return entry.Mode&uint32(os.ModeDir) != 0
}
func (entry *Entry) IsLink() bool {
return entry.Mode & uint32(os.ModeSymlink) != 0
return entry.Mode&uint32(os.ModeSymlink) != 0
}
func (entry *Entry) GetPermissions() os.FileMode {
return os.FileMode(entry.Mode) & os.ModePerm
return os.FileMode(entry.Mode) & os.ModePerm
}
func (entry *Entry) IsSameAs(other *Entry) bool {
return entry.Size == other.Size && entry.Time <= other.Time + 1 && entry.Time >= other.Time - 1
return entry.Size == other.Size && entry.Time <= other.Time+1 && entry.Time >= other.Time-1
}
func (entry *Entry) IsSameAsFileInfo(other os.FileInfo) bool {
time := other.ModTime().Unix()
return entry.Size == other.Size() && entry.Time <= time + 1 && entry.Time >= time - 1
time := other.ModTime().Unix()
return entry.Size == other.Size() && entry.Time <= time+1 && entry.Time >= time-1
}
func (entry *Entry) String(maxSizeDigits int) string {
modifiedTime := time.Unix(entry.Time, 0).Format("2006-01-02 15:04:05")
return fmt.Sprintf("%*d %s %64s %s", maxSizeDigits, entry.Size, modifiedTime, entry.Hash, entry.Path)
modifiedTime := time.Unix(entry.Time, 0).Format("2006-01-02 15:04:05")
return fmt.Sprintf("%*d %s %64s %s", maxSizeDigits, entry.Size, modifiedTime, entry.Hash, entry.Path)
}
func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool {
if fileInfo == nil {
stat, err := os.Stat(fullPath)
fileInfo = &stat
if err != nil {
LOG_ERROR("RESTORE_STAT", "Failed to retrieve the file info: %v", err)
return false
}
}
if fileInfo == nil {
stat, err := os.Stat(fullPath)
fileInfo = &stat
if err != nil {
LOG_ERROR("RESTORE_STAT", "Failed to retrieve the file info: %v", err)
return false
}
}
if (*fileInfo).Mode() & os.ModePerm != entry.GetPermissions() {
err := os.Chmod(fullPath, entry.GetPermissions())
if err != nil {
LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
return false
}
}
if (*fileInfo).Mode()&os.ModePerm != entry.GetPermissions() {
err := os.Chmod(fullPath, entry.GetPermissions())
if err != nil {
LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
return false
}
}
if (*fileInfo).ModTime().Unix() != entry.Time {
modifiedTime := time.Unix(entry.Time, 0)
err := os.Chtimes(fullPath, modifiedTime, modifiedTime)
if err != nil {
LOG_ERROR("RESTORE_CHTIME", "Failed to set the modification time: %v", err)
return false
}
}
if (*fileInfo).ModTime().Unix() != entry.Time {
modifiedTime := time.Unix(entry.Time, 0)
err := os.Chtimes(fullPath, modifiedTime, modifiedTime)
if err != nil {
LOG_ERROR("RESTORE_CHTIME", "Failed to set the modification time: %v", err)
return false
}
}
if len(entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
if len(entry.Attributes) > 0 {
entry.SetAttributesToFile(fullPath)
}
return SetOwner(fullPath, entry, fileInfo)
return SetOwner(fullPath, entry, fileInfo)
}
// Return -1 if 'left' should appear before 'right', 1 if opposite, and 0 if they are the same.
// Files are always arranged before subdirectories under the same parent directory.
func (left *Entry) Compare(right *Entry) int {
    path1 := left.Path
    path2 := right.Path

    p := 0
    for ; p < len(path1) && p < len(path2); p++ {
        if path1[p] != path2[p] {
            break
        }
    }

    // c1, c2 is the first byte that differs
    var c1, c2 byte
    if p < len(path1) {
        c1 = path1[p]
    }
    if p < len(path2) {
        c2 = path2[p]
    }

    // c3, c4 indicates how the current component ends
    // c3 == '/': the current component is a directory
    // c3 != '/': the current component is the last one
    c3 := c1
    for i := p; c3 != '/' && i < len(path1); i++ {
        c3 = path1[i]
    }

    c4 := c2
    for i := p; c4 != '/' && i < len(path2); i++ {
        c4 = path2[i]
    }

    if c3 == '/' {
        if c4 == '/' {
            // We are comparing two directory components
            if c1 == '/' {
                // left is shorter
                // Note that c2 may be smaller than c1, but c1 is '/' which is counted
                // as 0
                return -1
            } else if c2 == '/' {
                // right is shorter
                return 1
            } else {
                return int(c1) - int(c2)
            }
        } else {
            return 1
        }
    } else {
        // We're at the last component of left and left is a file
        if c4 == '/' {
            // the current component of right is a directory
            return -1
        } else {
            return int(c1) - int(c2)
        }
    }
}
// This is used to sort entries by their names.
type ByName []*Entry
func (entries ByName) Len() int { return len(entries) }
func (entries ByName) Swap(i, j int) { entries[i], entries[j] = entries[j], entries[i] }
func (entries ByName) Less(i, j int) bool {
    return entries[i].Compare(entries[j]) < 0
}
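To make the comparison rule concrete, here is a small sketch (not in the original source) that sorts a few entries with ByName; it assumes the surrounding duplicacy package plus the standard fmt, os, and sort imports.

func sketchByNameOrdering() {
    entries := []*Entry{
        CreateEntry("ab/", 0, 0, 0700|uint32(os.ModeDir)),
        CreateEntry("ab0", 0, 0, 0700),
        CreateEntry("ab", 0, 0, 0700),
    }
    sort.Sort(ByName(entries))
    for _, e := range entries {
        fmt.Println(e.Path)
    }
    // Prints "ab", "ab0", "ab/": files sort before the subdirectory.
}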
// This is used to sort entries by their starting chunks (and starting offsets if the starting chunks are the same).
type ByChunk []*Entry
func (entries ByChunk) Len() int { return len(entries) }
func (entries ByChunk) Swap(i, j int) { entries[i], entries[j] = entries[j], entries[i] }
func (entries ByChunk) Less(i, j int) bool {
    return entries[i].StartChunk < entries[j].StartChunk ||
        (entries[i].StartChunk == entries[j].StartChunk && entries[i].StartOffset < entries[j].StartOffset)
}
// This is used to sort FileInfo objects.
type FileInfoCompare []os.FileInfo
func (files FileInfoCompare) Len() int { return len(files) }
func (files FileInfoCompare) Swap(i, j int) { files[i], files[j] = files[j], files[i] }
func (files FileInfoCompare) Less(i, j int) bool {
    left := files[i]
    right := files[j]

    if left.IsDir() && left.Mode()&os.ModeSymlink == 0 {
        if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
            return left.Name() < right.Name()
        } else {
            return false
        }
    } else {
        if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
            return true
        } else {
            return left.Name() < right.Name()
        }
    }
}
// ListEntries returns a list of entries representing file and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, discardAttributes bool) (directoryList []*Entry,
    skippedFiles []string, err error) {
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)
fullPath := joinPath(top, path)
fullPath := joinPath(top, path)
files := make([]os.FileInfo, 0, 1024)
files := make([]os.FileInfo, 0, 1024)
files, err = ioutil.ReadDir(fullPath)
if err != nil {
return directoryList, nil, err
}
files, err = ioutil.ReadDir(fullPath)
if err != nil {
return directoryList, nil, err
}
normalizedPath := path
if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath) - 1] != '/' {
normalizedPath += "/"
}
normalizedPath := path
if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath)-1] != '/' {
normalizedPath += "/"
}
normalizedTop := top
if normalizedTop != "" && normalizedTop[len(normalizedTop) - 1] != '/' {
normalizedTop += "/"
}
normalizedTop := top
if normalizedTop != "" && normalizedTop[len(normalizedTop)-1] != '/' {
normalizedTop += "/"
}
sort.Sort(FileInfoCompare(files))
sort.Sort(FileInfoCompare(files))
entries := make([]*Entry, 0, 4)
entries := make([]*Entry, 0, 4)
for _, f := range files {
if f.Name() == DUPLICACY_DIRECTORY {
continue
}
entry := CreateEntryFromFileInfo(f, normalizedPath)
if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded", entry.Path)
continue
}
if entry.IsLink() {
isRegular := false
isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err )
skippedFiles = append(skippedFiles, entry.Path)
continue
}
for _, f := range files {
if f.Name() == DUPLICACY_DIRECTORY {
continue
}
entry := CreateEntryFromFileInfo(f, normalizedPath)
if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
LOG_DEBUG("LIST_EXCLUDE", "%s is excluded", entry.Path)
continue
}
if entry.IsLink() {
isRegular := false
isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
if isRegular {
entry.Mode ^= uint32(os.ModeSymlink)
} else if path == "" && filepath.IsAbs(entry.Link) && !strings.HasPrefix(entry.Link, normalizedTop) {
stat, err := os.Stat(filepath.Join(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err )
skippedFiles = append(skippedFiles, entry.Path)
continue
}
if isRegular {
entry.Mode ^= uint32(os.ModeSymlink)
} else if path == "" && filepath.IsAbs(entry.Link) && !strings.HasPrefix(entry.Link, normalizedTop) {
stat, err := os.Stat(filepath.Join(top, entry.Path))
if err != nil {
LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
newEntry := CreateEntryFromFileInfo(stat, "")
if runtime.GOOS == "windows" {
// On Windows, stat.Name() is the last component of the target, so we need to construct the correct
// path from f.Name(); note that a "/" is append assuming a symbolic link is always a directory
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
}
entry = newEntry
}
}
newEntry := CreateEntryFromFileInfo(stat, "")
if runtime.GOOS == "windows" {
// On Windows, stat.Name() is the last component of the target, so we need to construct the correct
// path from f.Name(); note that a "/" is append assuming a symbolic link is always a directory
newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
}
entry = newEntry
}
}
if !discardAttributes {
entry.ReadAttributes(top)
}
if !discardAttributes {
entry.ReadAttributes(top)
}
if f.Mode() & (os.ModeNamedPipe | os.ModeSocket | os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
skippedFiles = append(skippedFiles, entry.Path)
continue
}
entries = append(entries, entry)
}
entries = append(entries, entry)
}
// For top level directory we need to sort again because symlinks may have been changed
if path == "" {
sort.Sort(ByName(entries))
}
// For top level directory we need to sort again because symlinks may have been changed
if path == "" {
sort.Sort(ByName(entries))
}
for _, entry := range entries {
if entry.IsDir() {
directoryList = append(directoryList, entry)
} else {
*fileList = append(*fileList, entry)
}
}
for _, entry := range entries {
if entry.IsDir() {
directoryList = append(directoryList, entry)
} else {
*fileList = append(*fileList, entry)
}
}
for i, j := 0, len(directoryList) - 1; i < j; i, j = i + 1, j - 1 {
directoryList[i], directoryList[j] = directoryList[j], directoryList[i]
}
for i, j := 0, len(directoryList)-1; i < j; i, j = i+1, j-1 {
directoryList[i], directoryList[j] = directoryList[j], directoryList[i]
}
return directoryList, skippedFiles, nil
return directoryList, skippedFiles, nil
}
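ListEntries only lists a single directory, so a full scan drives it with an explicit stack, as the test further below also does. A sketch of that traversal (assumed usage, not from the original source), given a repository root in top:

func sketchWalk(top string) ([]*Entry, error) {
    // Depth-first traversal: ListEntries returns subdirectories in reverse
    // order, so popping from the stack visits them in sorted order.
    files := make([]*Entry, 0)
    stack := []*Entry{CreateEntry("", 0, 0, 0)}
    for len(stack) > 0 {
        dir := stack[len(stack)-1]
        stack = stack[:len(stack)-1]
        subdirs, _, err := ListEntries(top, dir.Path, &files, nil, true)
        if err != nil {
            return nil, err
        }
        stack = append(stack, subdirs...)
    }
    return files, nil
}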
// Diff returns how many bytes remain unmodified between two files.
func (entry *Entry) Diff(chunkHashes []string, chunkLengths []int,
    otherHashes []string, otherLengths []int) (modifiedLength int64) {
    var offset1, offset2 int64
    i1 := entry.StartChunk
    i2 := 0
    for i1 <= entry.EndChunk && i2 < len(otherHashes) {

        start := 0
        if i1 == entry.StartChunk {
            start = entry.StartOffset
        }
        end := chunkLengths[i1]
        if i1 == entry.EndChunk {
            end = entry.EndOffset
        }

        if offset1 < offset2 {
            modifiedLength += int64(end - start)
            offset1 += int64(end - start)
            i1++
        } else if offset1 > offset2 {
            offset2 += int64(otherLengths[i2])
            i2++
        } else {
            if chunkHashes[i1] == otherHashes[i2] && end-start == otherLengths[i2] {
            } else {
                modifiedLength += int64(chunkLengths[i1])
            }
            offset1 += int64(end - start)
            offset2 += int64(otherLengths[i2])
            i1++
            i2++
        }
    }

    return modifiedLength
}
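A worked example may help (a sketch, not from the original source): with hypothetical hashes h1/h2/h3, a file spanning two chunks where only the second chunk's hash changed reports just that chunk's bytes as modified.

func sketchDiff() {
    // A file occupying all of chunk 0 (1024 bytes) and chunk 1 (2048 bytes).
    entry := &Entry{StartChunk: 0, StartOffset: 0, EndChunk: 1, EndOffset: 2048}
    modified := entry.Diff(
        []string{"h1", "h2"}, []int{1024, 2048}, // this file's chunks
        []string{"h1", "h3"}, []int{1024, 2048}, // the other version
    )
    fmt.Println(modified) // 2048: only the second chunk differs
}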

View File

@@ -5,216 +5,214 @@
package duplicacy
import (
"testing"
"io/ioutil"
"os"
"path/filepath"
"math/rand"
"sort"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"sort"
"testing"
)
func TestEntrySort(t *testing.T) {
    DATA := [...]string{
        "ab",
        "ab-",
        "ab0",
        "ab1",
        "\xBB\xDDfile",
        "\xFF\xDDfile",
        "ab/",
        "ab/c",
        "ab+/c-",
        "ab+/c0",
        "ab+/c/",
        "ab+/c/d",
        "ab+/c+/",
        "ab+/c+/d",
        "ab+/c0/",
        "ab+/c0/d",
        "ab-/",
        "ab-/c",
        "ab0/",
        "ab1/",
        "ab1/c",
        "ab1/\xBB\xDDfile",
        "ab1/\xFF\xDDfile",
    }
    var entry1, entry2 *Entry
    for i, p1 := range DATA {
        if p1[len(p1)-1] == '/' {
            entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
        } else {
            entry1 = CreateEntry(p1, 0, 0, 0700)
        }
        for j, p2 := range DATA {
            if p2[len(p2)-1] == '/' {
                entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
            } else {
                entry2 = CreateEntry(p2, 0, 0, 0700)
            }

            compared := entry1.Compare(entry2)
            if compared < 0 {
                compared = -1
            } else if compared > 0 {
                compared = 1
            }

            var expected int
            if i < j {
                expected = -1
            } else if i > j {
                expected = 1
            } else {
                expected = 0
            }

            if compared != expected {
                t.Errorf("%s vs %s: %d, expected: %d", p1, p2, compared, expected)
            }
        }
    }
}
func TestEntryList(t *testing.T) {
    testDir := filepath.Join(os.TempDir(), "duplicacy_test")
    os.RemoveAll(testDir)
    os.MkdirAll(testDir, 0700)
    DATA := [...]string{
        "ab",
        "ab-",
        "ab0",
        "ab1",
        "ab+/",
        "ab+/c",
        "ab+/c+",
        "ab+/c1",
        "ab+/c-/",
        "ab+/c-/d",
        "ab+/c0/",
        "ab+/c0/d",
        "ab2/",
        "ab2/c",
        "ab3/",
        "ab3/c",
    }
    var entry1, entry2 *Entry
    for i, p1 := range DATA {
        if p1[len(p1)-1] == '/' {
            entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
        } else {
            entry1 = CreateEntry(p1, 0, 0, 0700)
        }
        for j, p2 := range DATA {
            if p2[len(p2)-1] == '/' {
                entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
            } else {
                entry2 = CreateEntry(p2, 0, 0, 0700)
            }

            compared := entry1.Compare(entry2)
            if compared < 0 {
                compared = -1
            } else if compared > 0 {
                compared = 1
            }

            var expected int
            if i < j {
                expected = -1
            } else if i > j {
                expected = 1
            } else {
                expected = 0
            }

            if compared != expected {
                t.Errorf("%s vs %s: %d, expected: %d", p1, p2, compared, expected)
            }
        }
    }
    for _, file := range DATA {
        fullPath := filepath.Join(testDir, file)
        if file[len(file)-1] == '/' {
            err := os.Mkdir(fullPath, 0700)
            if err != nil {
                t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
            }
            continue
        }

        err := ioutil.WriteFile(fullPath, []byte(file), 0700)
        if err != nil {
            t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
        }
    }
    directories := make([]*Entry, 0, 4)
    directories = append(directories, CreateEntry("", 0, 0, 0))

    entries := make([]*Entry, 0, 4)

    for len(directories) > 0 {
        directory := directories[len(directories)-1]
        directories = directories[:len(directories)-1]
        entries = append(entries, directory)
        subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, false)
        if err != nil {
            t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
        }
        directories = append(directories, subdirectories...)
    }

    entries = entries[1:]

    for _, entry := range entries {
        t.Logf("entry: %s", entry.Path)
    }

    if len(entries) != len(DATA) {
        t.Errorf("Got %d entries instead of %d", len(entries), len(DATA))
        return
    }

    for i := 0; i < len(entries); i++ {
        if entries[i].Path != DATA[i] {
            t.Errorf("entry: %s, expected: %s", entries[i].Path, DATA[i])
        }
    }

    t.Logf("shuffling %d entries", len(entries))
    for i := range entries {
        j := rand.Intn(i + 1)
        entries[i], entries[j] = entries[j], entries[i]
    }

    sort.Sort(ByName(entries))

    for i := 0; i < len(entries); i++ {
        if entries[i].Path != DATA[i] {
            t.Errorf("entry: %s, expected: %s", entries[i].Path, DATA[i])
        }
    }

    if !t.Failed() {
        os.RemoveAll(testDir)
    }
}

View File

@@ -5,70 +5,66 @@
package duplicacy
import (
"os"
"os"
)
// FileReader wraps a number of files and turns them into a series of readers.
type FileReader struct {
    top   string
    files []*Entry

    CurrentFile  *os.File
    CurrentIndex int
    CurrentEntry *Entry

    SkippedFiles []string
}
// CreateFileReader creates a file reader.
func CreateFileReader(top string, files []*Entry) *FileReader {
    reader := &FileReader{
        top:          top,
        files:        files,
        CurrentIndex: -1,
    }

    reader.NextFile()

    return reader
}
// NextFile switches to the next file in the file reader.
func (reader *FileReader) NextFile() bool {
    if reader.CurrentFile != nil {
        reader.CurrentFile.Close()
    }

    reader.CurrentIndex++
    for reader.CurrentIndex < len(reader.files) {

        reader.CurrentEntry = reader.files[reader.CurrentIndex]
        if !reader.CurrentEntry.IsFile() || reader.CurrentEntry.Size == 0 {
            reader.CurrentIndex++
            continue
        }

        var err error

        fullPath := joinPath(reader.top, reader.CurrentEntry.Path)
        reader.CurrentFile, err = os.OpenFile(fullPath, os.O_RDONLY, 0)
        if err != nil {
            LOG_WARN("OPEN_FAILURE", "Failed to open file for reading: %v", err)
            reader.CurrentEntry.Size = 0
            reader.SkippedFiles = append(reader.SkippedFiles, reader.CurrentEntry.Path)
            reader.CurrentIndex++
            continue
        }

        return true
    }

    reader.CurrentFile = nil
    return false
}
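A usage sketch (assumed, not from the original source): the reader is meant to be driven in a loop, reading CurrentFile until EOF and then advancing with NextFile.

func sketchReadAll(top string, files []*Entry) {
    reader := CreateFileReader(top, files)
    buffer := make([]byte, 64*1024)
    for reader.CurrentFile != nil {
        n, err := reader.CurrentFile.Read(buffer)
        if n > 0 {
            // process buffer[:n] for the file at reader.CurrentEntry.Path
        }
        if err != nil { // typically io.EOF; move on to the next file
            reader.NextFile()
        }
    }
}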

View File

@@ -5,259 +5,259 @@
package duplicacy
import (
"os"
"fmt"
"path"
"io"
"io/ioutil"
"time"
"math/rand"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path"
"time"
)
// FileStorage is a local on-disk file storage implementing the Storage interface.
type FileStorage struct {
    RateLimitedStorage

    minimumLevel    int  // The minimum level of directories to dive into before searching for the chunk file.
    isCacheNeeded   bool // Network storages require caching
    storageDir      string
    numberOfThreads int
}
// CreateFileStorage creates a file storage.
func CreateFileStorage(storageDir string, minimumLevel int, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {
    var stat os.FileInfo

    stat, err = os.Stat(storageDir)
    if err != nil {
        if os.IsNotExist(err) {
            err = os.MkdirAll(storageDir, 0744)
            if err != nil {
                return nil, err
            }
        } else {
            return nil, err
        }
    } else {
        if !stat.IsDir() {
            return nil, fmt.Errorf("The storage path %s is a file", storageDir)
        }
    }

    for storageDir[len(storageDir)-1] == '/' {
        storageDir = storageDir[:len(storageDir)-1]
    }

    storage = &FileStorage{
        storageDir:      storageDir,
        minimumLevel:    minimumLevel,
        isCacheNeeded:   isCacheNeeded,
        numberOfThreads: threads,
    }

    // Random number for generating the temporary chunk file suffix.
    rand.Seed(time.Now().UnixNano())

    return storage, nil
}
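A construction sketch (hypothetical path and parameters, not from the original source): a local disk storage that nests chunk files one directory level deep, with no snapshot cache and a single thread.

func sketchOpenStorage() (*FileStorage, error) {
    // "/backup/storage" is a hypothetical storage root.
    return CreateFileStorage("/backup/storage", 1, false, 1)
}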
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
    fullPath := path.Join(storage.storageDir, dir)

    list, err := ioutil.ReadDir(fullPath)
    if err != nil {
        if os.IsNotExist(err) {
            return nil, nil, nil
        }
        return nil, nil, err
    }

    for _, f := range list {
        name := f.Name()
        if f.IsDir() && name[len(name)-1] != '/' {
            name += "/"
        }
        files = append(files, name)
        sizes = append(sizes, f.Size())
    }

    return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *FileStorage) DeleteFile(threadIndex int, filePath string) (err error) {
    err = os.Remove(path.Join(storage.storageDir, filePath))
    if err == nil || os.IsNotExist(err) {
        return nil
    } else {
        return err
    }
}
// MoveFile renames the file.
func (storage *FileStorage) MoveFile(threadIndex int, from string, to string) (err error) {
    return os.Rename(path.Join(storage.storageDir, from), path.Join(storage.storageDir, to))
}
// CreateDirectory creates a new directory.
func (storage *FileStorage) CreateDirectory(threadIndex int, dir string) (err error) {
    err = os.Mkdir(path.Join(storage.storageDir, dir), 0744)
    if err != nil && os.IsExist(err) {
        return nil
    } else {
        return err
    }
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *FileStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
    stat, err := os.Stat(path.Join(storage.storageDir, filePath))
    if err != nil {
        if os.IsNotExist(err) {
            return false, false, 0, nil
        } else {
            return false, false, 0, err
        }
    }

    return true, stat.IsDir(), stat.Size(), nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with the
// suffix '.fsl'.
func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
    dir := path.Join(storage.storageDir, "chunks")

    suffix := ""
    if isFossil {
        suffix = ".fsl"
    }

    for level := 0; level*2 < len(chunkID); level++ {
        if level >= storage.minimumLevel {
            filePath = path.Join(dir, chunkID[2*level:]) + suffix
            // Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
            // really a need to follow the link if filePath is a link.
            stat, err := os.Lstat(filePath)
            if err != nil {
                LOG_DEBUG("FS_FIND", "File %s can't be found: %v", filePath, err)
            } else if stat.IsDir() {
                return filePath[len(storage.storageDir)+1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
            } else {
                return filePath[len(storage.storageDir)+1:], true, stat.Size(), nil
            }
        }

        // Find the subdirectory the chunk file may reside in.
        subDir := path.Join(dir, chunkID[2*level:2*level+2])
        stat, err := os.Stat(subDir)
        if err == nil && stat.IsDir() {
            dir = subDir
            continue
        }

        if level < storage.minimumLevel {
            // Create the subdirectory if it doesn't exist.

            if err == nil && !stat.IsDir() {
                return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
            }

            err = os.Mkdir(subDir, 0744)
            if err != nil {
                // The directory may have been created by other threads, so check it again.
                stat, _ := os.Stat(subDir)
                if stat == nil || !stat.IsDir() {
                    return "", false, 0, err
                }
            }
            dir = subDir
            continue
        }

        // The chunk must be under this subdirectory but it doesn't exist.
        return path.Join(dir, chunkID[2*level:])[len(storage.storageDir)+1:] + suffix, false, 0, nil
    }

    return "", false, 0, fmt.Errorf("The maximum level of directories searched")
}
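As a concrete illustration (a sketch under assumed values, not from the original source): with minimumLevel set to 1 and a chunk ID beginning "0123...", FindChunk probes the nested layout as follows.

func sketchFindChunk(storage *FileStorage, chunkID string) {
    // With minimumLevel == 1 and chunkID == "0123abcd..." (hypothetical):
    //   level 0: descend into chunks/01/, creating it if necessary
    //   level 1: look for chunks/01/23abcd... (with ".fsl" appended for fossils)
    // Deeper levels are probed only if chunks/01/23/ already exists.
    filePath, exist, size, err := storage.FindChunk(0, chunkID, false)
    _, _, _, _ = filePath, exist, size, err
}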
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *FileStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
    file, err := os.Open(path.Join(storage.storageDir, filePath))
    if err != nil {
        return err
    }

    defer file.Close()
    if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
        return err
    }

    return nil
}
// UploadFile writes 'content' to the file at 'filePath'
func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
    fullPath := path.Join(storage.storageDir, filePath)

    letters := "abcdefghijklmnopqrstuvwxyz"
    suffix := make([]byte, 8)
    for i := range suffix {
        suffix[i] = letters[rand.Intn(len(letters))]
    }

    temporaryFile := fullPath + "." + string(suffix) + ".tmp"

    file, err := os.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }

    reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
    _, err = io.Copy(file, reader)
    if err != nil {
        file.Close()
        return err
    }

    file.Close()

    err = os.Rename(temporaryFile, fullPath)
    if err != nil {
        if _, e := os.Stat(fullPath); e == nil {
            os.Remove(temporaryFile)
            return nil
        } else {
            return err
        }
    }

    return nil
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileStorage) IsCacheNeeded() bool { return storage.isCacheNeeded }

// If the 'MoveFile' method is implemented.
func (storage *FileStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *FileStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *FileStorage) IsFastListing() bool { return false }
// Enable the test mode.
func (storage *FileStorage) EnableTestMode() {}

File diff suppressed because it is too large

View File

@@ -5,65 +5,64 @@
package duplicacy
import (
"io"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/url"
"time"
"net/url"
"math/rand"
"io/ioutil"
"encoding/json"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
gcs "cloud.google.com/go/storage"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
"google.golang.org/api/googleapi"
)
type GCSStorage struct {
    RateLimitedStorage

    bucket          *gcs.BucketHandle
    storageDir      string
    numberOfThreads int
    TestMode        bool
}
type GCSConfig struct {
    ClientID     string          `json:"client_id"`
    ClientSecret string          `json:"client_secret"`
    Endpoint     oauth2.Endpoint `json:"end_point"`
    Token        oauth2.Token    `json:"token"`
}
// CreateGCSStorage creates a GCD storage object.
func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, threads int) (storage *GCSStorage, err error) {
    ctx := context.Background()

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    var object map[string]interface{}

    err = json.Unmarshal(description, &object)
    if err != nil {
        return nil, err
    }

    isServiceAccount := false
    if value, ok := object["type"]; ok {
        if authType, ok := value.(string); ok && authType == "service_account" {
            isServiceAccount = true
        }
    }

    var tokenSource oauth2.TokenSource
@@ -74,7 +73,7 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
        }
        tokenSource = config.TokenSource(ctx)
    } else {
        gcsConfig := &GCSConfig{}
        if err := json.Unmarshal(description, gcsConfig); err != nil {
            return nil, err
        }
@@ -87,82 +86,81 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
        tokenSource = config.TokenSource(ctx, &gcsConfig.Token)
    }

    options := option.WithTokenSource(tokenSource)
    client, err := gcs.NewClient(ctx, options)
    bucket := client.Bucket(bucketName)

    if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
        storageDir += "/"
    }

    storage = &GCSStorage{
        bucket:          bucket,
        storageDir:      storageDir,
        numberOfThreads: threads,
    }

    return storage, nil
}
func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {
    retry := false
    message := ""
    if err == nil {
        return false, nil
    } else if e, ok := err.(*googleapi.Error); ok {
        if 500 <= e.Code && e.Code < 600 {
            // Retry for 5xx response codes.
            message = fmt.Sprintf("HTTP status code %d", e.Code)
            retry = true
        } else if e.Code == 429 {
            // Too many requests
            message = "HTTP status code 429"
            retry = true
        } else if e.Code == 403 {
            // User Rate Limit Exceeded
            message = "User Rate Limit Exceeded"
            retry = true
        }
    } else if e, ok := err.(*url.Error); ok {
        message = e.Error()
        retry = true
    } else if err == io.ErrUnexpectedEOF {
        // Retry on unexpected EOFs and temporary network errors.
        message = "Unexpected EOF"
        retry = true
    } else if err, ok := err.(net.Error); ok {
        message = "Temporary network error"
        retry = err.Temporary()
    }

    if !retry || *backoff >= 256 {
        return false, err
    }

    delay := float32(*backoff) * rand.Float32()
    LOG_INFO("GCS_RETRY", "%s; retrying after %.2f seconds", message, delay)
    time.Sleep(time.Duration(float32(*backoff) * float32(time.Second)))
    *backoff *= 2
    return true, nil
}
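The callers in this file all follow the same loop shape; a condensed sketch of that pattern (doOperation is hypothetical, not part of the original source):

func sketchRetryLoop(storage *GCSStorage, doOperation func() error) error {
    backoff := 1
    for {
        err := doOperation()
        if retry, e := storage.shouldRetry(&backoff, err); e != nil {
            return e // non-retryable, or the backoff budget is exhausted
        } else if !retry {
            return nil // the operation succeeded
        }
        // shouldRetry already slept and doubled the backoff; try again.
    }
}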
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }

    query := gcs.Query{
        Prefix: storage.storageDir + dir + "/",
    }

    dirOnly := false
    prefixLength := len(query.Prefix)
    if dir == "snapshots" {
        query.Delimiter = "/"
        dirOnly = true
    }
@@ -174,7 +172,7 @@ func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []i
        attributes, err := iter.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return nil, nil, err
        }
@@ -198,7 +196,7 @@ func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []i
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *GCSStorage) DeleteFile(threadIndex int, filePath string) (err error) {
    err = storage.bucket.Object(storage.storageDir + filePath).Delete(context.Background())
    if err == gcs.ErrObjectNotExist {
        return nil
    }
    return err
@@ -243,10 +241,10 @@ func (storage *GCSStorage) GetFileInfo(threadIndex int, filePath string) (exist
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *GCSStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
@@ -259,9 +257,9 @@ func (storage *GCSStorage) DownloadFile(threadIndex int, filePath string, chunk
    if err != nil {
        return err
    }

    defer readCloser.Close()
    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
    return err
}
// UploadFile writes 'content' to the file at 'filePath'.
@@ -271,7 +269,7 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
    for {
        writeCloser := storage.bucket.Object(storage.storageDir + filePath).NewWriter(context.Background())
        defer writeCloser.Close()
        reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
        _, err = io.Copy(writeCloser, reader)
        if retry, e := storage.shouldRetry(&backoff, err); e == nil && !retry {
@@ -282,22 +280,22 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
            return err
        }
    }

    return err
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *GCSStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *GCSStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *GCSStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *GCSStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }

View File

@@ -5,456 +5,456 @@
package duplicacy
import (
"fmt"
"net"
"time"
"sync"
"bytes"
"strings"
"io/ioutil"
"encoding/json"
"io"
"net/http"
net_url "net/url"
"math/rand"
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
net_url "net/url"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
"golang.org/x/oauth2"
)
type HubicError struct {
    Status  int
    Message string
}
func (err HubicError) Error() string {
    return fmt.Sprintf("%d %s", err.Status, err.Message)
}
var HubicRefreshTokenURL = "https://duplicacy.com/hubic_refresh"
var HubicCredentialURL = "https://api.hubic.com/1.0/account/credentials"
type HubicCredential struct {
    Token    string
    Endpoint string
    Expires  time.Time
}
type HubicClient struct {
    HTTPClient *http.Client

    TokenFile string
    Token     *oauth2.Token
    TokenLock *sync.Mutex

    Credential     HubicCredential
    CredentialLock *sync.Mutex

    TestMode bool
}
func NewHubicClient(tokenFile string) (*HubicClient, error) {
    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    token := new(oauth2.Token)
    if err := json.Unmarshal(description, token); err != nil {
        return nil, fmt.Errorf("%v: %s", err, description)
    }

    client := &HubicClient{
        HTTPClient: &http.Client{
            Transport: &http.Transport{
                Dial: (&net.Dialer{
                    Timeout:   30 * time.Second,
                    KeepAlive: 30 * time.Second,
                }).Dial,
                TLSHandshakeTimeout:   60 * time.Second,
                ResponseHeaderTimeout: 30 * time.Second,
                ExpectContinueTimeout: 10 * time.Second,
            },
        },
        TokenFile:      tokenFile,
        Token:          token,
        TokenLock:      &sync.Mutex{},
        CredentialLock: &sync.Mutex{},
    }

    err = client.RefreshToken()
    if err != nil {
        return nil, err
    }

    err = client.GetCredential()
    if err != nil {
        return nil, err
    }

    return client, nil
}
func (client *HubicClient) call(url string, method string, input interface{}, extraHeader map[string]string) (io.ReadCloser, int64, string, error) {
    var response *http.Response

    backoff := 1
    for i := 0; i < 8; i++ {

        LOG_DEBUG("HUBIC_CALL", "%s %s", method, url)

        //fmt.Printf("%s %s\n", method, url)

        var inputReader io.Reader

        switch input.(type) {
        default:
            jsonInput, err := json.Marshal(input)
            if err != nil {
                return nil, 0, "", err
            }
            inputReader = bytes.NewReader(jsonInput)
        case []byte:
            inputReader = bytes.NewReader(input.([]byte))
        case int:
            inputReader = bytes.NewReader([]byte(""))
        case *bytes.Buffer:
            inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
        case *RateLimitedReader:
            input.(*RateLimitedReader).Reset()
            inputReader = input.(*RateLimitedReader)
        }

        request, err := http.NewRequest(method, url, inputReader)
        if err != nil {
            return nil, 0, "", err
        }

        if reader, ok := inputReader.(*RateLimitedReader); ok {
            request.ContentLength = reader.Length()
        }

        if url == HubicCredentialURL {
            client.TokenLock.Lock()
            request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
            client.TokenLock.Unlock()
        } else if url != HubicRefreshTokenURL {
            client.CredentialLock.Lock()
            request.Header.Set("X-Auth-Token", client.Credential.Token)
            client.CredentialLock.Unlock()
        }

        for key, value := range extraHeader {
            request.Header.Set(key, value)
        }

        response, err = client.HTTPClient.Do(request)
        if err != nil {
            return nil, 0, "", err
        }

        contentType := ""
        if len(response.Header["Content-Type"]) > 0 {
            contentType = response.Header["Content-Type"][0]
        }

        if response.StatusCode < 400 {
            return response.Body, response.ContentLength, contentType, nil
        }

        /*buffer := bytes.NewBufferString("")
          io.Copy(buffer, response.Body)
          fmt.Printf("%s\n", buffer.String())*/

        response.Body.Close()

        if response.StatusCode == 401 {

            if url == HubicRefreshTokenURL {
                return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
            }

            if url == HubicCredentialURL {
                return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when retrieving credentials"}
            }

            err = client.RefreshToken()
            if err != nil {
                return nil, 0, "", err
            }

            err = client.GetCredential()
            if err != nil {
                return nil, 0, "", err
            }
            continue
        } else if response.StatusCode >= 500 && response.StatusCode < 600 {
            retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
            LOG_INFO("HUBIC_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
            time.Sleep(retryAfter * time.Millisecond)
            backoff *= 2
            continue
        } else {
            return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Hubic API error"}
        }
    }

    return nil, 0, "", fmt.Errorf("Maximum number of retries reached")
}
func (client *HubicClient) RefreshToken() (err error) {
    client.TokenLock.Lock()
    defer client.TokenLock.Unlock()

    if client.Token.Valid() {
        return nil
    }

    readCloser, _, _, err := client.call(HubicRefreshTokenURL, "POST", client.Token, nil)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    if err = json.NewDecoder(readCloser).Decode(&client.Token); err != nil {
        return err
    }

    description, err := json.Marshal(client.Token)
    if err != nil {
        return err
    }

    err = ioutil.WriteFile(client.TokenFile, description, 0644)
    if err != nil {
        return err
    }

    return nil
}
func (client *HubicClient) GetCredential() (err error) {
    client.CredentialLock.Lock()
    defer client.CredentialLock.Unlock()

    readCloser, _, _, err := client.call(HubicCredentialURL, "GET", 0, nil)
    if err != nil {
        return err
    }

    buffer := bytes.NewBufferString("")
    io.Copy(buffer, readCloser)
    readCloser.Close()

    if err = json.NewDecoder(buffer).Decode(&client.Credential); err != nil {
        return fmt.Errorf("%v (response: %s)", err, buffer)
    }

    return nil
}
type HubicEntry struct {
    Name   string `json:"name"`
    Size   int64  `json:"bytes"`
    Type   string `json:"content_type"`
    Subdir string `json:"subdir"`
}
func (client *HubicClient) ListEntries(path string) ([]HubicEntry, error) {
    if len(path) > 0 && path[len(path)-1] != '/' {
        path += "/"
    }

    count := 1000
    if client.TestMode {
        count = 8
    }

    marker := ""

    var entries []HubicEntry

    for {

        client.CredentialLock.Lock()
        url := client.Credential.Endpoint + "/default"
        client.CredentialLock.Unlock()
        url += fmt.Sprintf("?format=json&limit=%d&delimiter=%%2f", count)
        if path != "" {
            url += "&prefix=" + net_url.QueryEscape(path)
        }
        if marker != "" {
            url += "&marker=" + net_url.QueryEscape(marker)
        }

        readCloser, _, _, err := client.call(url, "GET", 0, nil)
        if err != nil {
            return nil, err
        }

        defer readCloser.Close()

        var output []HubicEntry

        if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
            return nil, err
        }

        for _, entry := range output {
            if entry.Subdir == "" {
                marker = entry.Name
            } else {
                marker = entry.Subdir
                for len(entry.Subdir) > 0 && entry.Subdir[len(entry.Subdir)-1] == '/' {
                    entry.Subdir = entry.Subdir[:len(entry.Subdir)-1]
                }
                entry.Name = entry.Subdir
                entry.Type = "application/directory"
            }
            if path != "" && strings.HasPrefix(entry.Name, path) {
                entry.Name = entry.Name[len(path):]
            }
            entries = append(entries, entry)
        }

        if len(output) < count {
            break
        }
    }

    return entries, nil
}
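A listing sketch (assuming an already-configured client; not from the original source): pages are fetched internally via the marker parameter, and the caller just sees the flattened slice, with directories marked by the "application/directory" content type.

func sketchListTopLevel(client *HubicClient) error {
    entries, err := client.ListEntries("")
    if err != nil {
        return err
    }
    for _, e := range entries {
        isDir := e.Type == "application/directory"
        fmt.Printf("%s (dir=%t, %d bytes)\n", e.Name, isDir, e.Size)
    }
    return nil
}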
func (client *HubicClient) GetFileInfo(path string) (bool, bool, int64, error) {
    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    readCloser, size, contentType, err := client.call(url, "HEAD", 0, nil)
    if err != nil {
        if e, ok := err.(HubicError); ok && e.Status == 404 {
            return false, false, 0, nil
        } else {
            return false, false, 0, err
        }
    }

    readCloser.Close()

    return true, contentType == "application/directory", size, nil
}
func (client *HubicClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    readCloser, size, _, err := client.call(url, "GET", 0, nil)
    return readCloser, size, err
}
func (client *HubicClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    header := make(map[string]string)
    header["Content-Type"] = "application/octet-stream"

    readCloser, _, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), header)

    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}
func (client *HubicClient) DeleteFile(path string) error {
    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    readCloser, _, _, err := client.call(url, "DELETE", 0, nil)

    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}
func (client *HubicClient) MoveFile(from string, to string) error {
    for len(from) > 0 && from[len(from)-1] == '/' {
        from = from[:len(from)-1]
    }

    for len(to) > 0 && to[len(to)-1] == '/' {
        to = to[:len(to)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + from
    client.CredentialLock.Unlock()

    header := make(map[string]string)
    header["Destination"] = "default/" + to

    readCloser, _, _, err := client.call(url, "COPY", 0, header)

    if err != nil {
        return err
    }

    readCloser.Close()

    return client.DeleteFile(from)
}
func (client *HubicClient) CreateDirectory(path string) (error) {
func (client *HubicClient) CreateDirectory(path string) error {
for len(path) > 0 && path[len(path) - 1] == '/' {
path = path[:len(path) - 1]
}
for len(path) > 0 && path[len(path)-1] == '/' {
path = path[:len(path)-1]
}
client.CredentialLock.Lock()
url := client.Credential.Endpoint + "/default/" + path
client.CredentialLock.Unlock()
client.CredentialLock.Lock()
url := client.Credential.Endpoint + "/default/" + path
client.CredentialLock.Unlock()
header := make(map[string]string)
header["Content-Type"] = "application/directory"
header := make(map[string]string)
header["Content-Type"] = "application/directory"
readCloser, _, _, err := client.call(url, "PUT", "", header)
readCloser, _, _, err := client.call(url, "PUT", "", header)
if err != nil {
return err
}
if err != nil {
return err
}
readCloser.Close()
return nil
readCloser.Close()
return nil
}
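
For orientation, the methods above compose in the obvious order: create the client, then upload, download, and delete by path. A minimal sketch of that round trip (demoHubicRoundTrip is a hypothetical helper in the same package, assuming a valid hubic-token.json and an existing test directory; it is not part of this commit):

// demoHubicRoundTrip is a hypothetical example, not part of the package:
// it uploads a small blob, reads it back, and deletes it.
func demoHubicRoundTrip() error {
client, err := NewHubicClient("hubic-token.json")
if err != nil {
return err
}
// 100 is the rate limit argument, matching the value used by the tests.
if err := client.UploadFile("test/hello.txt", []byte("hello"), 100); err != nil {
return err
}
readCloser, size, err := client.DownloadFile("test/hello.txt")
if err != nil {
return err
}
LOG_DEBUG("HUBIC_DEMO", "downloaded %d bytes", size)
readCloser.Close()
return client.DeleteFile("test/hello.txt")
}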


@@ -5,145 +5,145 @@
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"testing"
crypto_rand "crypto/rand"
"math/rand"
)
func TestHubicClient(t *testing.T) {
hubicClient, err := NewHubicClient("hubic-token.json")
if err != nil {
t.Errorf("Failed to create the Hubic client: %v", err)
return
}
hubicClient.TestMode = true
existingFiles, err := hubicClient.ListEntries("")
for _, file := range existingFiles {
fmt.Printf("name: %s, isDir: %t\n", file.Name, file.Type == "application/directory")
}
testExists, _, _, err := hubicClient.GetFileInfo("test")
if err != nil {
t.Errorf("Failed to list the test directory: %v", err)
return
}
if !testExists {
err = hubicClient.CreateDirectory("test")
if err != nil {
t.Errorf("Failed to create the test directory: %v", err)
return
}
}
test1Exists, _, _, err := hubicClient.GetFileInfo("test/test1")
if err != nil {
t.Errorf("Failed to list the test1 directory: %v", err)
return
}
if !test1Exists {
err = hubicClient.CreateDirectory("test/test1")
if err != nil {
t.Errorf("Failed to create the test1 directory: %v", err)
return
}
}
test2Exists, _, _, err := hubicClient.GetFileInfo("test/test2")
if err != nil {
t.Errorf("Failed to list the test2 directory: %v", err)
return
}
if !test2Exists {
err = hubicClient.CreateDirectory("test/test2")
if err != nil {
t.Errorf("Failed to create the test2 directory: %v", err)
return
}
}
numberOfFiles := 20
maxFileSize := 64 * 1024
for i := 0; i < numberOfFiles; i++ {
content := make([]byte, rand.Int()%maxFileSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hasher := sha256.New()
hasher.Write(content)
filename := hex.EncodeToString(hasher.Sum(nil))
fmt.Printf("file: %s\n", filename)
err = hubicClient.UploadFile("test/test1/"+filename, content, 100)
if err != nil {
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
t.Errorf("Failed to upload the file %s: %v", filename, err)
return
}
}
}
entries, err := hubicClient.ListEntries("test/test1")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
exists, isDir, size, err := hubicClient.GetFileInfo("test/test1/" + entry.Name)
fmt.Printf("%s exists: %t, isDir: %t, size: %d, err: %v\n", "test/test1/"+entry.Name, exists, isDir, size, err)
err = hubicClient.MoveFile("test/test1/"+entry.Name, "test/test2/"+entry.Name)
if err != nil {
t.Errorf("Failed to move %s: %v", entry.Name, err)
return
}
}
entries, err = hubicClient.ListEntries("test/test2")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
readCloser, _, err := hubicClient.DownloadFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Error downloading file %s: %v", entry.Name, err)
return
}
hasher := sha256.New()
io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Name {
t.Errorf("File %s, hash %s", entry.Name, hash)
}
readCloser.Close()
}
for _, entry := range entries {
err = hubicClient.DeleteFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
return
}
}
}


@@ -5,203 +5,203 @@
package duplicacy
import (
"fmt"
"strings"
)
type HubicStorage struct {
RateLimitedStorage
client *HubicClient
storageDir string
numberOfThreads int
}
// CreateHubicStorage creates a Hubic storage object.
func CreateHubicStorage(tokenFile string, storagePath string, threads int) (storage *HubicStorage, err error) {
for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
storagePath = storagePath[:len(storagePath)-1]
}
client, err := NewHubicClient(tokenFile)
if err != nil {
return nil, err
}
exists, isDir, _, err := client.GetFileInfo(storagePath)
if err != nil {
return nil, err
}
if !exists {
return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
}
if !isDir {
return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
}
storage = &HubicStorage{
client: client,
storageDir: storagePath,
numberOfThreads: threads,
}
for _, path := range []string{"chunks", "snapshots"} {
dir := storagePath + "/" + path
exists, isDir, _, err := client.GetFileInfo(dir)
if err != nil {
return nil, err
}
if !exists {
err = client.CreateDirectory(storagePath + "/" + path)
if err != nil {
return nil, err
}
} else if !isDir {
return nil, fmt.Errorf("%s is not a directory", dir)
}
}
return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
if dir == "snapshots" {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
subDirs := []string{}
for _, entry := range entries {
if entry.Type == "application/directory" {
subDirs = append(subDirs, entry.Name+"/")
}
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
files := []string{}
for _, entry := range entries {
if entry.Type == "application/directory" {
continue
}
files = append(files, entry.Name)
}
return files, nil, nil
} else {
files := []string{}
sizes := []int64{}
entries, err := storage.client.ListEntries(storage.storageDir + "/chunks")
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
if entry.Type == "application/directory" {
continue
}
files = append(files, entry.Name)
sizes = append(sizes, entry.Size)
}
return files, sizes, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *HubicStorage) DeleteFile(threadIndex int, filePath string) (err error) {
err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
if e, ok := err.(HubicError); ok && e.Status == 404 {
LOG_DEBUG("HUBIC_DELETE", "Ignore 404 error")
return nil
}
return err
}
// MoveFile renames the file.
func (storage *HubicStorage) MoveFile(threadIndex int, from string, to string) (err error) {
fromPath := storage.storageDir + "/" + from
toPath := storage.storageDir + "/" + to
return storage.client.MoveFile(fromPath, toPath)
}
// CreateDirectory creates a new directory.
func (storage *HubicStorage) CreateDirectory(threadIndex int, dir string) (err error) {
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
return storage.client.CreateDirectory(storage.storageDir + "/" + dir)
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *HubicStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
filePath = filePath[:len(filePath)-1]
}
return storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *HubicStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
return filePath, exist, size, err
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
if err != nil {
return err
}
defer readCloser.Close()
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
return storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThreads)
}
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *HubicStorage) IsCacheNeeded() bool { return true }
// If the 'MoveFile' method is implemented.
func (storage *HubicStorage) IsMoveFileImplemented() bool { return true }
// If the storage can guarantee strong consistency.
func (storage *HubicStorage) IsStrongConsistent() bool { return false }
// If the storage supports fast listing of file names.
func (storage *HubicStorage) IsFastListing() bool { return true }
// Enable the test mode.
func (storage *HubicStorage) EnableTestMode() {
storage.client.TestMode = true
}
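
One convention worth noting in FindChunk above: a fossil is an ordinary chunk file renamed with a .fsl suffix, so mapping a chunk ID to its storage path is pure string manipulation. A sketch of that mapping (chunkPath is a hypothetical helper, not part of the package):

// chunkPath mirrors the path logic in FindChunk: a fossil lives at the
// same location as the chunk, with ".fsl" appended.
func chunkPath(chunkID string, isFossil bool) string {
filePath := "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
return filePath
}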


@@ -7,24 +7,24 @@
package duplicacy
import (
"github.com/gilbertchen/keyring"
)
func SetKeyringFile(path string) {
// We only use keyring file on Windows
}
func keyringGet(key string) (value string) {
value, err := keyring.Get("duplicacy", key)
if err != nil {
LOG_DEBUG("KEYRING_GET", "Failed to get the value from the keyring: %v", err)
}
return value
}
func keyringSet(key string, value string) {
err := keyring.Set("duplicacy", key, value)
if err != nil {
LOG_DEBUG("KEYRING_SET", "Failed to store the value to the keyring: %v", err)
}
}


@@ -5,156 +5,156 @@
package duplicacy
import (
"encoding/json"
"io/ioutil"
"syscall"
"unsafe"
)
var keyringFile string
var (
dllcrypt32 = syscall.NewLazyDLL("Crypt32.dll")
dllkernel32 = syscall.NewLazyDLL("Kernel32.dll")
procEncryptData = dllcrypt32.NewProc("CryptProtectData")
procDecryptData = dllcrypt32.NewProc("CryptUnprotectData")
procLocalFree = dllkernel32.NewProc("LocalFree")
)
type DATA_BLOB struct {
cbData uint32
pbData *byte
}
func SetKeyringFile(path string) {
keyringFile = path
}
func keyringEncrypt(value []byte) ([]byte, error) {
dataIn := DATA_BLOB{
pbData: &value[0],
cbData: uint32(len(value)),
}
dataOut := DATA_BLOB{}
r, _, err := procEncryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
if r == 0 {
return nil, err
}
address := uintptr(unsafe.Pointer(dataOut.pbData))
defer procLocalFree.Call(address)
encryptedData := make([]byte, dataOut.cbData)
for i := 0; i < len(encryptedData); i++ {
encryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i)))
}
return encryptedData, nil
}
func keyringDecrypt(value []byte) ([]byte, error) {
dataIn := DATA_BLOB{
pbData: &value[0],
cbData: uint32(len(value)),
}
dataOut := DATA_BLOB{}
r, _, err := procDecryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
if r == 0 {
return nil, err
}
address := uintptr(unsafe.Pointer(dataOut.pbData))
defer procLocalFree.Call(address)
decryptedData := make([]byte, dataOut.cbData)
for i := 0; i < len(decryptedData); i++ {
address := int(uintptr(unsafe.Pointer(dataOut.pbData)))
decryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i)))
}
return decryptedData, nil
}
func keyringGet(key string) (value string) {
if keyringFile == "" {
LOG_DEBUG("KEYRING_NOT_INITIALIZED", "Keyring file not set")
return ""
}
description, err := ioutil.ReadFile(keyringFile)
if err != nil {
LOG_DEBUG("KEYRING_READ", "Keyring file not read: %v", err)
return ""
}
var keyring map[string][]byte
err = json.Unmarshal(description, &keyring)
if err != nil {
LOG_DEBUG("KEYRING_PARSE", "Failed to parse the keyring storage file %s: %v", keyringFile, err)
return ""
}
encryptedValue := keyring[key]
if len(encryptedValue) == 0 {
return ""
}
valueInBytes, err := keyringDecrypt(encryptedValue)
if err != nil {
LOG_DEBUG("KEYRING_DECRYPT", "Failed to decrypt the value: %v", err)
return ""
}
return string(valueInBytes)
}
func keyringSet(key string, value string) bool {
if value == "" {
return false
}
if keyringFile == "" {
LOG_DEBUG("KEYRING_NOT_INITIALIZED", "Keyring file not set")
return false
}
keyring := make(map[string][]byte)
description, err := ioutil.ReadFile(keyringFile)
if err == nil {
err = json.Unmarshal(description, &keyring)
if err != nil {
LOG_DEBUG("KEYRING_PARSE", "Failed to parse the keyring storage file %s: %v", keyringFile, err)
}
}
if value == "" {
keyring[key] = nil
} else {
encryptedValue, err := keyringEncrypt([]byte(value))
if err != nil {
LOG_DEBUG("KEYRING_ENCRYPT", "Failed to encrypt the value: %v", err)
return false
}
keyring[key] = encryptedValue
}
description, err = json.MarshalIndent(keyring, "", " ")
if err != nil {
LOG_DEBUG("KEYRING_MARSHAL", "Failed to marshal the keyring storage: %v", err)
return false
}
err = ioutil.WriteFile(keyringFile, description, 0600)
if err != nil {
LOG_DEBUG("KEYRING_WRITE", "Failed to save the keyring storage to file %s: %v", keyringFile, err)
return false
}
return true
}
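
Taken together, keyringSet and keyringGet implement a small DPAPI-protected key/value store in a JSON file. A hypothetical round trip (the file path is illustrative only):

// demoWindowsKeyring is a hypothetical example: it points the package at a
// keyring file, stores a secret (encrypted with CryptProtectData), and
// reads it back (decrypted with CryptUnprotectData).
func demoWindowsKeyring() {
SetKeyringFile("C:\\Users\\example\\.duplicacy\\keyring")
if keyringSet("storage_password", "secret") {
value := keyringGet("storage_password")
LOG_DEBUG("KEYRING_DEMO", "retrieved %d bytes", len(value))
}
}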


@@ -5,22 +5,22 @@
package duplicacy
import (
"fmt"
"os"
"runtime/debug"
"sync"
"testing"
"time"
)
const (
DEBUG = -2
TRACE = -1
INFO = 0
WARN = 1
ERROR = 2
FATAL = 3
ASSERT = 4
)
var LogFunction func(level int, logID string, message string)
var printLogHeader = false
func EnableLogHeader() {
printLogHeader = true
}
var printStackTrace = false
func EnableStackTrace() {
printStackTrace = true
}
var testingT *testing.T
func setTestingT(t *testing.T) {
testingT = t
}
func getLevelName(level int) string {
switch level {
case DEBUG:
return "DEBUG"
case TRACE:
return "TRACE"
case INFO:
return "INFO"
case WARN:
return "WARN"
case ERROR:
return "ERROR"
case FATAL:
return "FATAL"
case ASSERT:
return "ASSERT"
default:
return fmt.Sprintf("[%d]", level)
}
}
var loggingLevel int
func IsDebugging() bool {
return loggingLevel <= DEBUG
}
func IsTracing() bool {
return loggingLevel <= TRACE
}
func SetLoggingLevel(level int) {
loggingLevel = level
}
func LOG_DEBUG(logID string, format string, v ...interface{}) {
logf(DEBUG, logID, format, v...)
}
func LOG_TRACE(logID string, format string, v ...interface{}) {
logf(TRACE, logID, format, v...)
}
func LOG_INFO(logID string, format string, v ...interface{}) {
logf(INFO, logID, format, v...)
}
func LOG_WARN(logID string, format string, v ...interface{}) {
logf(WARN, logID, format, v...)
}
func LOG_ERROR(logID string, format string, v ...interface{}) {
logf(ERROR, logID, format, v...)
}
func LOG_FATAL(logID string, format string, v ...interface{}) {
logf(FATAL, logID, format, v...)
}
func LOG_ASSERT(logID string, format string, v ...interface{}) {
logf(ASSERT, logID, format, v...)
}
type Exception struct {
Level int
LogID string
Message string
}
var logMutex sync.Mutex
func logf(level int, logID string, format string, v ...interface{}) {
message := fmt.Sprintf(format, v...)
if LogFunction != nil {
LogFunction(level, logID, message)
return
}
now := time.Now()
// Uncomment this line to enable unbuffered logging for tests
// fmt.Printf("%s %s %s %s\n", now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
if testingT != nil {
if level < WARN {
if level >= loggingLevel {
testingT.Logf("%s %s %s %s\n",
now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
}
} else {
testingT.Errorf("%s %s %s %s\n",
now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
}
} else {
logMutex.Lock()
defer logMutex.Unlock()
if level >= loggingLevel {
if printLogHeader {
fmt.Printf("%s %s %s %s\n",
now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
} else {
fmt.Printf("%s\n", message)
}
}
}
if level > WARN {
panic(Exception{
Level: level,
LogID: logID,
Message: message,
})
}
}
const (
duplicacyExitCode = 100
otherExitCode = 101
)
// This is the function to be called before exiting when an error occurs.
var RunAtError func() = func() {}
func CatchLogException() {
if r := recover(); r != nil {
switch e := r.(type) {
case Exception:
if printStackTrace {
debug.PrintStack()
}
RunAtError()
os.Exit(duplicacyExitCode)
default:
fmt.Fprintf(os.Stderr, "%v\n", e)
debug.PrintStack()
RunAtError()
os.Exit(otherExitCode)
}
}
}
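
The control flow here deserves a note: LOG_ERROR and above do not return. logf panics with an Exception, and CatchLogException is meant to be armed with defer at the top of a command so the panic unwinds to RunAtError and a clean os.Exit. A sketch of the assumed usage (runCommand is hypothetical):

// runCommand shows the assumed calling convention: defer CatchLogException
// first, then log freely; any LOG_ERROR/LOG_FATAL below panics, is recovered
// above, and the process exits with code 100 after RunAtError runs.
func runCommand() {
defer CatchLogException()
SetLoggingLevel(INFO)
LOG_INFO("COMMAND_START", "starting the command")
LOG_ERROR("COMMAND_FAIL", "something went wrong") // unwinds via panic
LOG_INFO("COMMAND_DONE", "never reached")
}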


@@ -5,372 +5,372 @@
package duplicacy
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"strings"
"sync"
"time"
"golang.org/x/oauth2"
)
type OneDriveError struct {
Status int
Message string
}
func (err OneDriveError) Error() string {
return fmt.Sprintf("%d %s", err.Status, err.Message)
}
type OneDriveErrorResponse struct {
Error OneDriveError `json:"error"`
}
var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
var OneDriveAPIURL = "https://api.onedrive.com/v1.0"
type OneDriveClient struct {
HTTPClient *http.Client
TokenFile string
Token *oauth2.Token
TokenLock *sync.Mutex
IsConnected bool
TestMode bool
}
func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {
description, err := ioutil.ReadFile(tokenFile)
if err != nil {
return nil, err
}
token := new(oauth2.Token)
if err := json.Unmarshal(description, token); err != nil {
return nil, err
}
client := &OneDriveClient{
HTTPClient: http.DefaultClient,
TokenFile: tokenFile,
Token: token,
TokenLock: &sync.Mutex{},
}
return client, nil
}
func (client *OneDriveClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {
var response *http.Response
backoff := 1
for i := 0; i < 8; i++ {
LOG_DEBUG("ONEDRIVE_CALL", "%s %s", method, url)
var inputReader io.Reader
switch input.(type) {
default:
jsonInput, err := json.Marshal(input)
if err != nil {
return nil, 0, err
}
inputReader = bytes.NewReader(jsonInput)
case []byte:
inputReader = bytes.NewReader(input.([]byte))
case int:
inputReader = nil
case *bytes.Buffer:
inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
case *RateLimitedReader:
input.(*RateLimitedReader).Reset()
inputReader = input.(*RateLimitedReader)
}
request, err := http.NewRequest(method, url, inputReader)
if err != nil {
return nil, 0, err
}
if reader, ok := inputReader.(*RateLimitedReader); ok {
request.ContentLength = reader.Length()
}
if url != OneDriveRefreshTokenURL {
client.TokenLock.Lock()
request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
client.TokenLock.Unlock()
}
if contentType != "" {
request.Header.Set("Content-Type", contentType)
}
response, err = client.HTTPClient.Do(request)
if err != nil {
if client.IsConnected {
if strings.Contains(err.Error(), "TLS handshake timeout") {
// Give a long timeout regardless of backoff when a TLS timeout happens, hoping that
// idle connections are not to be reused on reconnect.
retryAfter := time.Duration(rand.Float32()*60000 + 180000)
LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
time.Sleep(retryAfter * time.Millisecond)
} else {
// For all other errors just blindly retry until the maximum is reached
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "%v; retry after %d milliseconds", err, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
}
backoff *= 2
continue
}
return nil, 0, err
}
client.IsConnected = true
if response.StatusCode < 400 {
return response.Body, response.ContentLength, nil
}
defer response.Body.Close()
errorResponse := &OneDriveErrorResponse{
Error: OneDriveError{Status: response.StatusCode},
}
if response.StatusCode == 401 {
if url == OneDriveRefreshTokenURL {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
}
err = client.RefreshToken()
if err != nil {
return nil, 0, err
}
continue
} else if response.StatusCode > 401 && response.StatusCode != 404 {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
time.Sleep(retryAfter * time.Millisecond)
backoff *= 2
continue
} else {
if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
return nil, 0, OneDriveError{Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response")}
}
errorResponse.Error.Status = response.StatusCode
return nil, 0, errorResponse.Error
}
}
return nil, 0, fmt.Errorf("Maximum number of retries reached")
}
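
The retry policy in call above is: at most 8 attempts; refresh the token on 401; on other errors (except 404) sleep a uniform random delay whose range doubles each attempt; and pause 3 to 4 minutes on a TLS handshake timeout. The delay arithmetic, extracted for illustration (backoffDelays is a hypothetical helper using the same time and math/rand imports):

// backoffDelays reproduces the sleep computation used above: attempt i
// sleeps a uniform random duration in [0, backoff) seconds, with backoff
// doubling after every retried attempt (1s, 2s, 4s, ...).
func backoffDelays(attempts int) []time.Duration {
delays := make([]time.Duration, 0, attempts)
backoff := 1
for i := 0; i < attempts; i++ {
retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
delays = append(delays, retryAfter*time.Millisecond)
backoff *= 2
}
return delays
}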
func (client *OneDriveClient) RefreshToken() (err error) {
client.TokenLock.Lock()
defer client.TokenLock.Unlock()
if client.Token.Valid() {
return nil
}
readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
if err != nil {
return fmt.Errorf("failed to refresh the access token: %v", err)
}
defer readCloser.Close()
if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
return err
}
description, err := json.Marshal(client.Token)
if err != nil {
return err
}
err = ioutil.WriteFile(client.TokenFile, description, 0644)
if err != nil {
return err
}
return nil
}
type OneDriveEntry struct {
ID string
Name string
Folder map[string]interface{}
Size int64
}
type OneDriveListEntriesOutput struct {
Entries []OneDriveEntry `json:"value"`
NextLink string `json:"@odata.nextLink"`
}
func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error) {
entries := []OneDriveEntry{}
url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
if path == "" {
url = OneDriveAPIURL + "/drive/root/children"
}
if client.TestMode {
url += "?top=8"
} else {
url += "?top=1000"
}
url += "&select=name,size,folder"
for {
readCloser, _, err := client.call(url, "GET", 0, "")
if err != nil {
return nil, err
}
defer readCloser.Close()
output := &OneDriveListEntriesOutput{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return nil, err
}
entries = append(entries, output.Entries...)
url = output.NextLink
if url == "" {
break
}
}
return entries, nil
}
func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {
url := OneDriveAPIURL + "/drive/root:/" + path
url += "?select=id,name,size,folder"
readCloser, _, err := client.call(url, "GET", 0, "")
if err != nil {
if e, ok := err.(OneDriveError); ok && e.Status == 404 {
return "", false, 0, nil
} else {
return "", false, 0, err
}
}
defer readCloser.Close()
output := &OneDriveEntry{}
if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
return "", false, 0, err
}
return output.ID, len(output.Folder) != 0, output.Size, nil
}
func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {
url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"
return client.call(url, "GET", 0, "")
}
func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {
url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"
readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")
if err != nil {
return err
}
readCloser.Close()
return nil
}
func (client *OneDriveClient) DeleteFile(path string) error {
url := OneDriveAPIURL + "/drive/root:/" + path
readCloser, _, err := client.call(url, "DELETE", 0, "")
if err != nil {
return err
}
readCloser.Close()
return nil
}
func (client *OneDriveClient) MoveFile(path string, parent string) error {
url := OneDriveAPIURL + "/drive/root:/" + path
parentReference := make(map[string]string)
parentReference["path"] = "/drive/root:/" + parent
parameters := make(map[string]interface{})
parameters["parentReference"] = parentReference
readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
if err != nil {
return err
}
readCloser.Close()
return nil
}
func (client *OneDriveClient) CreateDirectory(path string, name string) error {
url := OneDriveAPIURL + "/root/children"
if path != "" {
parentID, isDir, _, err := client.GetFileInfo(path)
if err != nil {
return err
}
if parentID == "" {
return fmt.Errorf("The path '%s' does not exist", path)
}
if !isDir {
return fmt.Errorf("The path '%s' is not a directory", path)
}
url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
}
parameters := make(map[string]interface{})
parameters["name"] = name
parameters["folder"] = make(map[string]int)
readCloser, _, err := client.call(url, "POST", parameters, "application/json")
if err != nil {
return err
}
readCloser.Close()
return nil
}
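
A minimal end-to-end sketch of this client (demoOneDrive is a hypothetical helper, assuming a valid one-token.json): ensure a folder exists, upload into it, then list it.

// demoOneDrive is a hypothetical example: GetFileInfo returns an empty ID
// when the path does not exist, in which case the folder is created first.
func demoOneDrive() error {
client, err := NewOneDriveClient("one-token.json")
if err != nil {
return err
}
folderID, _, _, err := client.GetFileInfo("backups")
if err != nil {
return err
}
if folderID == "" {
if err := client.CreateDirectory("", "backups"); err != nil {
return err
}
}
if err := client.UploadFile("backups/hello.txt", []byte("hello"), 100); err != nil {
return err
}
entries, err := client.ListEntries("backups")
if err != nil {
return err
}
for _, entry := range entries {
fmt.Printf("%s (%d bytes)\n", entry.Name, entry.Size)
}
return nil
}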


@@ -5,142 +5,141 @@
package duplicacy
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"io"
"testing"
crypto_rand "crypto/rand"
"math/rand"
)
func TestOneDriveClient(t *testing.T) {
oneDriveClient, err := NewOneDriveClient("one-token.json")
if err != nil {
t.Errorf("Failed to create the OneDrive client: %v", err)
return
}
oneDriveClient.TestMode = true
existingFiles, err := oneDriveClient.ListEntries("")
for _, file := range existingFiles {
fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
}
testID, _, _, err := oneDriveClient.GetFileInfo("test")
if err != nil {
t.Errorf("Failed to list the test directory: %v", err)
return
}
if testID == "" {
err = oneDriveClient.CreateDirectory("", "test")
if err != nil {
t.Errorf("Failed to create the test directory: %v", err)
return
}
}
test1ID, _, _, err := oneDriveClient.GetFileInfo("test/test1")
if err != nil {
t.Errorf("Failed to list the test1 directory: %v", err)
return
}
if test1ID == "" {
err = oneDriveClient.CreateDirectory("test", "test1")
if err != nil {
t.Errorf("Failed to create the test1 directory: %v", err)
return
}
}
test2ID, _, _, err := oneDriveClient.GetFileInfo("test/test2")
if err != nil {
t.Errorf("Failed to list the test2 directory: %v", err)
return
}
if test2ID == "" {
err = oneDriveClient.CreateDirectory("test", "test2")
if err != nil {
t.Errorf("Failed to create the test2 directory: %v", err)
return
}
}
numberOfFiles := 20
maxFileSize := 64 * 1024
for i := 0; i < numberOfFiles; i++ {
content := make([]byte, rand.Int()%maxFileSize+1)
_, err = crypto_rand.Read(content)
if err != nil {
t.Errorf("Error generating random content: %v", err)
return
}
hasher := sha256.New()
hasher.Write(content)
filename := hex.EncodeToString(hasher.Sum(nil))
fmt.Printf("file: %s\n", filename)
err = oneDriveClient.UploadFile("test/test1/"+filename, content, 100)
if err != nil {
/*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
t.Errorf("Failed to upload the file %s: %v", filename, err)
return
}
}
}
entries, err := oneDriveClient.ListEntries("test/test1")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
err = oneDriveClient.MoveFile("test/test1/"+entry.Name, "test/test2")
if err != nil {
t.Errorf("Failed to move %s: %v", entry.Name, err)
return
}
}
entries, err = oneDriveClient.ListEntries("test/test2")
if err != nil {
t.Errorf("Error list randomly generated files: %v", err)
return
}
for _, entry := range entries {
readCloser, _, err := oneDriveClient.DownloadFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Error downloading file %s: %v", entry.Name, err)
return
}
hasher := sha256.New()
io.Copy(hasher, readCloser)
hash := hex.EncodeToString(hasher.Sum(nil))
if hash != entry.Name {
t.Errorf("File %s, hash %s", entry.Name, hash)
}
readCloser.Close()
}
for _, entry := range entries {
err = oneDriveClient.DeleteFile("test/test2/" + entry.Name)
if err != nil {
t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
return
}
}
}


@@ -5,238 +5,238 @@
package duplicacy
import (
"fmt"
"path"
"strings"
"fmt"
"path"
"strings"
)
type OneDriveStorage struct {
RateLimitedStorage
RateLimitedStorage
client *OneDriveClient
storageDir string
numberOfThread int
client *OneDriveClient
storageDir string
numberOfThread int
}
// CreateOneDriveStorage creates an OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {
for len(storagePath) > 0 && storagePath[len(storagePath) - 1] == '/' {
storagePath = storagePath[:len(storagePath) - 1]
}
for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
storagePath = storagePath[:len(storagePath)-1]
}
client, err := NewOneDriveClient(tokenFile)
if err != nil {
return nil, err
}
client, err := NewOneDriveClient(tokenFile)
if err != nil {
return nil, err
}
fileID, isDir, _, err := client.GetFileInfo(storagePath)
if err != nil {
return nil, err
}
fileID, isDir, _, err := client.GetFileInfo(storagePath)
if err != nil {
return nil, err
}
if fileID == "" {
return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
}
if fileID == "" {
return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
}
if !isDir {
return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
}
if !isDir {
return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
}
storage = &OneDriveStorage {
client: client,
storageDir: storagePath,
numberOfThread: threads,
}
storage = &OneDriveStorage{
client: client,
storageDir: storagePath,
numberOfThread: threads,
}
for _, path := range []string { "chunks", "fossils", "snapshots" } {
dir := storagePath + "/" + path
dirID, isDir, _, err := client.GetFileInfo(dir)
if err != nil {
return nil, err
}
if dirID == "" {
err = client.CreateDirectory(storagePath, path)
if err != nil {
return nil, err
}
} else if !isDir {
return nil, fmt.Errorf("%s is not a directory", dir)
}
}
for _, path := range []string{"chunks", "fossils", "snapshots"} {
dir := storagePath + "/" + path
dirID, isDir, _, err := client.GetFileInfo(dir)
if err != nil {
return nil, err
}
if dirID == "" {
err = client.CreateDirectory(storagePath, path)
if err != nil {
return nil, err
}
} else if !isDir {
return nil, fmt.Errorf("%s is not a directory", dir)
}
}
return storage, nil
return storage, nil
}
// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
for len(dir) > 0 && dir[len(dir) - 1] == '/' {
dir = dir[:len(dir) - 1]
}
for len(dir) > 0 && dir[len(dir)-1] == '/' {
dir = dir[:len(dir)-1]
}
if dir == "snapshots" {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
if dir == "snapshots" {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
subDirs := []string{}
for _, entry := range entries {
if len(entry.Folder) > 0 {
subDirs = append(subDirs, entry.Name + "/")
}
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
subDirs := []string{}
for _, entry := range entries {
if len(entry.Folder) > 0 {
subDirs = append(subDirs, entry.Name+"/")
}
}
return subDirs, nil, nil
} else if strings.HasPrefix(dir, "snapshots/") {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
if err != nil {
return nil, nil, err
}
files := []string{}
files := []string{}
for _, entry := range entries {
if len(entry.Folder) == 0 {
files = append(files, entry.Name)
}
}
return files, nil, nil
} else {
files := []string{}
sizes := []int64{}
for _, parent := range []string {"chunks", "fossils" } {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + parent)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
if len(entry.Folder) == 0 {
files = append(files, entry.Name)
}
}
return files, nil, nil
} else {
files := []string{}
sizes := []int64{}
for _, parent := range []string{"chunks", "fossils"} {
entries, err := storage.client.ListEntries(storage.storageDir + "/" + parent)
if err != nil {
return nil, nil, err
}
for _, entry := range entries {
name := entry.Name
if parent == "fossils" {
name += ".fsl"
}
files = append(files, name)
sizes = append(sizes, entry.Size)
}
}
return files, sizes, nil
}
for _, entry := range entries {
name := entry.Name
if parent == "fossils" {
name += ".fsl"
}
files = append(files, name)
sizes = append(sizes, entry.Size)
}
}
return files, sizes, nil
}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *OneDriveStorage) DeleteFile(threadIndex int, filePath string) (err error) {
if strings.HasSuffix(filePath, ".fsl") && strings.HasPrefix(filePath, "chunks/") {
filePath = "fossils/" + filePath[len("chunks/"):len(filePath) - len(".fsl")]
}
if strings.HasSuffix(filePath, ".fsl") && strings.HasPrefix(filePath, "chunks/") {
filePath = "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
}
err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
if e, ok := err.(OneDriveError); ok && e.Status == 404 {
LOG_DEBUG("ONEDRIVE_DELETE", "Ignore 404 error")
return nil
}
return err
err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
if e, ok := err.(OneDriveError); ok && e.Status == 404 {
LOG_DEBUG("ONEDRIVE_DELETE", "Ignore 404 error")
return nil
}
return err
}
// MoveFile renames the file.
func (storage *OneDriveStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	fromPath := storage.storageDir + "/" + from
	toParent := storage.storageDir + "/fossils"
	if strings.HasSuffix(from, ".fsl") {
		fromPath = storage.storageDir + "/fossils/" + from[len("chunks/"):len(from)-len(".fsl")]
		toParent = storage.storageDir + "/chunks"
	}

	err = storage.client.MoveFile(fromPath, toParent)
	if err != nil {
		if e, ok := err.(OneDriveError); ok && e.Status == 409 {
			LOG_DEBUG("ONEDRIVE_MOVE", "Ignore 409 conflict error")
		} else {
			return err
		}
	}
	return nil
}
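// --- Editor's illustrative sketch (not part of the original file) ---
// DeleteFile and MoveFile above both translate between Duplicacy's logical view of a
// fossil ("chunks/<id>.fsl") and its physical OneDrive location ("fossils/<id>").
// Below is a minimal standalone version of that mapping; 'toRealFossilPath' is a
// hypothetical helper name, not a function in this file.

package main

import (
	"fmt"
	"strings"
)

// toRealFossilPath converts the logical path "chunks/<id>.fsl" into the physical
// path "fossils/<id>"; any other path is returned unchanged.
func toRealFossilPath(filePath string) string {
	if strings.HasSuffix(filePath, ".fsl") && strings.HasPrefix(filePath, "chunks/") {
		return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
	}
	return filePath
}

func main() {
	fmt.Println(toRealFossilPath("chunks/0a1b2c.fsl")) // fossils/0a1b2c
	fmt.Println(toRealFossilPath("chunks/0a1b2c"))     // chunks/0a1b2c
}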
// CreateDirectory creates a new directory.
func (storage *OneDriveStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	parent := path.Dir(dir)

	if parent == "." {
		return storage.client.CreateDirectory(storage.storageDir, dir)
	} else {
		return storage.client.CreateDirectory(storage.storageDir+"/"+parent, path.Base(dir))
	}
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *OneDriveStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
		filePath = filePath[:len(filePath)-1]
	}

	fileID, isDir, size, err := storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
	return fileID != "", isDir, size, err
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *OneDriveStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
realPath := storage.storageDir + "/" + filePath
if isFossil {
filePath += ".fsl"
realPath = storage.storageDir + "/fossils/" + chunkID
}
filePath = "chunks/" + chunkID
realPath := storage.storageDir + "/" + filePath
if isFossil {
filePath += ".fsl"
realPath = storage.storageDir + "/fossils/" + chunkID
}
fileID, _, size, err := storage.client.GetFileInfo(realPath)
return filePath, fileID != "", size, err
fileID, _, size, err := storage.client.GetFileInfo(realPath)
return filePath, fileID != "", size, err
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *OneDriveStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThread)
	return err
}
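// --- Editor's illustrative sketch (not part of the original file) ---
// DownloadFile and UploadFile divide the storage-wide rate limit evenly among the
// worker threads, so N concurrent transfers together stay under the global cap.
// A minimal sketch of that split; the function name here is hypothetical.

package main

import "fmt"

// perThreadLimit returns the bytes-per-second share for one of 'threads' workers.
func perThreadLimit(globalLimit int, threads int) int {
	if threads <= 0 {
		return globalLimit
	}
	return globalLimit / threads
}

func main() {
	// Four download threads sharing a 1 MiB/s cap each get 256 KiB/s.
	fmt.Println(perThreadLimit(1024*1024, 4))
}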
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	err = storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThread)
	if e, ok := err.(OneDriveError); ok && e.Status == 409 {
		LOG_TRACE("ONEDRIVE_UPLOAD", "File %s already exists", filePath)
		return nil
	} else {
		return err
	}
}
// IsCacheNeeded returns whether a local snapshot cache is needed for the storage to avoid
// downloading/uploading chunks too often when managing snapshots.
func (storage *OneDriveStorage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented returns whether the 'MoveFile' method is implemented.
func (storage *OneDriveStorage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent returns whether the storage can guarantee strong consistency.
func (storage *OneDriveStorage) IsStrongConsistent() bool { return false }

// IsFastListing returns whether the storage supports fast listing of file names.
func (storage *OneDriveStorage) IsFastListing() bool { return true }

// EnableTestMode enables the test mode.
func (storage *OneDriveStorage) EnableTestMode() {
	storage.client.TestMode = true
}

View File

@@ -5,119 +5,119 @@
package duplicacy
import (
"strings"
"encoding/json"
"path"
"io/ioutil"
"reflect"
"os"
"encoding/json"
"io/ioutil"
"os"
"path"
"reflect"
"strings"
)
// Preference stores options for each storage.
type Preference struct {
	Name              string            `json:"name"`
	SnapshotID        string            `json:"id"`
	StorageURL        string            `json:"storage"`
	Encrypted         bool              `json:"encrypted"`
	BackupProhibited  bool              `json:"no_backup"`
	RestoreProhibited bool              `json:"no_restore"`
	DoNotSavePassword bool              `json:"no_save_password"`
	Keys              map[string]string `json:"keys"`
}
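// For reference, a hand-written example of what the ".duplicacy/preferences" file could
// look like given the JSON tags above; all values are made up, and the file is a JSON
// array because it unmarshals into the 'Preferences []Preference' slice below:
//
//	[
//	    {
//	        "name": "default",
//	        "id": "my-repository",
//	        "storage": "sftp://user@example.com/backups/storage",
//	        "encrypted": true,
//	        "no_backup": false,
//	        "no_restore": false,
//	        "no_save_password": false,
//	        "keys": null
//	    }
//	]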
var preferencePath string
var Preferences []Preference
func LoadPreferences(repository string) bool {
	preferencePath = path.Join(repository, DUPLICACY_DIRECTORY)

	stat, err := os.Stat(preferencePath)
	if err != nil {
		LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", repository, err)
		return false
	}

	if !stat.IsDir() {
		content, err := ioutil.ReadFile(preferencePath)
		if err != nil {
			LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
			return false
		}

		realPreferencePath := strings.TrimSpace(string(content))
		stat, err := os.Stat(realPreferencePath)
		if err != nil {
			LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
			return false
		}

		if !stat.IsDir() {
			LOG_ERROR("PREFERENCE_PATH", "The preference path %s is not a directory", realPreferencePath)
		}

		preferencePath = realPreferencePath
	}

	description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
	if err != nil {
		LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
		return false
	}

	err = json.Unmarshal(description, &Preferences)
	if err != nil {
		LOG_ERROR("PREFERENCE_PARSE", "Failed to parse the preference file for repository %s: %v", repository, err)
		return false
	}

	if len(Preferences) == 0 {
		LOG_ERROR("PREFERENCE_NONE", "No preference found in the preference file")
		return false
	}

	return true
}
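// To recap the lookup above with a concrete, hypothetical layout: the repository's
// ".duplicacy" entry may be either the preference directory itself or a plain text
// file whose content is the real preference directory, e.g.
//
//	/home/user/repo/.duplicacy                 - a file containing "/somewhere/else/dot_duplicacy"
//	/somewhere/else/dot_duplicacy/             - the directory LoadPreferences switches to
//	/somewhere/else/dot_duplicacy/preferences  - the JSON file actually read
//
// Only after this optional redirection is the "preferences" file parsed.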
func GetDuplicacyPreferencePath() string {
if preferencePath == "" {
LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
return ""
}
return preferencePath
if preferencePath == "" {
LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
return ""
}
return preferencePath
}
// Normally 'preferencePath' is set in LoadPreferences; however, if LoadPreferences is not called, this function
// provides another chance to set 'preferencePath'.
func SetDuplicacyPreferencePath(p string) {
	preferencePath = p
}
func SavePreferences() bool {
	description, err := json.MarshalIndent(Preferences, "", " ")
	if err != nil {
		LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
		return false
	}

	preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")
	err = ioutil.WriteFile(preferenceFile, description, 0600)
	if err != nil {
		LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
		return false
	}
	return true
}
func FindPreference(name string) *Preference {
	for i, preference := range Preferences {
		if preference.Name == name || preference.StorageURL == name {
			return &Preferences[i]
		}
	}

	return nil
}
func (preference *Preference) Equal(other *Preference) bool {
	return reflect.DeepEqual(preference, other)
}

View File

@@ -5,208 +5,209 @@
package duplicacy
import (
"time"
"github.com/gilbertchen/goamz/aws"
"github.com/gilbertchen/goamz/s3"
"time"
"github.com/gilbertchen/goamz/aws"
"github.com/gilbertchen/goamz/s3"
)
// S3CStorage is a storage backend for s3 compatible storages that require V2 Signing.
type S3CStorage struct {
	RateLimitedStorage

	buckets    []*s3.Bucket
	storageDir string
}
// CreateS3CStorage creates an Amazon S3 storage object.
func CreateS3CStorage(regionName string, endpoint string, bucketName string, storageDir string,
	accessKey string, secretKey string, threads int) (storage *S3CStorage, err error) {

	var region aws.Region

	if endpoint == "" {
		if regionName == "" {
			regionName = "us-east-1"
		}
		region = aws.Regions[regionName]
	} else {
		region = aws.Region{Name: regionName, S3Endpoint: "https://" + endpoint}
	}

	auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}

	var buckets []*s3.Bucket
	for i := 0; i < threads; i++ {
		s3Client := s3.New(auth, region)
		s3Client.AttemptStrategy = aws.AttemptStrategy{
			Min:   8,
			Total: 300 * time.Second,
			Delay: 1000 * time.Millisecond,
		}

		bucket := s3Client.Bucket(bucketName)
		buckets = append(buckets, bucket)
	}

	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &S3CStorage{
		buckets:    buckets,
		storageDir: storageDir,
	}

	return storage, nil
}
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if len(dir) > 0 && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	dirLength := len(storage.storageDir + dir)
	if dir == "snapshots/" {
		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "/", "", 100)
		if err != nil {
			return nil, nil, err
		}

		for _, subDir := range results.CommonPrefixes {
			files = append(files, subDir[dirLength:])
		}
		return files, nil, nil
	} else if dir == "chunks/" {
		marker := ""
		for {
			results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", marker, 1000)
			if err != nil {
				return nil, nil, err
			}

			for _, object := range results.Contents {
				files = append(files, object.Key[dirLength:])
				sizes = append(sizes, object.Size)
			}

			if !results.IsTruncated {
				break
			}

			marker = results.Contents[len(results.Contents)-1].Key
		}
		return files, sizes, nil
	} else {
		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", "", 1000)
		if err != nil {
			return nil, nil, err
		}

		for _, object := range results.Contents {
			files = append(files, object.Key[dirLength:])
		}
		return files, nil, nil
	}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
}
// MoveFile renames the file.
func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	options := s3.CopyOptions{ContentType: "application/duplicacy"}
	_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir+to, s3.Private, options, storage.buckets[threadIndex].Name+"/"+storage.storageDir+from)
	if err != nil {
		return nil
	}

	return storage.DeleteFile(threadIndex, from)
}
// CreateDirectory creates a new directory.
func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	response, err := storage.buckets[threadIndex].Head(storage.storageDir+filePath, nil)
	if err != nil {
		if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
			return false, false, 0, nil
		} else {
			return false, false, 0, err
		}
	}

	if response.StatusCode == 403 || response.StatusCode == 404 {
		return false, false, 0, nil
	} else {
		return true, false, response.ContentLength, nil
	}
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *S3CStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
if err != nil {
return "", false, 0, err
} else {
return filePath, exist, size, err
}
if err != nil {
return "", false, 0, err
} else {
return filePath, exist, size, err
}
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.buckets))
	return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	options := s3.Options{}
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.buckets))
	return storage.buckets[threadIndex].PutReader(storage.storageDir+filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
}
// IsCacheNeeded returns whether a local snapshot cache is needed for the storage to avoid
// downloading/uploading chunks too often when managing snapshots.
func (storage *S3CStorage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented returns whether the 'MoveFile' method is implemented.
func (storage *S3CStorage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent returns whether the storage can guarantee strong consistency.
func (storage *S3CStorage) IsStrongConsistent() bool { return false }

// IsFastListing returns whether the storage supports fast listing of file names.
func (storage *S3CStorage) IsFastListing() bool { return true }

// EnableTestMode enables the test mode.
func (storage *S3CStorage) EnableTestMode() {}

View File

@@ -5,266 +5,266 @@
package duplicacy
import (
"strings"
"reflect"
"reflect"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
)
type S3Storage struct {
	RateLimitedStorage

	client          *s3.S3
	bucket          string
	storageDir      string
	numberOfThreads int
}
// CreateS3Storage creates an Amazon S3 storage object.
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
	accessKey string, secretKey string, threads int,
	isSSLSupported bool, isMinioCompatible bool) (storage *S3Storage, err error) {

	token := ""
	auth := credentials.NewStaticCredentials(accessKey, secretKey, token)

	if regionName == "" && endpoint == "" {
		defaultRegionConfig := &aws.Config{
			Region:      aws.String("us-east-1"),
			Credentials: auth,
		}

		s3Client := s3.New(session.New(defaultRegionConfig))

		response, err := s3Client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucketName)})
		if err != nil {
			return nil, err
		}
		regionName = "us-east-1"
		if response.LocationConstraint != nil {
			regionName = *response.LocationConstraint
		}
	}

	config := &aws.Config{
		Region:           aws.String(regionName),
		Credentials:      auth,
		Endpoint:         aws.String(endpoint),
		S3ForcePathStyle: aws.Bool(isMinioCompatible),
		DisableSSL:       aws.Bool(!isSSLSupported),
	}

	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &S3Storage{
		client:          s3.New(session.New(config)),
		bucket:          bucketName,
		storageDir:      storageDir,
		numberOfThreads: threads,
	}

	return storage, nil
}
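// A minimal usage sketch for the constructor above (bucket name, prefix, and
// credentials are placeholders): when both regionName and endpoint are left empty,
// the region is auto-detected via GetBucketLocation before the real client is built.
//
//	storage, err := CreateS3Storage("", "", "my-bucket", "backups", "AKIA...", "secret...",
//		4 /*threads*/, true /*SSL*/, false /*not minio-compatible*/)
//	if err != nil {
//		return err
//	}
//	_ = storage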
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if len(dir) > 0 && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	if dir == "snapshots/" {
		dir = storage.storageDir + dir
		input := s3.ListObjectsInput{
			Bucket:    aws.String(storage.bucket),
			Prefix:    aws.String(dir),
			Delimiter: aws.String("/"),
			MaxKeys:   aws.Int64(1000),
		}

		output, err := storage.client.ListObjects(&input)
		if err != nil {
			return nil, nil, err
		}

		for _, subDir := range output.CommonPrefixes {
			files = append(files, (*subDir.Prefix)[len(dir):])
		}
		return files, nil, nil
	} else {
		dir = storage.storageDir + dir
		marker := ""
		for {
			input := s3.ListObjectsInput{
				Bucket:  aws.String(storage.bucket),
				Prefix:  aws.String(dir),
				MaxKeys: aws.Int64(1000),
				Marker:  aws.String(marker),
			}

			output, err := storage.client.ListObjects(&input)
			if err != nil {
				return nil, nil, err
			}

			for _, object := range output.Contents {
				files = append(files, (*object.Key)[len(dir):])
				sizes = append(sizes, *object.Size)
			}

			if !*output.IsTruncated {
				break
			}

			marker = *output.Contents[len(output.Contents)-1].Key
		}
		return files, sizes, nil
	}
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
input := &s3.DeleteObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
_, err = storage.client.DeleteObject(input)
return err
input := &s3.DeleteObjectInput{
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
_, err = storage.client.DeleteObject(input)
return err
}
// MoveFile renames the file.
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {
input := &s3.CopyObjectInput {
Bucket: aws.String(storage.bucket),
CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
Key: aws.String(storage.storageDir + to),
}
input := &s3.CopyObjectInput{
Bucket: aws.String(storage.bucket),
CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
Key: aws.String(storage.storageDir + to),
}
_, err = storage.client.CopyObject(input)
if err != nil {
return err
}
return storage.DeleteFile(threadIndex, from)
_, err = storage.client.CopyObject(input)
if err != nil {
return err
}
return storage.DeleteFile(threadIndex, from)
}
// CreateDirectory creates a new directory.
func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
input := &s3.HeadObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
input := &s3.HeadObjectInput{
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.HeadObject(input)
if err != nil {
if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
if output == nil || output.ContentLength == nil {
return false, false, 0, nil
} else {
return true, false, *output.ContentLength, nil
}
output, err := storage.client.HeadObject(input)
if err != nil {
if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
return false, false, 0, nil
} else {
return false, false, 0, err
}
}
if output == nil || output.ContentLength == nil {
return false, false, 0, nil
} else {
return true, false, *output.ContentLength, nil
}
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
filePath = "chunks/" + chunkID
if isFossil {
filePath += ".fsl"
}
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
if err != nil {
return "", false, 0, err
} else {
return filePath, exist, size, err
}
if err != nil {
return "", false, 0, err
} else {
return filePath, exist, size, err
}
}
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
input := &s3.GetObjectInput {
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.GetObject(input)
if err != nil {
return err
}
defer output.Body.Close()
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.bucket))
return err
input := &s3.GetObjectInput{
Bucket: aws.String(storage.bucket),
Key: aws.String(storage.storageDir + filePath),
}
output, err := storage.client.GetObject(input)
if err != nil {
return err
}
defer output.Body.Close()
_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.bucket))
return err
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	attempts := 0

	for {
		input := &s3.PutObjectInput{
			Bucket:      aws.String(storage.bucket),
			Key:         aws.String(storage.storageDir + filePath),
			ACL:         aws.String(s3.ObjectCannedACLPrivate),
			Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.bucket)),
			ContentType: aws.String("application/duplicacy"),
		}

		_, err = storage.client.PutObject(input)
		if err == nil || attempts >= 3 || !strings.Contains(err.Error(), "XAmzContentSHA256Mismatch") {
			return err
		}

		LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
		attempts += 1
	}
	return err
}
// IsCacheNeeded returns whether a local snapshot cache is needed for the storage to avoid
// downloading/uploading chunks too often when managing snapshots.
func (storage *S3Storage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented returns whether the 'MoveFile' method is implemented.
func (storage *S3Storage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent returns whether the storage can guarantee strong consistency.
func (storage *S3Storage) IsStrongConsistent() bool { return false }

// IsFastListing returns whether the storage supports fast listing of file names.
func (storage *S3Storage) IsFastListing() bool { return true }

// EnableTestMode enables the test mode.
func (storage *S3Storage) EnableTestMode() {}

View File

@@ -5,305 +5,304 @@
package duplicacy
import (
"fmt"
"io"
"os"
"net"
"path"
"time"
"runtime"
"math/rand"
"fmt"
"io"
"math/rand"
"net"
"os"
"path"
"runtime"
"time"
"golang.org/x/crypto/ssh"
"github.com/pkg/sftp"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type SFTPStorage struct {
	RateLimitedStorage

	client          *sftp.Client
	storageDir      string
	numberOfThreads int
}
func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
	password string, threads int) (storage *SFTPStorage, err error) {

	authMethods := []ssh.AuthMethod{ssh.Password(password)}

	hostKeyCallback := func(hostname string, remote net.Addr,
		key ssh.PublicKey) error {
		return nil
	}

	return CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyCallback, threads)
}
func CreateSFTPStorage(server string, port int, username string, storageDir string,
	authMethods []ssh.AuthMethod,
	hostKeyCallback func(hostname string, remote net.Addr,
		key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {

	config := &ssh.ClientConfig{
		User:            username,
		Auth:            authMethods,
		HostKeyCallback: hostKeyCallback,
	}

	if server == "sftp.hidrive.strato.com" {
		config.Ciphers = []string{"aes128-cbc", "aes128-ctr", "aes256-ctr"}
	}

	serverAddress := fmt.Sprintf("%s:%d", server, port)
	connection, err := ssh.Dial("tcp", serverAddress, config)
	if err != nil {
		return nil, err
	}

	client, err := sftp.NewClient(connection)
	if err != nil {
		connection.Close()
		return nil, err
	}

	for storageDir[len(storageDir)-1] == '/' {
		storageDir = storageDir[:len(storageDir)-1]
	}

	fileInfo, err := client.Stat(storageDir)
	if err != nil {
		return nil, fmt.Errorf("Can't access the storage path %s: %v", storageDir, err)
	}

	if !fileInfo.IsDir() {
		return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
	}

	storage = &SFTPStorage{
		client:          client,
		storageDir:      storageDir,
		numberOfThreads: threads,
	}

	// Random number for generating the temporary chunk file suffix.
	rand.Seed(time.Now().UnixNano())

	runtime.SetFinalizer(storage, CloseSFTPStorage)

	return storage, nil
}
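// A sketch of how a caller could pass key-based authentication and a real host key
// check to CreateSFTPStorage above, using only the x/crypto/ssh API; the key path
// and server details are placeholders:
//
//	keyData, err := ioutil.ReadFile("/home/user/.ssh/id_rsa")
//	if err != nil {
//		return nil, err
//	}
//	signer, err := ssh.ParsePrivateKey(keyData)
//	if err != nil {
//		return nil, err
//	}
//	authMethods := []ssh.AuthMethod{ssh.PublicKeys(signer)}
//	hostKeyCallback := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
//		// Unlike the password helper above, verify 'key' against a known_hosts
//		// entry here instead of accepting everything.
//		return nil
//	}
//	storage, err := CreateSFTPStorage("sftp.example.com", 22, "user", "/backups",
//		authMethods, hostKeyCallback, 4)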
func CloseSFTPStorage(storage *SFTPStorage) {
	storage.client.Close()
}
// ListFiles returns the list of files and subdirectories under 'dirPath' (non-recursively).
func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) {
	entries, err := storage.client.ReadDir(path.Join(storage.storageDir, dirPath))
	if err != nil {
		return nil, nil, err
	}

	for _, entry := range entries {
		name := entry.Name()
		if entry.IsDir() && name[len(name)-1] != '/' {
			name += "/"
		}
		files = append(files, name)
		sizes = append(sizes, entry.Size())
	}

	return files, sizes, nil
}
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	fullPath := path.Join(storage.storageDir, filePath)
	fileInfo, err := storage.client.Stat(fullPath)
	if err != nil {
		if os.IsNotExist(err) {
			LOG_TRACE("SFTP_STORAGE", "File %s has disappeared before deletion", filePath)
			return nil
		}
		return err
	}
	if fileInfo == nil {
		return nil
	}
	return storage.client.Remove(path.Join(storage.storageDir, filePath))
}
// MoveFile renames the file.
func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	toPath := path.Join(storage.storageDir, to)
	fileInfo, err := storage.client.Stat(toPath)
	if fileInfo != nil {
		return fmt.Errorf("The destination file %s already exists", toPath)
	}

	return storage.client.Rename(path.Join(storage.storageDir, from),
		path.Join(storage.storageDir, to))
}
// CreateDirectory creates a new directory.
func (storage *SFTPStorage) CreateDirectory(threadIndex int, dirPath string) (err error) {
	fullPath := path.Join(storage.storageDir, dirPath)
	fileInfo, err := storage.client.Stat(fullPath)
	if fileInfo != nil && fileInfo.IsDir() {
		return nil
	}
	return storage.client.Mkdir(path.Join(storage.storageDir, dirPath))
}
// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	fileInfo, err := storage.client.Stat(path.Join(storage.storageDir, filePath))
	if err != nil {
		if os.IsNotExist(err) {
			return false, false, 0, nil
		} else {
			return false, false, 0, err
		}
	}

	if fileInfo == nil {
		return false, false, 0, nil
	}

	return true, fileInfo.IsDir(), fileInfo.Size(), nil
}
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
dir := path.Join(storage.storageDir, "chunks")
dir := path.Join(storage.storageDir, "chunks")
suffix := ""
if isFossil {
suffix = ".fsl"
}
suffix := ""
if isFossil {
suffix = ".fsl"
}
// The minimum level of directories to dive into before searching for the chunk file.
minimumLevel := 2
// The minimum level of directories to dive into before searching for the chunk file.
minimumLevel := 2
for level := 0; level * 2 < len(chunkID); level ++ {
if level >= minimumLevel {
filePath = path.Join(dir, chunkID[2 * level:]) + suffix
if stat, err := storage.client.Stat(filePath); err == nil && !stat.IsDir() {
return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
} else if err == nil && stat.IsDir() {
return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
}
}
for level := 0; level*2 < len(chunkID); level++ {
if level >= minimumLevel {
filePath = path.Join(dir, chunkID[2*level:]) + suffix
if stat, err := storage.client.Stat(filePath); err == nil && !stat.IsDir() {
return filePath[len(storage.storageDir)+1:], true, stat.Size(), nil
} else if err == nil && stat.IsDir() {
return filePath[len(storage.storageDir)+1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
}
}
// Find the subdirectory the chunk file may reside.
subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
stat, err := storage.client.Stat(subDir)
if err == nil && stat.IsDir() {
dir = subDir
continue
}
// Find the subdirectory the chunk file may reside.
subDir := path.Join(dir, chunkID[2*level:2*level+2])
stat, err := storage.client.Stat(subDir)
if err == nil && stat.IsDir() {
dir = subDir
continue
}
if level < minimumLevel {
// Create the subdirectory if is doesn't exist.
if level < minimumLevel {
// Create the subdirectory if is doesn't exist.
if err == nil && !stat.IsDir() {
return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
}
if err == nil && !stat.IsDir() {
return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
}
err = storage.client.Mkdir(subDir)
if err != nil {
// The directory may have been created by other threads so check it again.
stat, _ := storage.client.Stat(subDir)
if stat == nil || !stat.IsDir() {
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
}
}
err = storage.client.Mkdir(subDir)
if err != nil {
// The directory may have been created by other threads so check it again.
stat, _ := storage.client.Stat(subDir)
if stat == nil || !stat.IsDir() {
return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
}
}
dir = subDir
continue
}
dir = subDir
continue
}
// Teh chunk must be under this subdirectory but it doesn't exist.
return path.Join(dir, chunkID[2 * level:])[len(storage.storageDir) + 1:] + suffix, false, 0, nil
// Teh chunk must be under this subdirectory but it doesn't exist.
return path.Join(dir, chunkID[2*level:])[len(storage.storageDir)+1:] + suffix, false, 0, nil
}
}
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
chunkID)
return "", false, 0, nil
LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
chunkID)
return "", false, 0, nil
}
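// --- Editor's illustrative sketch (not part of the original file) ---
// FindChunk above walks a nested layout in which a chunk ID is split into
// two-character subdirectories (at least 'minimumLevel' = 2 deep) with the
// remainder as the file name. Below is a standalone helper computing that path
// for a fixed nesting level; 'chunkPath' is hypothetical and skips the existence
// checks and directory creation the real code performs.

package main

import (
	"fmt"
	"path"
)

// chunkPath nests 'level' two-character directories under "chunks" and appends
// the rest of the ID as the file name.
func chunkPath(chunkID string, level int) string {
	p := "chunks"
	for i := 0; i < level && (i+1)*2 < len(chunkID); i++ {
		p = path.Join(p, chunkID[2*i:2*i+2])
	}
	return path.Join(p, chunkID[2*level:])
}

func main() {
	fmt.Println(chunkPath("0a1b2c3d4e", 2)) // chunks/0a/1b/2c3d4e
}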
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	file, err := storage.client.Open(path.Join(storage.storageDir, filePath))
	if err != nil {
		return err
	}

	defer file.Close()

	if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
		return err
	}

	return nil
}
// UploadFile writes 'content' to the file at 'filePath'.
func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	fullPath := path.Join(storage.storageDir, filePath)

	letters := "abcdefghijklmnopqrstuvwxyz"
	suffix := make([]byte, 8)
	for i := range suffix {
		suffix[i] = letters[rand.Intn(len(letters))]
	}

	temporaryFile := fullPath + "." + string(suffix) + ".tmp"

	file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
	if err != nil {
		return err
	}

	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
	_, err = io.Copy(file, reader)
	if err != nil {
		file.Close()
		return err
	}
	file.Close()

	err = storage.client.Rename(temporaryFile, fullPath)
	if err != nil {
		if _, err = storage.client.Stat(fullPath); err == nil {
			storage.client.Remove(temporaryFile)
			return nil
		} else {
			return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
		}
	}

	return nil
}
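// --- Editor's illustrative sketch (not part of the original file) ---
// UploadFile above writes to a randomly suffixed temporary name and then renames it,
// so a chunk either appears complete or not at all, and an interrupted upload leaves
// only a recognisable "*.tmp" file behind. The same idiom on a local filesystem, with
// the random suffix omitted for brevity:

package main

import (
	"io/ioutil"
	"os"
)

// atomicWrite writes content to a temporary file and renames it into place.
func atomicWrite(fullPath string, content []byte) error {
	temporaryFile := fullPath + ".tmp"
	if err := ioutil.WriteFile(temporaryFile, content, 0644); err != nil {
		return err
	}
	return os.Rename(temporaryFile, fullPath)
}

func main() {
	_ = atomicWrite(os.TempDir()+"/example-chunk", []byte("data"))
}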
// IsCacheNeeded returns whether a local snapshot cache is needed for the storage to avoid
// downloading/uploading chunks too often when managing snapshots.
func (storage *SFTPStorage) IsCacheNeeded() bool { return true }

// IsMoveFileImplemented returns whether the 'MoveFile' method is implemented.
func (storage *SFTPStorage) IsMoveFileImplemented() bool { return true }

// IsStrongConsistent returns whether the storage can guarantee strong consistency.
func (storage *SFTPStorage) IsStrongConsistent() bool { return true }

// IsFastListing returns whether the storage supports fast listing of file names.
func (storage *SFTPStorage) IsFastListing() bool { return false }

// EnableTestMode enables the test mode.
func (storage *SFTPStorage) EnableTestMode() {}

View File

@@ -7,7 +7,7 @@
package duplicacy
func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
	return top
}
func DeleteShadowCopy() {}

View File

@@ -5,327 +5,325 @@
package duplicacy
import (
"syscall"
"unsafe"
"time"
"os"
"runtime"
"os"
"runtime"
"syscall"
"time"
"unsafe"
ole "github.com/gilbertchen/go-ole"
ole "github.com/gilbertchen/go-ole"
)
//507C37B4-CF5B-4e95-B0AF-14EB9767467E
var IID_IVSS_ASYNC = &ole.GUID{0x507C37B4, 0xCF5B, 0x4e95, [8]byte{0xb0, 0xaf, 0x14, 0xeb, 0x97, 0x67, 0x46, 0x7e}}
type IVSSAsync struct {
	ole.IUnknown
}
type IVSSAsyncVtbl struct {
	ole.IUnknownVtbl
	cancel      uintptr
	wait        uintptr
	queryStatus uintptr
}
func (async *IVSSAsync) VTable() *IVSSAsyncVtbl {
	return (*IVSSAsyncVtbl)(unsafe.Pointer(async.RawVTable))
}
var VSS_S_ASYNC_PENDING int32 = 0x00042309
var VSS_S_ASYNC_FINISHED int32 = 0x0004230A
var VSS_S_ASYNC_CANCELLED int32 = 0x0004230B
func (async *IVSSAsync) Wait(seconds int) bool {
	startTime := time.Now().Unix()
	for {
		ret, _, _ := syscall.Syscall(async.VTable().wait, 2, uintptr(unsafe.Pointer(async)), uintptr(1000), 0)
		if ret != 0 {
			LOG_WARN("IVSSASYNC_WAIT", "IVssAsync::Wait returned %d\n", ret)
		}

		var status int32
		ret, _, _ = syscall.Syscall(async.VTable().queryStatus, 3, uintptr(unsafe.Pointer(async)),
			uintptr(unsafe.Pointer(&status)), 0)
		if ret != 0 {
			LOG_WARN("IVSSASYNC_QUERY", "IVssAsync::QueryStatus returned %d\n", ret)
		}

		if status == VSS_S_ASYNC_FINISHED {
			return true
		}
		if time.Now().Unix()-startTime > int64(seconds) {
			LOG_WARN("IVSSASYNC_TIMEOUT", "IVssAsync is pending for more than %d seconds\n", seconds)
			return false
		}
	}
}
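// A usage sketch for Wait above (the timeout value is arbitrary): each VSS request
// hands back an IVssAsync that must be polled to completion before the snapshot
// state is usable, e.g.
//
//	ret, async := vss.DoSnapshotSet()
//	if ret != 0 || async == nil {
//		// inspect the HRESULT in 'ret'
//	} else if !async.Wait(180) {
//		// the operation was still pending after 180 seconds
//	}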
func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
	r, _, _ := syscall.Syscall(
		unknown.VTable().QueryInterface,
		3,
		uintptr(unsafe.Pointer(unknown)),
		uintptr(unsafe.Pointer(iid)),
		uintptr(unsafe.Pointer(&async)))

	if r != 0 {
		LOG_WARN("IVSSASYNC_QUERY", "IVSSAsync::QueryInterface returned %d\n", r)
		return nil
	}
	return
}
//665c1d5f-c218-414d-a05d-7fef5f9d5c86
var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}
type IVSS struct {
	ole.IUnknown
}
type IVSSVtbl struct {
	ole.IUnknownVtbl
	getWriterComponentsCount      uintptr
	getWriterComponents           uintptr
	initializeForBackup           uintptr
	setBackupState                uintptr
	initializeForRestore          uintptr
	setRestoreState               uintptr
	gatherWriterMetadata          uintptr
	getWriterMetadataCount        uintptr
	getWriterMetadata             uintptr
	freeWriterMetadata            uintptr
	addComponent                  uintptr
	prepareForBackup              uintptr
	abortBackup                   uintptr
	gatherWriterStatus            uintptr
	getWriterStatusCount          uintptr
	freeWriterStatus              uintptr
	getWriterStatus               uintptr
	setBackupSucceeded            uintptr
	setBackupOptions              uintptr
	setSelectedForRestore         uintptr
	setRestoreOptions             uintptr
	setAdditionalRestores         uintptr
	setPreviousBackupStamp        uintptr
	saveAsXML                     uintptr
	backupComplete                uintptr
	addAlternativeLocationMapping uintptr
	addRestoreSubcomponent        uintptr
	setFileRestoreStatus          uintptr
	addNewTarget                  uintptr
	setRangesFilePath             uintptr
	preRestore                    uintptr
	postRestore                   uintptr
	setContext                    uintptr
	startSnapshotSet              uintptr
	addToSnapshotSet              uintptr
	doSnapshotSet                 uintptr
	deleteSnapshots               uintptr
	importSnapshots               uintptr
	breakSnapshotSet              uintptr
	getSnapshotProperties         uintptr
	query                         uintptr
	isVolumeSupported             uintptr
	disableWriterClasses          uintptr
	enableWriterClasses           uintptr
	disableWriterInstances        uintptr
	exposeSnapshot                uintptr
	revertToSnapshot              uintptr
	queryRevertStatus             uintptr
}
func (vss *IVSS) VTable() *IVSSVtbl {
	return (*IVSSVtbl)(unsafe.Pointer(vss.RawVTable))
}
func (vss *IVSS) InitializeForBackup() int {
	ret, _, _ := syscall.Syscall(vss.VTable().initializeForBackup, 2, uintptr(unsafe.Pointer(vss)), 0, 0)
	return int(ret)
}
func (vss *IVSS) GatherWriterMetadata() (int, *IVSSAsync) {
	var unknown *ole.IUnknown
	ret, _, _ := syscall.Syscall(vss.VTable().gatherWriterMetadata, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&unknown)), 0)

	if ret != 0 {
		return int(ret), nil
	} else {
		return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
	}
}
func (vss *IVSS) StartSnapshotSet(snapshotID *ole.GUID) int {
	ret, _, _ := syscall.Syscall(vss.VTable().startSnapshotSet, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(snapshotID)), 0)
	return int(ret)
}
func (vss *IVSS) AddToSnapshotSet(drive string, snapshotID *ole.GUID) int {
	volumeName := syscall.StringToUTF16Ptr(drive)

	var ret uintptr
	if runtime.GOARCH == "386" {
		// On 32-bit Windows, GUID is passed by value
		ret, _, _ = syscall.Syscall9(vss.VTable().addToSnapshotSet, 7,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(volumeName)),
			0, 0, 0, 0,
			uintptr(unsafe.Pointer(snapshotID)), 0, 0)
	} else {
		ret, _, _ = syscall.Syscall6(vss.VTable().addToSnapshotSet, 4,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(volumeName)),
			uintptr(unsafe.Pointer(ole.IID_NULL)),
			uintptr(unsafe.Pointer(snapshotID)), 0, 0)
	}
	return int(ret)
}
func (vss *IVSS) SetBackupState() int {
	VSS_BT_COPY := 5
	ret, _, _ := syscall.Syscall6(vss.VTable().setBackupState, 4,
		uintptr(unsafe.Pointer(vss)),
		0, 0, uintptr(VSS_BT_COPY), 0, 0)
	return int(ret)
}
func (vss *IVSS) PrepareForBackup() (int, *IVSSAsync) {
	var unknown *ole.IUnknown
	ret, _, _ := syscall.Syscall(vss.VTable().prepareForBackup, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&unknown)), 0)

	if ret != 0 {
		return int(ret), nil
	} else {
		return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
	}
}
func (vss *IVSS) DoSnapshotSet() (int, *IVSSAsync) {
	var unknown *ole.IUnknown
	ret, _, _ := syscall.Syscall(vss.VTable().doSnapshotSet, 2,
		uintptr(unsafe.Pointer(vss)),
		uintptr(unsafe.Pointer(&unknown)), 0)

	if ret != 0 {
		return int(ret), nil
	} else {
		return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
	}
}
type SnapshotProperties struct {
	SnapshotID           ole.GUID
	SnapshotSetID        ole.GUID
	SnapshotsCount       uint32
	SnapshotDeviceObject *uint16
	OriginalVolumeName   *uint16
	OriginatingMachine   *uint16
	ServiceMachine       *uint16
	ExposedName          *uint16
	ExposedPath          *uint16
	ProviderId           ole.GUID
	SnapshotAttributes   uint32
	CreationTimestamp    int64
	Status               int
}
func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
	var ret uintptr
	if runtime.GOARCH == "386" {
		address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
		ret, _, _ = syscall.Syscall6(vss.VTable().getSnapshotProperties, 6,
			uintptr(unsafe.Pointer(vss)),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address+4)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address+8)))),
			uintptr(*(*uint32)(unsafe.Pointer(uintptr(address+12)))),
			uintptr(unsafe.Pointer(properties)))
	} else {
		ret, _, _ = syscall.Syscall(vss.VTable().getSnapshotProperties, 3,
			uintptr(unsafe.Pointer(vss)),
			uintptr(unsafe.Pointer(&snapshotSetID)),
			uintptr(unsafe.Pointer(properties)))
	}
	return int(ret)
}
func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {
VSS_OBJECT_SNAPSHOT := 3
deleted := int32(0)
var deletedGUID ole.GUID
var ret uintptr
if runtime.GOARCH == "386" {
address := uint(uintptr(unsafe.Pointer(&snapshotID)))
ret, _, _ = syscall.Syscall9(vss.VTable().deleteSnapshots, 9,
uintptr(unsafe.Pointer(vss)),
uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
uintptr(VSS_OBJECT_SNAPSHOT),
uintptr(1),
uintptr(unsafe.Pointer(&deleted)),
uintptr(unsafe.Pointer(&deletedGUID)))
} else {
ret, _, _ = syscall.Syscall6(vss.VTable().deleteSnapshots, 6,
uintptr(unsafe.Pointer(vss)),
uintptr(unsafe.Pointer(&snapshotID)),
uintptr(VSS_OBJECT_SNAPSHOT),
uintptr(1),
uintptr(unsafe.Pointer(&deleted)),
uintptr(unsafe.Pointer(&deletedGUID)))
}
return int(ret), int(deleted), deletedGUID
}
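// uint16ArrayToString converts a NUL-terminated UTF-16 string returned by
// the Windows API into a Go string.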
func uint16ArrayToString(p *uint16) string {
if p == nil {
return ""
}
s := make([]uint16, 0)
address := uintptr(unsafe.Pointer(p))
for {
c := *(*uint16)(unsafe.Pointer(address))
if c == 0 {
break
}
s = append(s, c)
address = uintptr(int(address) + 2)
}
return syscall.UTF16ToString(s)
}
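// getIVSS queries the IUnknown returned by CreateVssBackupComponents for
// the backup-components interface identified by 'iid'.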
func getIVSS(unknown *ole.IUnknown, iid *ole.GUID) (ivss *IVSS) {
r, _, _ := syscall.Syscall(
unknown.VTable().QueryInterface,
3,
uintptr(unsafe.Pointer(unknown)),
uintptr(unsafe.Pointer(iid)),
uintptr(unsafe.Pointer(&ivss)))
if r != 0 {
LOG_WARN("IVSS_QUERY", "IVSS::QueryInterface returned %d\n", r)
return nil
}
return ivss
}
var vssBackupComponent *IVSS
@@ -333,193 +331,189 @@ var snapshotID ole.GUID
var shadowLink string
func DeleteShadowCopy() {
if vssBackupComponent != nil {
defer vssBackupComponent.Release()
LOG_TRACE("VSS_DELETE", "Deleting the shadow copy used for this backup")
ret, _, _ := vssBackupComponent.DeleteSnapshots(snapshotID)
if ret != 0 {
LOG_WARN("VSS_DELETE", "Failed to delete the shadow copy: %x\n", uint(ret))
} else {
LOG_INFO("VSS_DELETE", "The shadow copy has been successfully deleted")
}
}
if shadowLink != "" {
err := os.Remove(shadowLink)
if err != nil {
LOG_WARN("VSS_SYMLINK", "Failed to remove the symbolic link for the shadow copy: %v", err)
}
}
if shadowLink != "" {
err := os.Remove(shadowLink)
if err != nil {
LOG_WARN("VSS_SYMLINK", "Failed to remove the symbolic link for the shadow copy: %v", err)
}
}
ole.CoUninitialize()
ole.CoUninitialize()
}
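// CreateShadowCopy creates a VSS shadow copy of the volume containing
// 'top' and returns the corresponding path inside the shadow copy; on any
// failure it logs an error and falls back to returning 'top' unchanged.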
func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
if !shadowCopy {
return top
}
ole.CoInitialize(0)
defer ole.CoUninitialize()
dllVssApi := syscall.NewLazyDLL("VssApi.dll")
procCreateVssBackupComponents :=
dllVssApi.NewProc("?CreateVssBackupComponents@@YAJPEAPEAVIVssBackupComponents@@@Z")
if runtime.GOARCH == "386" {
procCreateVssBackupComponents =
dllVssApi.NewProc("?CreateVssBackupComponents@@YGJPAPAVIVssBackupComponents@@@Z")
}
if len(top) < 3 || top[1] != ':' || (top[2] != '/' && top[2] != '\\') {
LOG_ERROR("VSS_PATH", "Invalid repository path: %s", top)
return top
}
volume := top[:1] + ":\\"
LOG_INFO("VSS_CREATE", "Creating a shadow copy for %s", volume)
LOG_INFO("VSS_CREATE", "Creating a shadow copy for %s", volume)
var unknown *ole.IUnknown
r, _, err := procCreateVssBackupComponents.Call(uintptr(unsafe.Pointer(&unknown)))
if r == 0x80070005 {
LOG_ERROR("VSS_CREATE", "Only administrators can create shadow copies")
return top
}
if r != 0 {
LOG_ERROR("VSS_CREATE", "Failed to create the VSS backup component: %d", r)
return top
}
vssBackupComponent = getIVSS(unknown, IID_IVSS)
if vssBackupComponent == nil {
LOG_ERROR("VSS_CREATE", "Failed to create the VSS backup component")
return top
}
ret := vssBackupComponent.InitializeForBackup()
if ret != 0 {
LOG_ERROR("VSS_INIT", "Shadow copy creation failed: InitializeForBackup returned %x", uint(ret))
return top
}
var async *IVSSAsync
ret, async = vssBackupComponent.GatherWriterMetadata()
if ret != 0 {
LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata returned %x", uint(ret))
return top
}
if async == nil {
LOG_ERROR("VSS_GATHER",
"Shadow copy creation failed: GatherWriterMetadata failed to return a valid IVssAsync object")
return top
}
if !async.Wait(20) {
LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata didn't finish properly")
return top
}
async.Release()
var snapshotSetID ole.GUID
ret = vssBackupComponent.StartSnapshotSet(&snapshotSetID)
if ret != 0 {
LOG_ERROR("VSS_START", "Shadow copy creation failed: StartSnapshotSet returned %x", uint(ret))
return top
}
ret = vssBackupComponent.AddToSnapshotSet(volume, &snapshotID)
if ret != 0 {
LOG_ERROR("VSS_ADD", "Shadow copy creation failed: AddToSnapshotSet returned %x", uint(ret))
return top
}
s, _ := ole.StringFromIID(&snapshotID)
LOG_DEBUG("VSS_ID", "Creating shadow copy %s", s)
ret = vssBackupComponent.SetBackupState()
if ret != 0 {
LOG_ERROR("VSS_SET", "Shadow copy creation failed: SetBackupState returned %x", uint(ret))
return top
}
ret, async = vssBackupComponent.PrepareForBackup()
if ret != 0 {
LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup returned %x", uint(ret))
return top
}
if async == nil {
LOG_ERROR("VSS_PREPARE",
"Shadow copy creation failed: PrepareForBackup failed to return a valid IVssAsync object")
return top
}
if !async.Wait(20) {
LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup didn't finish properly")
return top
}
async.Release()
ret, async = vssBackupComponent.DoSnapshotSet()
if ret != 0 {
LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet returned %x", uint(ret))
return top
}
if async == nil {
LOG_ERROR("VSS_SNAPSHOT",
"Shadow copy creation failed: DoSnapshotSet failed to return a valid IVssAsync object")
return top
}
if !async.Wait(60) {
LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet didn't finish properly")
return top
}
async.Release()
properties := SnapshotProperties{}
ret = vssBackupComponent.GetSnapshotProperties(snapshotID, &properties)
if ret != 0 {
LOG_ERROR("VSS_PROPERTIES", "GetSnapshotProperties returned %x", ret)
return top
}
SnapshotIDString, _ := ole.StringFromIID(&properties.SnapshotID)
SnapshotSetIDString, _ := ole.StringFromIID(&properties.SnapshotSetID)
LOG_DEBUG("VSS_PROPERTY", "SnapshotID: %s", SnapshotIDString)
LOG_DEBUG("VSS_PROPERTY", "SnapshotSetID: %s", SnapshotSetIDString)
LOG_DEBUG("VSS_PROPERTY", "SnapshotID: %s", SnapshotIDString)
LOG_DEBUG("VSS_PROPERTY", "SnapshotSetID: %s", SnapshotSetIDString)
LOG_DEBUG("VSS_PROPERTY", "SnapshotDeviceObject: %s", uint16ArrayToString(properties.SnapshotDeviceObject))
LOG_DEBUG("VSS_PROPERTY", "OriginalVolumeName: %s", uint16ArrayToString(properties.OriginalVolumeName))
LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
LOG_DEBUG("VSS_PROPERTY", "ServiceMachine: %s", uint16ArrayToString(properties.ServiceMachine))
LOG_DEBUG("VSS_PROPERTY", "ExposedName: %s", uint16ArrayToString(properties.ExposedName))
LOG_DEBUG("VSS_PROPERTY", "ExposedPath: %s", uint16ArrayToString(properties.ExposedPath))
LOG_DEBUG("VSS_PROPERTY", "SnapshotDeviceObject: %s", uint16ArrayToString(properties.SnapshotDeviceObject))
LOG_DEBUG("VSS_PROPERTY", "OriginalVolumeName: %s", uint16ArrayToString(properties.OriginalVolumeName))
LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
LOG_DEBUG("VSS_PROPERTY", "ServiceMachine: %s", uint16ArrayToString(properties.ServiceMachine))
LOG_DEBUG("VSS_PROPERTY", "ExposedName: %s", uint16ArrayToString(properties.ExposedName))
LOG_DEBUG("VSS_PROPERTY", "ExposedPath: %s", uint16ArrayToString(properties.ExposedPath))
LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)
LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)
snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)
preferencePath := GetDuplicacyPreferencePath()
shadowLink = preferencePath + "\\shadow"
os.Remove(shadowLink)
err = os.Symlink(snapshotPath+"\\", shadowLink)
if err != nil {
LOG_ERROR("VSS_SYMLINK", "Failed to create a symbolic link to the shadow copy just created: %v", err)
return top
}
return shadowLink + "\\" + top[2:]
}
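// A minimal usage sketch (illustrative only; the real call sites may
// differ): the caller backs up from the returned path and cleans up with
// DeleteShadowCopy, relying on the fallback behavior above for errors.
//
//	func backupWithShadowCopy(top string) {
//		source := CreateShadowCopy(top, true)
//		defer DeleteShadowCopy()
//		// walk and read files under 'source' instead of 'top'
//	}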

View File

@@ -5,419 +5,417 @@
package duplicacy
import (
"os"
"fmt"
"time"
"path"
"strings"
"strconv"
"io/ioutil"
"encoding/json"
"encoding/hex"
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strconv"
"strings"
"time"
)
// Snapshot represents a backup of the repository.
type Snapshot struct {
ID string // the snapshot id; must be different for different repositories
Revision int // the revision number
Options string // options used to create this snapshot (some not included)
Tag string // user-assigned tag
StartTime int64 // at what time the snapshot was created
EndTime int64 // at what time the snapshot was done
FileSize int64 // total file size
NumberOfFiles int64 // number of files
// A sequence of chunks whose aggregated content is the json representation of 'Files'.
FileSequence []string
// A sequence of chunks whose aggregated content is the json representation of 'ChunkHashes'.
ChunkSequence []string
// A sequence of chunks whose aggregated content is the json representation of 'ChunkLengths'.
LengthSequence []string
Files []*Entry // list of files and subdirectories
ChunkHashes []string // a sequence of chunks representing the file content
ChunkLengths []int // the length of each chunk
Flag bool // used to mark certain snapshots for deletion or copy
discardAttributes bool
}
// CreateEmptySnapshot creates an empty snapshot.
func CreateEmptySnapshot(id string) (snapshot *Snapshot) {
return &Snapshot{
ID: id,
Revision: 0,
StartTime: time.Now().Unix(),
}
}
// CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
// will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, skippedDirectories []string,
skippedFiles []string, err error) {
snapshot = &Snapshot{
ID: id,
Revision: 0,
StartTime: time.Now().Unix(),
}
var patterns []string
patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
if err == nil {
for _, pattern := range strings.Split(string(patternFile), "\n") {
pattern = strings.TrimSpace(pattern)
if len(pattern) == 0 {
continue
}
if pattern[0] == '#' {
continue
}
if IsUnspecifiedFilter(pattern) {
pattern = "+" + pattern
}
if IsEmptyFilter(pattern) {
continue
}
if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
valid, err := IsValidRegex(pattern[2:])
if !valid || err != nil {
LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
}
}
patterns = append(patterns, pattern)
}
LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))
LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))
if IsTracing() {
for _, pattern := range patterns {
LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
}
}
}
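// An illustrative "filters" file accepted by the loop above (the exact
// pattern semantics are defined elsewhere; this sketch only shows the
// forms recognized here):
//
//	# comment lines start with '#'
//	-build/
//	+*.go
//	e:.*\.tmp$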
directories := make([]*Entry, 0, 256)
directories = append(directories, CreateEntry("", 0, 0, 0))
snapshot.Files = make([]*Entry, 0, 256)
attributeThreshold := 1024 * 1024
if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
}
for len(directories) > 0 {
directory := directories[len(directories)-1]
directories = directories[:len(directories)-1]
snapshot.Files = append(snapshot.Files, directory)
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, snapshot.discardAttributes)
if err != nil {
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
skippedDirectories = append(skippedDirectories, directory.Path)
continue
}
directories = append(directories, subdirectories...)
skippedFiles = append(skippedFiles, skipped...)
if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
snapshot.discardAttributes = true
for _, file := range snapshot.Files {
file.Attributes = nil
}
}
}
// Remove the root entry
snapshot.Files = snapshot.Files[1:]
return snapshot, skippedDirectories, skippedFiles, nil
}
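// Illustrative call (the snapshot id and path are placeholders):
//
//	snapshot, skippedDirs, skippedFiles, err := CreateSnapshotFromDirectory("documents", "/path/to/repository")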
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
Files []*Entry
ChunkHashes []string
ChunkLengths []int
}
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
description, err := ioutil.ReadFile(snapshotFile)
if err != nil {
LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
return nil
}
var incompleteSnapshot IncompleteSnapshot
err = json.Unmarshal(description, &incompleteSnapshot)
if err != nil {
LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
return nil
}
var chunkHashes []string
for _, chunkHash := range incompleteSnapshot.ChunkHashes {
hash, err := hex.DecodeString(chunkHash)
if err != nil {
LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
return nil
}
chunkHashes = append(chunkHashes, string(hash))
}
snapshot = &Snapshot{
Files: incompleteSnapshot.Files,
ChunkHashes: chunkHashes,
ChunkLengths: incompleteSnapshot.ChunkLengths,
}
LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
return snapshot
}
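// Note that chunk hashes are raw byte strings in memory but are stored
// hex-encoded in the incomplete snapshot file, so saving and loading
// round-trip through hex.EncodeToString and hex.DecodeString.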
// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
func SaveIncompleteSnapshot(snapshot *Snapshot) {
var files []*Entry
for _, file := range snapshot.Files {
// All unprocessed files will have a size of -1
if file.Size >= 0 {
file.Attributes = nil
files = append(files, file)
} else {
break
}
}
var chunkHashes []string
for _, chunkHash := range snapshot.ChunkHashes {
chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
}
incompleteSnapshot := IncompleteSnapshot {
Files: files,
ChunkHashes: chunkHashes,
ChunkLengths: snapshot.ChunkLengths,
}
description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
if err != nil {
LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
return
}
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
err = ioutil.WriteFile(snapshotFile, description, 0644)
if err != nil {
LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
return
}
LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}
func RemoveIncompleteSnapshot() {
snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
err = os.Remove(snapshotFile)
if err != nil {
LOG_INFO("INCOMPLETE_SAVE", "Failed to remove ncomplete snapshot: %v", err)
} else {
LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
}
}
}
// CreateSnapshotFromDescription creates a snapshot from the json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {
var root map[string]interface{}
err = json.Unmarshal(description, &root)
if err != nil {
return nil, err
}
snapshot = &Snapshot{}
if value, ok := root["id"]; !ok {
return nil, fmt.Errorf("No id is specified in the snapshot")
} else if snapshot.ID, ok = value.(string); !ok {
return nil, fmt.Errorf("Invalid id is specified in the snapshot")
}
if value, ok := root["id"]; !ok {
return nil, fmt.Errorf("No id is specified in the snapshot")
} else if snapshot.ID, ok = value.(string); !ok {
return nil, fmt.Errorf("Invalid id is specified in the snapshot")
}
if value, ok := root["revision"]; !ok {
return nil, fmt.Errorf("No revision is specified in the snapshot")
} else if _, ok = value.(float64); !ok {
return nil, fmt.Errorf("Invalid revision is specified in the snapshot")
} else {
snapshot.Revision = int(value.(float64))
}
if value, ok := root["revision"]; !ok {
return nil, fmt.Errorf("No revision is specified in the snapshot")
} else if _, ok = value.(float64); !ok {
return nil, fmt.Errorf("Invalid revision is specified in the snapshot")
} else {
snapshot.Revision = int(value.(float64))
}
if value, ok := root["tag"]; !ok {
} else if snapshot.Tag, ok = value.(string); !ok {
return nil, fmt.Errorf("Invalid tag is specified in the snapshot")
}
if value, ok := root["tag"]; !ok {
} else if snapshot.Tag, ok = value.(string); !ok {
return nil, fmt.Errorf("Invalid tag is specified in the snapshot")
}
if value, ok := root["options"]; !ok {
} else if snapshot.Options, ok = value.(string); !ok {
return nil, fmt.Errorf("Invalid options is specified in the snapshot")
}
if value, ok := root["options"]; !ok {
} else if snapshot.Options, ok = value.(string); !ok {
return nil, fmt.Errorf("Invalid options is specified in the snapshot")
}
if value, ok := root["start_time"]; !ok {
return nil, fmt.Errorf("No creation time is specified in the snapshot")
} else if _, ok = value.(float64); !ok {
return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
} else {
snapshot.StartTime = int64(value.(float64))
}
if value, ok := root["start_time"]; !ok {
return nil, fmt.Errorf("No creation time is specified in the snapshot")
} else if _, ok = value.(float64); !ok {
return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
} else {
snapshot.StartTime = int64(value.(float64))
}
if value, ok := root["end_time"]; !ok {
return nil, fmt.Errorf("No creation time is specified in the snapshot")
} else if _, ok = value.(float64); !ok {
return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
} else {
snapshot.EndTime = int64(value.(float64))
}
if value, ok := root["end_time"]; !ok {
return nil, fmt.Errorf("No creation time is specified in the snapshot")
} else if _, ok = value.(float64); !ok {
return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
} else {
snapshot.EndTime = int64(value.(float64))
}
if value, ok := root["file_size"]; ok {
if _, ok = value.(float64); ok {
snapshot.FileSize = int64(value.(float64))
}
}
if value, ok := root["file_size"]; ok {
if _, ok = value.(float64); ok {
snapshot.FileSize = int64(value.(float64))
}
}
if value, ok := root["number_of_files"]; ok {
if _, ok = value.(float64); ok {
snapshot.NumberOfFiles = int64(value.(float64))
}
}
if value, ok := root["number_of_files"]; ok {
if _, ok = value.(float64); ok {
snapshot.NumberOfFiles = int64(value.(float64))
}
}
for _, sequenceType := range []string { "files", "chunks", "lengths" } {
if value, ok := root[sequenceType]; !ok {
return nil, fmt.Errorf("No %s are specified in the snapshot", sequenceType)
} else if _, ok = value.([]interface{}); !ok {
return nil, fmt.Errorf("Invalid %s are specified in the snapshot", sequenceType)
} else {
array := value.([]interface{})
sequence := make([]string, len(array))
for i := 0; i < len(array); i++ {
if hashInHex, ok := array[i].(string); !ok {
return nil, fmt.Errorf("Invalid file sequence is specified in the snapshot")
} else if hash, err := hex.DecodeString(hashInHex); err != nil {
return nil, fmt.Errorf("Hash %s is not a valid hex string in the snapshot", hashInHex)
} else {
sequence[i] = string(hash)
}
}
snapshot.SetSequence(sequenceType, sequence)
}
}
return snapshot, nil
}
// LoadChunks constructs 'ChunkHashes' from the json description.
func (snapshot *Snapshot) LoadChunks(description []byte) (err error) {
var root []interface{}
err = json.Unmarshal(description, &root)
if err != nil {
return err
}
snapshot.ChunkHashes = make([]string, len(root))
for i, object := range root {
if hashInHex, ok := object.(string); !ok {
return fmt.Errorf("Invalid chunk hash is specified in the snapshot")
} else if hash, err := hex.DecodeString(hashInHex); err != nil {
return fmt.Errorf("The chunk hash %s is not a valid hex string", hashInHex)
} else {
snapshot.ChunkHashes[i] = string(hash)
}
}
return err
}
// LoadLengths constructs 'ChunkLengths' from the json description.
func (snapshot *Snapshot) LoadLengths(description []byte) (err error) {
return json.Unmarshal(description, &snapshot.ChunkLengths)
}
// MarshalJSON creates a json representation of the snapshot.
func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {
object := make(map[string]interface{})
object["id"] = snapshot.ID
object["revision"] = snapshot.Revision
object["options"] = snapshot.Options
object["tag"] = snapshot.Tag
object["start_time"] = snapshot.StartTime
object["end_time"] = snapshot.EndTime
object["id"] = snapshot.ID
object["revision"] = snapshot.Revision
object["options"] = snapshot.Options
object["tag"] = snapshot.Tag
object["start_time"] = snapshot.StartTime
object["end_time"] = snapshot.EndTime
if snapshot.FileSize != 0 && snapshot.NumberOfFiles != 0 {
object["file_size"] = snapshot.FileSize
object["number_of_files"] = snapshot.NumberOfFiles
}
object["files"] = encodeSequence(snapshot.FileSequence)
object["chunks"] = encodeSequence(snapshot.ChunkSequence)
object["lengths"] = encodeSequence(snapshot.LengthSequence)
if snapshot.FileSize != 0 && snapshot.NumberOfFiles != 0 {
object["file_size"] = snapshot.FileSize
object["number_of_files"] = snapshot.NumberOfFiles
}
object["files"] = encodeSequence(snapshot.FileSequence)
object["chunks"] = encodeSequence(snapshot.ChunkSequence)
object["lengths"] = encodeSequence(snapshot.LengthSequence)
return json.Marshal(object)
}
// MarshalSequence creates a json representation for the specified chunk sequence.
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([]byte, error) {
if sequenceType == "files" {
return json.Marshal(snapshot.Files)
} else if sequenceType == "chunks" {
return json.Marshal(encodeSequence(snapshot.ChunkHashes))
} else {
return json.Marshal(snapshot.ChunkLengths)
}
}
// SetSequence assigns a chunk sequence to the specified field.
func (snapshot *Snapshot) SetSequence(sequenceType string, sequence []string) {
if sequenceType == "files" {
snapshot.FileSequence = sequence
} else if sequenceType == "chunks" {
snapshot.ChunkSequence = sequence
} else {
snapshot.LengthSequence = sequence
}
}
// encodeSequence turns a sequence of binary hashes into a sequence of hex hashes.
func encodeSequence(sequence []string) []string {
sequenceInHex := make([]string, len(sequence))
for i, hash := range sequence {
sequenceInHex[i] = hex.EncodeToString([]byte(hash))
}
return sequenceInHex
}
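// For example (illustrative), encodeSequence([]string{"\x01\x02"}) returns
// []string{"0102"}.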

File diff suppressed because it is too large Load Diff

View File

@@ -5,463 +5,463 @@
package duplicacy
import (
"testing"
"os"
"fmt"
"time"
"path"
"strings"
"crypto/rand"
"encoding/json"
"encoding/hex"
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"path"
"strings"
"testing"
"time"
)
func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
return &Snapshot{
ID: snapshotID,
Revision: revision,
EndTime: endTime,
}
}
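// TestIsDeletable verifies that a fossil collection becomes deletable only
// after every repository recorded in LastRevisions has produced a snapshot
// newer than the collection; repositories whose latest snapshot is
// sufficiently old (see the host4 scenario below) appear to be treated as
// inactive and ignored.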
func TestIsDeletable(t *testing.T) {
//SetLoggingLevel(DEBUG)
now := time.Now().Unix()
day := int64(3600 * 24)
allSnapshots := make(map[string][]*Snapshot)
allSnapshots["host1"] = append([]*Snapshot{}, createDummySnapshot("host1", 1, now-2*day))
allSnapshots["host2"] = append([]*Snapshot{}, createDummySnapshot("host2", 1, now-2*day))
allSnapshots["host1"] = append(allSnapshots["host1"], createDummySnapshot("host1", 2, now-1*day))
allSnapshots["host2"] = append(allSnapshots["host2"], createDummySnapshot("host2", 2, now-1*day))
collection := &FossilCollection{
EndTime: now - day - 3600,
LastRevisions: make(map[string]int),
}
collection.LastRevisions["host1"] = 1
collection.LastRevisions["host2"] = 1
collection.LastRevisions["host1"] = 1
collection.LastRevisions["host2"] = 1
isDeletable, newSnapshots := collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 2 {
t.Errorf("Scenario 1: should be deletable, 2 new snapshots")
}
isDeletable, newSnapshots := collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 2 {
t.Errorf("Scenario 1: should be deletable, 2 new snapshots")
}
collection.LastRevisions["host3"] = 1
allSnapshots["host3"] = append([]*Snapshot{}, createDummySnapshot("host3", 1, now - 2 * day))
collection.LastRevisions["host3"] = 1
allSnapshots["host3"] = append([]*Snapshot{}, createDummySnapshot("host3", 1, now-2*day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if isDeletable {
t.Errorf("Scenario 2: should not be deletable")
}
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if isDeletable {
t.Errorf("Scenario 2: should not be deletable")
}
allSnapshots["host3"] = append(allSnapshots["host3"], createDummySnapshot("host3", 2, now - day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 3 {
t.Errorf("Scenario 3: should be deletable, 3 new snapshots")
}
allSnapshots["host3"] = append(allSnapshots["host3"], createDummySnapshot("host3", 2, now-day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 3 {
t.Errorf("Scenario 3: should be deletable, 3 new snapshots")
}
collection.LastRevisions["host4"] = 1
allSnapshots["host4"] = append([]*Snapshot{}, createDummySnapshot("host4", 1, now - 8 * day))
collection.LastRevisions["host4"] = 1
allSnapshots["host4"] = append([]*Snapshot{}, createDummySnapshot("host4", 1, now-8*day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 3 {
t.Errorf("Scenario 4: should be deletable, 3 new snapshots")
}
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 3 {
t.Errorf("Scenario 4: should be deletable, 3 new snapshots")
}
collection.LastRevisions["repository1@host5"] = 1
allSnapshots["repository1@host5"] = append([]*Snapshot{}, createDummySnapshot("repository1@host5", 1, now - 3 * day))
collection.LastRevisions["repository1@host5"] = 1
allSnapshots["repository1@host5"] = append([]*Snapshot{}, createDummySnapshot("repository1@host5", 1, now-3*day))
collection.LastRevisions["repository2@host5"] = 1
allSnapshots["repository2@host5"] = append([]*Snapshot{}, createDummySnapshot("repository2@host5", 1, now - 2 * day))
collection.LastRevisions["repository2@host5"] = 1
allSnapshots["repository2@host5"] = append([]*Snapshot{}, createDummySnapshot("repository2@host5", 1, now-2*day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if isDeletable {
t.Errorf("Scenario 5: should not be deletable")
}
allSnapshots["repository1@host5"] = append(allSnapshots["repository1@host5"], createDummySnapshot("repository1@host5", 2, now - day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 4 {
t.Errorf("Scenario 6: should be deletable, 4 new snapshots")
}
allSnapshots["repository1@host5"] = append(allSnapshots["repository1@host5"], createDummySnapshot("repository1@host5", 2, now-day))
isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
if !isDeletable || len(newSnapshots) != 4 {
t.Errorf("Scenario 6: should be deletable, 4 new snapshots")
}
}
func createTestSnapshotManager(testDir string) *SnapshotManager {
os.RemoveAll(testDir)
os.MkdirAll(testDir, 0700)
storage, _ := CreateFileStorage(testDir, 2, false, 1)
storage.CreateDirectory(0, "chunks")
storage.CreateDirectory(0, "snapshots")
config := CreateConfig()
snapshotManager := CreateSnapshotManager(config, storage)
cacheDir := path.Join(testDir, "cache")
snapshotCache, _ := CreateFileStorage(cacheDir, 2, false, 1)
snapshotCache.CreateDirectory(0, "chunks")
snapshotCache.CreateDirectory(0, "snapshots")
cacheDir := path.Join(testDir, "cache")
snapshotCache, _ := CreateFileStorage(cacheDir, 2, false, 1)
snapshotCache.CreateDirectory(0, "chunks")
snapshotCache.CreateDirectory(0, "snapshots")
snapshotManager.snapshotCache = snapshotCache
return snapshotManager
snapshotManager.snapshotCache = snapshotCache
return snapshotManager
}
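// uploadTestChunk uploads 'content' as a single chunk to the test storage
// and returns the chunk hash.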
func uploadTestChunk(manager *SnapshotManager, content []byte) string {
completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize)
}
chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, testThreads, nil)
chunkUploader.completionFunc = completionFunc
chunkUploader.Start()
chunk := CreateChunk(manager.config, true)
chunk.Reset(true)
chunk.Write(content)
chunkUploader.StartChunk(chunk, 0)
chunkUploader.Stop()
return chunk.GetHash()
}
func uploadRandomChunk(manager *SnapshotManager, chunkSize int) string {
content := make([]byte, chunkSize)
_, err := rand.Read(content)
if err != nil {
LOG_ERROR("UPLOAD_RANDOM", "Error generating random content: %v", err)
return ""
}
return uploadTestChunk(manager, content)
}
func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string) {
snapshot := &Snapshot {
ID: snapshotID,
Revision: revision,
StartTime: startTime,
EndTime: endTime,
ChunkHashes: chunkHashes,
}
var chunkHashesInHex []string
for _, chunkHash := range chunkHashes {
chunkHashesInHex = append(chunkHashesInHex, hex.EncodeToString([]byte(chunkHash)))
}
sequence, _ := json.Marshal(chunkHashesInHex)
snapshot.ChunkSequence = []string { uploadTestChunk(manager, sequence) }
description, _ := snapshot.MarshalJSON()
path := fmt.Sprintf("snapshots/%s/%d", snapshotID, snapshot.Revision)
manager.storage.CreateDirectory(0, "snapshots/"+snapshotID)
manager.UploadFile(path, path, description)
}
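// checkTestSnapshots downloads every revision of every snapshot in the test
// storage and asserts that the numbers of snapshots and of unreferenced
// chunks (fossils) match the expected counts.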
func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {
var snapshotIDs [] string
var err error
chunks := make(map[string]bool)
files, _ := manager.ListAllFiles(manager.storage, "chunks/")
for _, file := range files {
if file[len(file)-1] == '/' {
continue
}
chunk := strings.Replace(file, "/", "", -1)
chunks[chunk] = false
}
snapshotIDs, err = manager.ListSnapshotIDs()
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
return
}
numberOfSnapshots := 0
for _, snapshotID := range snapshotIDs {
revisions, err := manager.ListSnapshotRevisions(snapshotID)
if err != nil {
LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
return
}
for _, revision := range revisions {
snapshot := manager.DownloadSnapshot(snapshotID, revision)
numberOfSnapshots++
for _, chunk := range manager.GetSnapshotChunks(snapshot) {
chunks[chunk] = true
}
}
}
numberOfFossils := 0
for chunk, referenced := range chunks {
if !referenced {
LOG_INFO("UNREFERENCED_CHUNK", "Unreferenced chunk %s", chunk)
numberOfFossils++
}
}
if numberOfSnapshots != expectedSnapshots {
LOG_ERROR("SNAPSHOT_COUNT", "Expecting %d snapshots, got %d instead", expectedSnapshots, numberOfSnapshots)
}
if numberOfFossils != expectedFossils {
LOG_ERROR("FOSSIL_COUNT", "Expecting %d unreferenced chunks, got %d instead", expectedFossils, numberOfFossils)
}
}
func TestSingleRepositoryPrune(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 1 snapshot")
createTestSnapshot(snapshotManager, "repository1", 1, now - 3 * day - 3600, now - 3 * day - 60, []string { chunkHash1, chunkHash2 })
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "repository1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "repository1", 3, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4})
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "repository1", 4, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash5 })
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "repository1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
}
func TestSingleHostPrune(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host1", 2, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash5 })
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
}
func TestMultipleHostPrune(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshot")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host2", 2, now + 1 * day - 3600 , now + 1 * day, []string {chunkHash4, chunkHash5})
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now + 1 * day - 3600 , now + 1 * day, []string {chunkHash5, chunkHash6})
checkTestSnapshots(snapshotManager, 4, 2)
t.Logf("Creating 1 snapshot")
chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6})
checkTestSnapshots(snapshotManager, 4, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 4, 0)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 4, 0)
}
func TestPruneAndResurrect(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 2 snapshots")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 1, 2)
t.Logf("Creating 1 snapshot")
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 4, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash1})
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1})
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 0)
}
func TestInactiveHostPrune(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 3 snapshot")
createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
// Host2 is inactive
createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4})
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Removing snapshot vm1@host1 revision 1")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Removing snapshot vm1@host1 revision 1")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 2, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now + 1 * day - 3600 , now + 1 * day, []string { chunkHash4, chunkHash5} )
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Creating 1 snapshot")
chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
checkTestSnapshots(snapshotManager, 3, 2)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
t.Logf("Prune without removing any snapshots -- fossils will be deleted")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 3, 0)
}
func TestRetentionPolicy(t *testing.T) {
setTestingT(t)
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
snapshotManager := createTestSnapshotManager(testDir)
chunkSize := 1024
var chunkHashes []string
for i := 0; i < 30; i++ {
chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
}
now := time.Now().Unix()
day := int64(24 * 3600)
t.Logf("Creating 30 snapshots")
for i := 0; i < 30; i++ {
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]})
}
checkTestSnapshots(snapshotManager, 30, 0)
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 19, 0)
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 12, 0)
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
checkTestSnapshots(snapshotManager, 12, 0)
}
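How the retention strings appear to work, reading backwards from the assertions: a policy n:m applies to snapshots older than m days, keeping one snapshot per n-day interval, with n = 0 meaning delete outright. The 30 snapshots start out roughly 1 to 30 days old, so "0:20" deletes the 11 snapshots older than 20 days (30 -> 19, and the second identical run is a no-op), while "3:14" and "2:7" together keep all 6 snapshots younger than 7 days, one per 2 days between 7 and 14 days (4), and one per 3 days beyond 14 days (2), which matches the final count of 12. This is an interpretation of the test's numbers, not a statement from the file itself.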

View File

@@ -5,474 +5,473 @@
package duplicacy
import (
"fmt"
"regexp"
"strings"
"strconv"
"os"
"net"
"path"
"io/ioutil"
"runtime"
"fmt"
"io/ioutil"
"net"
"os"
"path"
"regexp"
"runtime"
"strconv"
"strings"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
)
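The hunks in this file are representative of the whole commit: goimports only reorders imports into sorted groups (standard library first, third-party packages in a second group) and applies gofmt's whitespace rules, without touching behavior. Sketched on a made-up block:

import (
"fmt" // standard library, sorted alphabetically
"regexp"

"golang.org/x/crypto/ssh" // third-party packages form a second group
)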
type Storage interface {
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
ListFiles(threadIndex int, dir string) (files []string, size []int64, err error)
// DeleteFile deletes the file or directory at 'filePath'.
DeleteFile(threadIndex int, filePath string) (err error)
// MoveFile renames the file.
MoveFile(threadIndex int, from string, to string) (err error)
// CreateDirectory creates a new directory.
CreateDirectory(threadIndex int, dir string) (err error)
// GetFileInfo returns the information about the file or directory at 'filePath'.
GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error)
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error)
// DownloadFile reads the file at 'filePath' into the chunk.
DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error)
// UploadFile writes 'content' to the file at 'filePath'.
UploadFile(threadIndex int, filePath string, content []byte) (err error)
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
IsCacheNeeded() bool
// If the 'MoveFile' method is implemented.
IsMoveFileImplemented() bool
// If the storage can guarantee strong consistency.
IsStrongConsistent() bool
// If the storage supports fast listing of files names.
IsFastListing() bool
// Enable the test mode.
EnableTestMode()
// Set the maximum transfer speeds.
SetRateLimits(downloadRateLimit int, uploadRateLimit int)
}
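To make the contract above concrete, here is a hypothetical helper (not part of this commit) showing the calling convention; every method takes a thread index so that each worker thread can hold its own connection to the backend:

// saveAndVerify uploads a small file and then checks it back via
// GetFileInfo. An illustrative sketch against the Storage interface above;
// the helper name and error text are made up.
func saveAndVerify(storage Storage, name string, content []byte) error {
if err := storage.UploadFile(0, name, content); err != nil {
return err
}
exist, isDir, size, err := storage.GetFileInfo(0, name)
if err != nil {
return err
}
if !exist || isDir || size != int64(len(content)) {
return fmt.Errorf("file %s was not stored correctly", name)
}
return nil
}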
type RateLimitedStorage struct {
DownloadRateLimit int
UploadRateLimit int
}
func (storage *RateLimitedStorage) SetRateLimits(downloadRateLimit int, uploadRateLimit int) {
storage.DownloadRateLimit = downloadRateLimit
storage.UploadRateLimit = uploadRateLimit
}
func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {
preferencePath := GetDuplicacyPreferencePath()
hostFile := path.Join(preferencePath, "known_hosts")
file, err := os.OpenFile(hostFile, os.O_RDWR|os.O_CREATE, 0600)
if err != nil {
return err
}
defer file.Close()
content, err := ioutil.ReadAll(file)
if err != nil {
return err
}
lineRegex := regexp.MustCompile(`^([^\s]+)\s+(.+)`)
keyString := string(ssh.MarshalAuthorizedKey(key))
keyString = strings.Replace(keyString, "\n", "", -1)
remoteAddress := remote.String()
if strings.HasSuffix(remoteAddress, ":22") {
remoteAddress = remoteAddress[:len(remoteAddress)-len(":22")]
}
for i, line := range strings.Split(string(content), "\n") {
matched := lineRegex.FindStringSubmatch(line)
if matched == nil {
continue
}
if matched[1] == remote.String() {
if keyString != matched[2] {
LOG_WARN("HOSTKEY_OLD", "The existing key for '%s' is %s (file %s, line %d)",
remote.String(), matched[2], hostFile, i)
LOG_WARN("HOSTKEY_NEW", "The new key is '%s'", keyString)
return fmt.Errorf("The host key for '%s' has changed", remote.String())
} else {
return nil
}
}
}
file.Write([]byte(remote.String() + " " + keyString + "\n"))
return nil
}
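For reference, each line that checkHostKey appends to known_hosts is simply "<remote address> <authorized key>", which is what lineRegex splits back apart; a made-up entry looks like:

203.0.113.5:2222 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC7v...

(The address keeps the port exactly as reported by remote.String(); the key is the single-line output of ssh.MarshalAuthorizedKey.)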
// CreateStorage creates a storage object based on the provided storage URL.
func CreateStorage(preference Preference, resetPassword bool, threads int) (storage Storage) {
storageURL := preference.StorageURL
isFileStorage := false
isCacheNeeded := false
if strings.HasPrefix(storageURL, "/") {
isFileStorage = true
} else if runtime.GOOS == "windows" {
if len(storageURL) >= 3 && storageURL[1] == ':' && (storageURL[2] == '/' || storageURL[2] == '\\') {
volume := strings.ToLower(storageURL[:1])
if volume[0] >= 'a' && volume[0] <= 'z' {
isFileStorage = true
}
}
if strings.HasPrefix(storageURL, "/") {
isFileStorage = true
} else if runtime.GOOS == "windows" {
if len(storageURL) >= 3 && storageURL[1] == ':' && (storageURL[2] == '/' || storageURL[2] == '\\') {
volume := strings.ToLower(storageURL[:1])
if volume[0] >= 'a' && volume[0] <= 'z' {
isFileStorage = true
}
}
if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
isFileStorage = true
isCacheNeeded = true
}
}
if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
isFileStorage = true
isCacheNeeded = true
}
}
if isFileStorage {
fileStorage, err := CreateFileStorage(storageURL, 2, isCacheNeeded, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
if strings.HasPrefix(storageURL, "flat://") {
fileStorage, err := CreateFileStorage(storageURL[7:], 0, false, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
if strings.HasPrefix(storageURL, "flat://") {
fileStorage, err := CreateFileStorage(storageURL[7:], 0, false, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
if strings.HasPrefix(storageURL, "samba://") {
fileStorage, err := CreateFileStorage(storageURL[8:], 2, true, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
if strings.HasPrefix(storageURL, "samba://") {
fileStorage, err := CreateFileStorage(storageURL[8:], 2, true, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
return nil
}
return fileStorage
}
urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
matched := urlRegex.FindStringSubmatch(storageURL)
if matched == nil {
LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
return nil
} else if matched[1] == "sftp" {
server := matched[3]
username := matched[2]
storageDir := matched[5]
port := 22
if strings.Contains(server, ":") {
index := strings.Index(server, ":")
port, _ = strconv.Atoi(server[index + 1:])
server = server[:index]
}
if strings.Contains(server, ":") {
index := strings.Index(server, ":")
port, _ = strconv.Atoi(server[index+1:])
server = server[:index]
}
if storageDir == "" {
LOG_ERROR("STORAGE_CREATE", "The SFTP storage directory can't be empty")
return nil
}
if storageDir == "" {
LOG_ERROR("STORAGE_CREATE", "The SFTP storage directory can't be empty")
return nil
}
if username != "" {
username = username[:len(username) - 1]
}
if username != "" {
username = username[:len(username)-1]
}
// If ssh_key_file is set, skip password-based login
keyFile := GetPasswordFromPreference(preference, "ssh_key_file")
password := ""
passwordCallback := func() (string, error) {
LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
return password, nil
}
password := ""
passwordCallback := func() (string, error) {
LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
return password, nil
}
keyboardInteractive := func(user, instruction string, questions []string, echos []bool) (answers []string,
err error) {
if len(questions) == 1 {
LOG_DEBUG("SSH_INTERACTIVE", "Attempting keyboard interactive login")
password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
answers = []string{password}
return answers, nil
} else {
return nil, nil
}
}
publicKeysCallback := func() ([]ssh.Signer, error) {
LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")
signers := []ssh.Signer{}
agentSock := os.Getenv("SSH_AUTH_SOCK")
if agentSock != "" {
connection, err := net.Dial("unix", agentSock)
// TODO: looks like we need to close the connection
if err == nil {
LOG_DEBUG("SSH_AGENT", "Attempting public key authentication via agent")
sshAgent := agent.NewClient(connection)
signers, err = sshAgent.Signers()
if err != nil {
LOG_DEBUG("SSH_AGENT", "Can't log in using public key authentication via agent: %v", err)
}
}
}
agentSock := os.Getenv("SSH_AUTH_SOCK")
if agentSock != "" {
connection, err := net.Dial("unix", agentSock)
// TODO: looks like we need to close the connection
if err == nil {
LOG_DEBUG("SSH_AGENT", "Attempting public key authentication via agent")
sshAgent := agent.NewClient(connection)
signers, err = sshAgent.Signers()
if err != nil {
LOG_DEBUG("SSH_AGENT", "Can't log in using public key authentication via agent: %v", err)
}
}
}
keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
true, resetPassword)
var key ssh.Signer
var err error
if keyFile == "" {
LOG_INFO("SSH_PUBLICKEY", "No private key file is provided")
} else {
var content []byte
content, err = ioutil.ReadFile(keyFile)
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
} else {
key, err = ssh.ParsePrivateKey(content)
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
}
}
}
if keyFile == "" {
LOG_INFO("SSH_PUBLICKEY", "No private key file is provided")
} else {
var content []byte
content, err = ioutil.ReadFile(keyFile)
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
} else {
key, err = ssh.ParsePrivateKey(content)
if err != nil {
LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
}
}
}
if key != nil {
signers = append(signers, key)
}
if len(signers) > 0 {
return signers, nil
} else {
return nil, err
}
if len(signers) > 0 {
return signers, nil
} else {
return nil, err
}
}
authMethods := []ssh.AuthMethod{}
passwordAuthMethods := []ssh.AuthMethod{
ssh.PasswordCallback(passwordCallback),
ssh.KeyboardInteractive(keyboardInteractive),
}
keyFileAuthMethods := []ssh.AuthMethod{
ssh.PublicKeysCallback(publicKeysCallback),
}
if keyFile != "" {
authMethods = append(keyFileAuthMethods, passwordAuthMethods...)
} else {
authMethods = append(passwordAuthMethods, keyFileAuthMethods...)
}
if RunInBackground {
passwordKey := "ssh_password"
keyFileKey := "ssh_key_file"
if preference.Name != "default" {
passwordKey = preference.Name + "_" + passwordKey
keyFileKey = preference.Name + "_" + keyFileKey
}
passwordKey := "ssh_password"
keyFileKey := "ssh_key_file"
if preference.Name != "default" {
passwordKey = preference.Name + "_" + passwordKey
keyFileKey = preference.Name + "_" + keyFileKey
}
authMethods = []ssh.AuthMethod{}
if keyringGet(passwordKey) != "" {
authMethods = append(authMethods, ssh.PasswordCallback(passwordCallback))
authMethods = append(authMethods, ssh.KeyboardInteractive(keyboardInteractive))
}
if keyringGet(keyFileKey) != "" || os.Getenv("SSH_AUTH_SOCK") != "" {
authMethods = append(authMethods, ssh.PublicKeysCallback(publicKeysCallback))
}
}
hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
return checkHostKey(hostname, remote, key)
}
sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
return nil
}
if keyFile != "" {
SavePassword(preference, "ssh_key_file", keyFile)
} else if password != "" {
SavePassword(preference, "ssh_password", password)
}
return sftpStorage
} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {
if keyFile != "" {
SavePassword(preference, "ssh_key_file", keyFile)
} else if password != "" {
SavePassword(preference, "ssh_password", password)
}
return sftpStorage
} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {
// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
region := matched[2]
endpoint := matched[3]
bucket := matched[5]
if region != "" {
region = region[:len(region) - 1]
}
if region != "" {
region = region[:len(region)-1]
}
if strings.EqualFold(endpoint, "amazon") || strings.EqualFold(endpoint, "amazon.com") {
endpoint = ""
}
if strings.EqualFold(endpoint, "amazon") || strings.EqualFold(endpoint, "amazon.com") {
endpoint = ""
}
storageDir := ""
if strings.Contains(bucket, "/") {
firstSlash := strings.Index(bucket, "/")
storageDir = bucket[firstSlash + 1:]
bucket = bucket[:firstSlash]
}
storageDir := ""
if strings.Contains(bucket, "/") {
firstSlash := strings.Index(bucket, "/")
storageDir = bucket[firstSlash+1:]
bucket = bucket[:firstSlash]
}
accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)
accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)
var err error
if matched[1] == "s3c" {
storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
return nil
}
} else {
isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
return nil
}
}
SavePassword(preference, "s3_id", accessKey)
SavePassword(preference, "s3_secret", secretKey)
if matched[1] == "s3c" {
storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
return nil
}
} else {
isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
return nil
}
}
SavePassword(preference, "s3_id", accessKey)
SavePassword(preference, "s3_secret", secretKey)
return storage
} else if matched[1] == "dropbox" {
storageDir := matched[3] + matched[5]
token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
dropboxStorage, err := CreateDropboxStorage(token, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
return nil
}
SavePassword(preference, "dropbox_token", token)
return dropboxStorage
} else if matched[1] == "b2" {
bucket := matched[3]
accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)
accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)
b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "b2_id", accountID)
SavePassword(preference, "b2_key", applicationKey)
return b2Storage
} else if matched[1] == "azure" {
account := matched[3]
container := matched[5]
if container == "" {
LOG_ERROR("STORAGE_CREATE", "The container name for the Azure storage can't be empty")
return nil
}
if container == "" {
LOG_ERROR("STORAGE_CREATE", "The container name for the Azure storage can't be empty")
return nil
}
prompt := fmt.Sprintf("Enter the Access Key for the Azure storage account %s:", account)
accessKey := GetPassword(preference, "azure_key", prompt, true, resetPassword)
prompt := fmt.Sprintf("Enter the Access Key for the Azure storage account %s:", account)
accessKey := GetPassword(preference, "azure_key", prompt, true, resetPassword)
azureStorage, err := CreateAzureStorage(account, accessKey, container, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Azure storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "azure_key", accessKey)
return azureStorage
} else if matched[1] == "acd" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the Amazon Cloud Drive token file (downloadable from https://duplicacy.com/acd_start):")
tokenFile := GetPassword(preference, "acd_token", prompt, true, resetPassword)
acdStorage, err := CreateACDStorage(tokenFile, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Amazon Cloud Drive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "acd_token", tokenFile)
return acdStorage
} else if matched[1] == "gcs" {
bucket := matched[3]
storageDir := matched[5]
prompt := fmt.Sprintf("Enter the path of the Google Cloud Storage token file (downloadable from https://duplicacy.com/gcs_start) or the service account credential file:")
tokenFile := GetPassword(preference, "gcs_token", prompt, true, resetPassword)
gcsStorage, err := CreateGCSStorage(tokenFile, bucket, storageDir, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Cloud Storage backend at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "gcs_token", tokenFile)
return gcsStorage
} else if matched[1] == "gcd" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "gcd_token", tokenFile)
return gcdStorage
} else if matched[1] == "one" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "one_token", tokenFile)
return oneDriveStorage
} else if matched[1] == "hubic" {
storagePath := matched[3] + matched[4]
prompt := fmt.Sprintf("Enter the path of the Hubic token file (downloadable from https://duplicacy.com/hubic_start):")
tokenFile := GetPassword(preference, "hubic_token", prompt, true, resetPassword)
hubicStorage, err := CreateHubicStorage(tokenFile, storagePath, threads)
if err != nil {
LOG_ERROR("STORAGE_CREATE", "Failed to load the Hubic storage at %s: %v", storageURL, err)
return nil
}
SavePassword(preference, "hubic_token", tokenFile)
return hubicStorage
} else {
LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
return nil
}
}
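To make the dispatch above easier to trace, here is a small standalone sketch of what urlRegex captures for a made-up SFTP URL (group indices as the code uses them):

package main

import (
"fmt"
"regexp"
)

func main() {
urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)
m := urlRegex.FindStringSubmatch("sftp://backup@example.com:2222/backups")
// m[1] = "sftp"             scheme, selects the backend branch
// m[2] = "backup@"          optional user; the trailing '@' is trimmed later
// m[3] = "example.com:2222" host with optional port
// m[5] = "backups"          storage directory
fmt.Println(m[1], m[2], m[3], m[5])
}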

View File

@@ -5,22 +5,22 @@
package duplicacy
import (
"os"
"fmt"
"time"
"flag"
"path"
"testing"
"strings"
"strconv"
"io/ioutil"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"runtime/debug"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"os"
"path"
"runtime/debug"
"strconv"
"strings"
"testing"
"time"
crypto_rand "crypto/rand"
"math/rand"
crypto_rand "crypto/rand"
"math/rand"
)
var testStorageName string
@@ -30,492 +30,491 @@ var testThreads int
var testFixedChunkSize bool
func init() {
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec")
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.Parse()
flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec")
flag.BoolVar(&testQuickMode, "quick", false, "quick test")
flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
flag.Parse()
}
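Since these flags are parsed by the test binary itself, a run against a non-default backend might look like this (assuming a matching test_storage.conf entry exists; the storage name is illustrative):

go test -run TestStorage -storage minio -threads 4 -limit-rate 1024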
func loadStorage(localStoragePath string, threads int) (Storage, error) {
if testStorageName == "" || testStorageName == "file" {
return CreateFileStorage(localStoragePath, 2, false, threads)
}
if testStorageName == "" || testStorageName == "file" {
return CreateFileStorage(localStoragePath, 2, false, threads)
}
config, err := ioutil.ReadFile("test_storage.conf")
if err != nil {
return nil, err
}
config, err := ioutil.ReadFile("test_storage.conf")
if err != nil {
return nil, err
}
storages := make(map[string]map[string]string)
err = json.Unmarshal(config, &storages)
if err != nil {
return nil, err
}
err = json.Unmarshal(config, &storages)
if err != nil {
return nil, err
}
storage, found := storages[testStorageName]
if !found {
return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
}
if testStorageName == "flat" {
return CreateFileStorage(localStoragePath, 0, false, threads)
} else if testStorageName == "samba" {
return CreateFileStorage(localStoragePath, 2, true, threads)
} else if testStorageName == "sftp" {
port, _ := strconv.Atoi(storage["port"])
return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
} else if testStorageName == "s3" || testStorageName == "wasabi" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
} else if testStorageName == "s3c" {
return CreateS3CStorage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
} else if testStorageName == "minio" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, false, true)
} else if testStorageName == "minios" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, true)
} else if testStorageName == "dropbox" {
return CreateDropboxStorage(storage["token"], storage["directory"], threads)
} else if testStorageName == "b2" {
return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
} else if testStorageName == "gcs-s3" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
} else if testStorageName == "gcs" {
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
} else if testStorageName == "gcs-sa" {
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
} else if testStorageName == "azure" {
return CreateAzureStorage(storage["account"], storage["key"], storage["container"], threads)
} else if testStorageName == "acd" {
return CreateACDStorage(storage["token_file"], storage["storage_path"], threads)
} else if testStorageName == "gcd" {
return CreateGCDStorage(storage["token_file"], storage["storage_path"], threads)
} else if testStorageName == "one" {
return CreateOneDriveStorage(storage["token_file"], storage["storage_path"], threads)
} else if testStorageName == "hubic" {
return CreateHubicStorage(storage["token_file"], storage["storage_path"], threads)
} else {
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}
if testStorageName == "flat" {
return CreateFileStorage(localStoragePath, 0, false, threads)
} else if testStorageName == "samba" {
return CreateFileStorage(localStoragePath, 2, true, threads)
} else if testStorageName == "sftp" {
port, _ := strconv.Atoi(storage["port"])
return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
} else if testStorageName == "s3" || testStorageName == "wasabi" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
} else if testStorageName == "s3c" {
return CreateS3CStorage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
} else if testStorageName == "minio" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, false, true)
} else if testStorageName == "minios" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, true)
} else if testStorageName == "dropbox" {
return CreateDropboxStorage(storage["token"], storage["directory"], threads)
} else if testStorageName == "b2" {
return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
} else if testStorageName == "gcs-s3" {
return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
} else if testStorageName == "gcs" {
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
} else if testStorageName == "gcs-sa" {
return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
} else if testStorageName == "azure" {
return CreateAzureStorage(storage["account"], storage["key"], storage["container"], threads)
} else if testStorageName == "acd" {
return CreateACDStorage(storage["token_file"], storage["storage_path"], threads)
} else if testStorageName == "gcd" {
return CreateGCDStorage(storage["token_file"], storage["storage_path"], threads)
} else if testStorageName == "one" {
return CreateOneDriveStorage(storage["token_file"], storage["storage_path"], threads)
} else if testStorageName == "hubic" {
return CreateHubicStorage(storage["token_file"], storage["storage_path"], threads)
} else {
return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
}
}
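test_storage.conf is unmarshalled into a map[string]map[string]string, i.e. a JSON object keyed by storage name whose values are flat string settings. A hypothetical entry for the sftp branch above could look like this (all values made up):

{
"sftp": {
"server": "example.com",
"port": "22",
"username": "backup",
"directory": "/backups",
"password": "secret"
}
}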
func cleanStorage(storage Storage) {
directories := make([]string, 0, 1024)
snapshots := make([]string, 0, 1024)
directories = append(directories, "snapshots/")
directories = append(directories, "snapshots/")
LOG_INFO("STORAGE_LIST", "Listing snapshots in the storage")
for len(directories) > 0 {
LOG_INFO("STORAGE_LIST", "Listing snapshots in the storage")
for len(directories) > 0 {
dir := directories[len(directories)-1]
directories = directories[:len(directories)-1]
files, _, err := storage.ListFiles(0, dir)
if err != nil {
LOG_ERROR("STORAGE_LIST", "Failed to list the directory %s: %v", dir, err)
return
}
for _, file := range files {
if len(file) > 0 && file[len(file)-1] == '/' {
directories = append(directories, dir+file)
} else {
snapshots = append(snapshots, dir+file)
}
}
}
LOG_INFO("STORAGE_DELETE", "Deleting %d snapshots in the storage", len(snapshots))
for _, snapshot := range snapshots {
storage.DeleteFile(0, snapshot)
}
LOG_INFO("STORAGE_DELETE", "Deleting %d snapshots in the storage", len(snapshots))
for _, snapshot := range snapshots {
storage.DeleteFile(0, snapshot)
}
for _, chunk := range listChunks(storage) {
storage.DeleteFile(0, "chunks/"+chunk)
}
storage.DeleteFile(0, "config")
storage.DeleteFile(0, "config")
return
return
}
func listChunks(storage Storage) (chunks []string) {
directories := make([]string, 0, 1024)
directories = append(directories, "chunks/")
for len(directories) > 0 {
dir := directories[len(directories)-1]
directories = directories[:len(directories)-1]
files, _, err := storage.ListFiles(0, dir)
if err != nil {
LOG_ERROR("CHUNK_LIST", "Failed to list the directory %s: %v", dir, err)
return nil
}
for _, file := range files {
if len(file) > 0 && file[len(file)-1] == '/' {
directories = append(directories, dir+file)
} else {
chunk := dir + file
chunk = chunk[len("chunks/"):]
chunks = append(chunks, chunk)
}
}
}
return
}
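// moveChunk flips a chunk between its regular name and its '.fsl' fossil name
// by renaming the file, sleeps out any listing delay the backend needs, and
// then verifies the chunk is visible only under the new name.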
func moveChunk(t *testing.T, storage Storage, chunkID string, isFossil bool, delay int) {
filePath, exist, _, err := storage.FindChunk(0, chunkID, isFossil)
if err != nil {
t.Errorf("Error find chunk %s: %v", chunkID, err)
return
}
to := filePath + ".fsl"
if isFossil {
to = filePath[:len(filePath) - len(".fsl")]
}
to := filePath + ".fsl"
if isFossil {
to = filePath[:len(filePath)-len(".fsl")]
}
err = storage.MoveFile(0, filePath, to)
if err != nil {
t.Errorf("Error renaming file %s to %s: %v", filePath, to, err)
}
time.Sleep(time.Duration(delay) * time.Second)
time.Sleep(time.Duration(delay) * time.Second)
_, exist, _, err = storage.FindChunk(0, chunkID, isFossil)
if err != nil {
t.Errorf("Error get file info for chunk %s: %v", chunkID, err)
}
_, exist, _, err = storage.FindChunk(0, chunkID, isFossil)
if err != nil {
t.Errorf("Error get file info for chunk %s: %v", chunkID, err)
}
if exist {
t.Errorf("File %s still exists after renaming", filePath)
}
if exist {
t.Errorf("File %s still exists after renaming", filePath)
}
_, exist, _, err = storage.FindChunk(0, chunkID, !isFossil)
if err != nil {
t.Errorf("Error get file info for %s: %v", to, err)
}
_, exist, _, err = storage.FindChunk(0, chunkID, !isFossil)
if err != nil {
t.Errorf("Error get file info for %s: %v", to, err)
}
if !exist {
t.Errorf("File %s doesn't exist", to)
}
if !exist {
t.Errorf("File %s doesn't exist", to)
}
}
func TestStorage(t *testing.T) {
	rand.Seed(time.Now().UnixNano())
	setTestingT(t)
	SetLoggingLevel(INFO)

	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()

	testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

	storage, err := loadStorage(testDir, 1)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}
	storage.EnableTestMode()
	storage.SetRateLimits(testRateLimit, testRateLimit)

	delay := 0
	if _, ok := storage.(*ACDStorage); ok {
		delay = 5
	}
	if _, ok := storage.(*HubicStorage); ok {
		delay = 2
	}

	for _, dir := range []string{"chunks", "snapshots"} {
		err = storage.CreateDirectory(0, dir)
		if err != nil {
			t.Errorf("Failed to create directory %s: %v", dir, err)
			return
		}
	}

	storage.CreateDirectory(0, "snapshots/repository1")
	storage.CreateDirectory(0, "snapshots/repository2")
	time.Sleep(time.Duration(delay) * time.Second)
	{
		// Upload fake snapshot files so that for storages having no concept of directories,
		// ListFiles("snapshots") still returns correct snapshot IDs.

		// Create a random file not a text file to make ACD Storage happy.
		content := make([]byte, 100)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}
		err = storage.UploadFile(0, "snapshots/repository1/1", content)
		if err != nil {
			t.Errorf("Failed to upload snapshots/repository1/1: %v", err)
		}

		err = storage.UploadFile(0, "snapshots/repository2/1", content)
		if err != nil {
			t.Errorf("Failed to upload snapshots/repository2/1: %v", err)
		}
	}
	time.Sleep(time.Duration(delay) * time.Second)

	snapshotDirs, _, err := storage.ListFiles(0, "snapshots/")
	if err != nil {
		t.Errorf("Failed to list snapshot ids: %v", err)
		return
	}

	snapshotIDs := []string{}
	for _, snapshotDir := range snapshotDirs {
		if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
			snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
		}
	}

	if len(snapshotIDs) < 2 {
		t.Errorf("Snapshot directories not created")
		return
	}

	for _, snapshotID := range snapshotIDs {
		snapshots, _, err := storage.ListFiles(0, "snapshots/"+snapshotID)
		if err != nil {
			t.Errorf("Failed to list snapshots for %s: %v", snapshotID, err)
			return
		}
		for _, snapshot := range snapshots {
			storage.DeleteFile(0, "snapshots/"+snapshotID+"/"+snapshot)
		}
	}

	time.Sleep(time.Duration(delay) * time.Second)

	storage.DeleteFile(0, "config")

	for _, file := range []string{"snapshots/repository1/1", "snapshots/repository2/1"} {
		exist, _, _, err := storage.GetFileInfo(0, file)
		if err != nil {
			t.Errorf("Failed to get file info for %s: %v", file, err)
			return
		}
		if exist {
			t.Errorf("File %s still exists after deletion", file)
			return
		}
	}

	numberOfFiles := 20
	maxFileSize := 64 * 1024

	if testQuickMode {
		numberOfFiles = 2
	}

	chunks := []string{}
	for i := 0; i < numberOfFiles; i++ {
		content := make([]byte, rand.Int()%maxFileSize+1)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		hasher := sha256.New()
		hasher.Write(content)
		chunkID := hex.EncodeToString(hasher.Sum(nil))
		chunks = append(chunks, chunkID)

		filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
		if err != nil {
			t.Errorf("Failed to list the chunk %s: %v", chunkID, err)
			return
		}
		if exist {
			t.Errorf("Chunk %s already exists", chunkID)
		}

		err = storage.UploadFile(0, filePath, content)
		if err != nil {
			t.Errorf("Failed to upload the file %s: %v", filePath, err)
			return
		}
		LOG_INFO("STORAGE_CHUNK", "Uploaded chunk: %s, size: %d", chunkID, len(content))
	}

	allChunks := []string{}
	for _, file := range listChunks(storage) {
		file = strings.Replace(file, "/", "", -1)
		if len(file) == 64 {
			allChunks = append(allChunks, file)
		}
	}
LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[0])
moveChunk(t, storage, chunks[0], false, delay)
LOG_INFO("STORAGE_FOSSIL", "Making %s a chunk", chunks[0])
moveChunk(t, storage, chunks[0], true, delay)
LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[0])
moveChunk(t, storage, chunks[0], false, delay)
LOG_INFO("STORAGE_FOSSIL", "Making %s a chunk", chunks[0])
moveChunk(t, storage, chunks[0], true, delay)
config := CreateConfig()
config.MinimumChunkSize = 100
config.chunkPool = make(chan *Chunk, numberOfFiles * 2)
config := CreateConfig()
config.MinimumChunkSize = 100
config.chunkPool = make(chan *Chunk, numberOfFiles*2)
chunk := CreateChunk(config, true)
chunk := CreateChunk(config, true)
for _, chunkID := range chunks {
for _, chunkID := range chunks {
chunk.Reset(false)
filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
if err != nil {
t.Errorf("Error getting file info for chunk %s: %v", chunkID, err)
continue
} else if !exist {
t.Errorf("Chunk %s does not exist", chunkID)
continue
} else {
err = storage.DownloadFile(0, filePath, chunk)
if err != nil {
t.Errorf("Error downloading file %s: %v", filePath, err)
continue
}
LOG_INFO("STORAGE_CHUNK", "Downloaded chunk: %s, size: %d", chunkID, chunk.GetLength())
}
chunk.Reset(false)
filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
if err != nil {
t.Errorf("Error getting file info for chunk %s: %v", chunkID, err)
continue
} else if !exist {
t.Errorf("Chunk %s does not exist", chunkID)
continue
} else {
err = storage.DownloadFile(0, filePath, chunk)
if err != nil {
t.Errorf("Error downloading file %s: %v", filePath, err)
continue
}
LOG_INFO("STORAGE_CHUNK", "Downloaded chunk: %s, size: %d", chunkID, chunk.GetLength())
}
hasher := sha256.New()
hasher.Write(chunk.GetBytes())
hash := hex.EncodeToString(hasher.Sum(nil))
hasher := sha256.New()
hasher.Write(chunk.GetBytes())
hash := hex.EncodeToString(hasher.Sum(nil))
		if hash != chunkID {
			t.Errorf("File %s, hash %s, size %d", chunkID, hash, chunk.GetLength())
		}
	}

	LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[1])
	moveChunk(t, storage, chunks[1], false, delay)
	filePath, exist, _, err := storage.FindChunk(0, chunks[1], true)
	if err != nil {
		t.Errorf("Error getting file info for fossil %s: %v", chunks[1], err)
	} else if !exist {
		t.Errorf("Fossil %s does not exist", chunks[1])
	} else {
		err = storage.DeleteFile(0, filePath)
		if err != nil {
			t.Errorf("Failed to delete file %s: %v", filePath, err)
		} else {
			time.Sleep(time.Duration(delay) * time.Second)
			filePath, exist, _, err = storage.FindChunk(0, chunks[1], true)
			if err != nil {
				t.Errorf("Failed to get file info for deleted fossil %s: %v", chunks[1], err)
			} else if exist {
				t.Errorf("Fossil %s still exists after deletion", chunks[1])
			}
		}
	}
	for _, file := range allChunks {

		err = storage.DeleteFile(0, "chunks/"+file)
		if err != nil {
			t.Errorf("Failed to delete the file %s: %v", file, err)
			return
		}
	}
}
func TestCleanStorage(t *testing.T) {
	setTestingT(t)
	SetLoggingLevel(INFO)

	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()

	testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

	storage, err := loadStorage(testDir, 1)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}

	directories := make([]string, 0, 1024)
	directories = append(directories, "snapshots/")
	directories = append(directories, "chunks/")

	for len(directories) > 0 {

		dir := directories[len(directories)-1]
		directories = directories[:len(directories)-1]

		LOG_INFO("LIST_FILES", "Listing %s", dir)

		files, _, err := storage.ListFiles(0, dir)
		if err != nil {
			LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
			return
		}

		for _, file := range files {
			if len(file) > 0 && file[len(file)-1] == '/' {
				directories = append(directories, dir+file)
			} else {
				storage.DeleteFile(0, dir+file)
				LOG_INFO("DELETE_FILE", "Deleted file %s", file)
			}
		}
	}

	storage.DeleteFile(0, "config")
	LOG_INFO("DELETE_FILE", "Deleted config")
}

package duplicacy
import (
"fmt"
"os"
"bufio"
"io"
"time"
"path"
"path/filepath"
"regexp"
"strings"
"strconv"
"runtime"
"crypto/sha256"
"bufio"
"crypto/sha256"
"fmt"
"io"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"time"
"golang.org/x/crypto/pbkdf2"
"github.com/gilbertchen/gopass"
"github.com/gilbertchen/gopass"
"golang.org/x/crypto/pbkdf2"
)
var RunInBackground bool = false
type RateLimitedReader struct {
	Content   []byte
	Rate      float64
	Next      int
	StartTime time.Time
}
var RegexMap map[string]*regexp.Regexp
func init() {
	if RegexMap == nil {
		RegexMap = make(map[string]*regexp.Regexp)
	}
}
func CreateRateLimitedReader(content []byte, rate int) *RateLimitedReader {
	return &RateLimitedReader{
		Content: content,
		Rate:    float64(rate * 1024),
		Next:    0,
	}
}
func IsEmptyFilter(pattern string) bool {
if pattern == "+" || pattern == "-" || pattern == "i:" || pattern == "e:" {
return true
} else {
return false
}
if pattern == "+" || pattern == "-" || pattern == "i:" || pattern == "e:" {
return true
} else {
return false
}
}
func IsUnspecifiedFilter(pattern string) bool {
	return pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e'
}
func IsValidRegex(pattern string) (valid bool, err error) {
	var re *regexp.Regexp = nil

	if re, valid = RegexMap[pattern]; valid && re != nil {
		return true, nil
	}

	re, err = regexp.Compile(pattern)

	if err != nil {
		return false, err
	} else {
		RegexMap[pattern] = re
		LOG_DEBUG("REGEX_STORED", "Saved compiled regex for pattern \"%s\", regex=%#v", pattern, re)
		return true, err
	}
}
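
// A minimal usage sketch (illustrative, not part of the original source):
//
//	if ok, err := IsValidRegex(`\.tmp$`); !ok {
//		LOG_ERROR("REGEX_ERROR", "Invalid regex: %v", err)
//	}
//	// Later lookups of the same pattern hit the cached entry in RegexMap.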
func (reader *RateLimitedReader) Length() int64 {
	return int64(len(reader.Content))
}
func (reader *RateLimitedReader) Reset() {
	reader.Next = 0
}
func (reader *RateLimitedReader) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekStart {
		reader.Next = int(offset)
	} else if whence == io.SeekCurrent {
		reader.Next += int(offset)
	} else {
		reader.Next = len(reader.Content) - int(offset)
	}
	return int64(reader.Next), nil
}
func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {
	if reader.Next >= len(reader.Content) {
		return 0, io.EOF
	}

	// A non-positive rate means no throttling; copy as much as possible.
	if reader.Rate <= 0 {
		n := copy(p, reader.Content[reader.Next:])
		reader.Next += n
		if reader.Next >= len(reader.Content) {
			return n, io.EOF
		}
		return n, nil
	}

	if reader.StartTime.IsZero() {
		reader.StartTime = time.Now()
	}

	// Each read covers roughly 1/5 second worth of data at the target rate. Sleep if the
	// reader is ahead of schedule; otherwise enlarge this read to catch up.
	elapsed := time.Since(reader.StartTime).Seconds()
	delay := float64(reader.Next)/reader.Rate - elapsed
	end := reader.Next + int(reader.Rate/5)
	if delay > 0 {
		time.Sleep(time.Duration(delay * float64(time.Second)))
	} else {
		end += -int(delay * reader.Rate)
	}

	if end > len(reader.Content) {
		end = len(reader.Content)
	}

	n = copy(p, reader.Content[reader.Next:end])
	reader.Next += n
	return n, nil
}
func RateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int64, err error) {
	if rate <= 0 {
		return io.Copy(writer, reader)
	}
	for range time.Tick(time.Second / 5) {
		n, err := io.CopyN(writer, reader, int64(rate*1024/5))
		written += n
		if err != nil {
			if err == io.EOF {
				return written, nil
			} else {
				return written, err
			}
		}
	}
	return written, nil
}
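
// A minimal usage sketch for the two helpers above (illustrative; the rate of 100 kB/s is
// an arbitrary example):
//
//	reader := CreateRateLimitedReader(content, 100) // reads are paced to roughly 100 kB/s
//	n, err := io.Copy(ioutil.Discard, reader)
//
//	written, err := RateLimitedCopy(dst, src, 100) // throttled copy in 1/5-second slices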
// GenerateKeyFromPassword generates a key from the password.
func GenerateKeyFromPassword(password string) []byte {
	return pbkdf2.Key([]byte(password), DEFAULT_KEY, 16384, 32, sha256.New)
}
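
// For example, deriving a 32-byte key from a storage password (illustrative only):
//
//	key := GenerateKeyFromPassword("correct horse battery staple") // len(key) == 32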
// GetPasswordFromPreference gets the password from the preference or the environment, but
// does not start any keyring request.
func GetPasswordFromPreference(preference Preference, passwordType string) string {
	passwordID := passwordType
	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
	}
	{
		name := strings.ToUpper("duplicacy_" + passwordID)
		LOG_DEBUG("PASSWORD_ENV_VAR", "Reading the environment variable %s", name)
		if password, found := os.LookupEnv(name); found && password != "" {
			return password
		}
	}
	// If the password is stored in the preference, there is no need to include the storage name
	// (i.e., preference.Name) in the key, so the key name should really be passwordType rather
	// than passwordID; we're using passwordID here only for backward compatibility
	if len(preference.Keys) > 0 && len(preference.Keys[passwordID]) > 0 {
		LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordID)
		return preference.Keys[passwordID]
	}
	if len(preference.Keys) > 0 && len(preference.Keys[passwordType]) > 0 {
		LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordType)
		return preference.Keys[passwordType]
	}
return ""
return ""
}
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
func GetPassword(preference Preference, passwordType string, prompt string,
	showPassword bool, resetPassword bool) string {
	passwordID := passwordType
	password := GetPasswordFromPreference(preference, passwordType)
	if password != "" {
		return password
	}
if preference.Name != "default" {
passwordID = preference.Name + "_" + passwordID
}
if preference.Name != "default" {
passwordID = preference.Name + "_" + passwordID
}
	if resetPassword && !RunInBackground {
		keyringSet(passwordID, "")
	} else {
		password := keyringGet(passwordID)
		if password != "" {
			return password
		}

		if RunInBackground {
			LOG_INFO("PASSWORD_MISSING", "%s is not found in Keychain/Keyring", passwordID)
			return ""
		}
	}
password = ""
fmt.Printf("%s", prompt)
if showPassword {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
password = scanner.Text()
} else {
passwordInBytes, err := gopass.GetPasswdMasked()
if err != nil {
LOG_ERROR("PASSWORD_READ", "Failed to read the password: %v", err)
return ""
}
password = string(passwordInBytes)
}
password = ""
fmt.Printf("%s", prompt)
if showPassword {
scanner := bufio.NewScanner(os.Stdin)
scanner.Scan()
password = scanner.Text()
} else {
passwordInBytes, err := gopass.GetPasswdMasked()
if err != nil {
LOG_ERROR("PASSWORD_READ", "Failed to read the password: %v", err)
return ""
}
password = string(passwordInBytes)
}
	return password
}
// SavePassword saves the specified password in the keyring/keychain.
func SavePassword(preference Preference, passwordType string, password string) {
if password == "" || RunInBackground {
return
}
if password == "" || RunInBackground {
return
}
	if preference.DoNotSavePassword {
		return
	}
	// If the password is retrieved from env or preference, don't save it to keyring
	if GetPasswordFromPreference(preference, passwordType) == password {
		return
	}
	passwordID := passwordType
	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
	}
	keyringSet(passwordID, password)
}
// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,
//
func matchPattern(text string, pattern string) bool {
	textLength := len(text)
	patternLength := len(pattern)
	afterLastWildcard := 0
	afterLastMatched := 0

	t := 0
	p := 0
	for {
		if t >= textLength {
			if p >= patternLength {
				return true // "x" matches "x"
			} else if pattern[p] == '*' {
				p++
				continue // "x*" matches "x" or "xy"
			}
			return false // "x" doesn't match "xy"
		}

		w := byte(0)
		if p < patternLength {
			w = pattern[p]
		}

		if text[t] != w {
			if w == '?' {
				t++
				p++
				continue
			} else if w == '*' {
				p++
				afterLastWildcard = p
				if p >= patternLength {
					return true
				}
			} else if afterLastWildcard > 0 {
				p = afterLastWildcard
				t = afterLastMatched
				t++
			} else {
				return false
			}

			for t < textLength && text[t] != pattern[p] && pattern[p] != '?' {
				t++
			}

			if t >= textLength {
				return false
			}
			afterLastMatched = t
		}
		t++
		p++
	}
}
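
// Illustrative examples of the wildcard semantics implemented above, taken from the test
// cases in the utils test file ('*' matches any sequence, '?' matches exactly one character):
//
//	matchPattern("abcccd", "*ccd") // true
//	matchPattern("abc", "?*?")     // true
//	matchPattern("bLah", "bLaH")   // false: matching is case-sensitive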
// MatchPath returns whether a file path should be included, by matching it against a list of patterns. Each pattern starts with
// either '+' or '-', whereas '-' indicates exclusion and '+' indicates inclusion. Wildcards like '*' and '?' may
// appear in the patterns. In case no matching pattern is found, the file will be excluded if all patterns are
// include patterns, and included otherwise.
func MatchPath(filePath string, patterns []string) (included bool) {
	var re *regexp.Regexp = nil
	var found bool
	var matched bool

	allIncludes := true
	for _, pattern := range patterns {
		if pattern[0] == '+' {
			if matchPattern(filePath, pattern[1:]) {
				return true
			}
		} else if pattern[0] == '-' {
			allIncludes = false
			if matchPattern(filePath, pattern[1:]) {
				return false
			}
		} else if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
			if re, found = RegexMap[pattern[2:]]; found {
				matched = re.MatchString(filePath)
			} else {
				// Compile and cache the regex part of the pattern (without the "i:"/"e:"
				// prefix), using the same key as the lookup above.
				re, err := regexp.Compile(pattern[2:])
				if err != nil {
					LOG_ERROR("REGEX_ERROR", "Invalid regex encountered for pattern \"%s\" - %v", pattern[2:], err)
				}
				RegexMap[pattern[2:]] = re
				matched = re.MatchString(filePath)
			}
			if matched {
				return strings.HasPrefix(pattern, "i:")
			} else {
				if strings.HasPrefix(pattern, "e:") {
					allIncludes = false
				}
			}
		}
	}
	return !allIncludes
}
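
// A usage sketch based on the semantics described above (hypothetical patterns and paths):
//
//	patterns := []string{"+*.go", "e:(?i)\\.tmp$", "-*"}
//	MatchPath("main.go", patterns)   // true: matched by the include pattern "+*.go"
//	MatchPath("notes.txt", patterns) // false: caught by the catch-all exclude "-*"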
func joinPath(components ...string) string {
	combinedPath := path.Join(components...)
	if len(combinedPath) > 257 && runtime.GOOS == "windows" {
		combinedPath = `\\?\` + filepath.Join(components...)
		// If the path is on a samba drive we must use the UNC format
		if strings.HasPrefix(combinedPath, `\\?\\\`) {
			combinedPath = `\\?\UNC\` + combinedPath[6:]
		}
	}
	return combinedPath
}
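
// For example (hypothetical paths), a combined path longer than 257 characters on Windows
// gets the extended-length prefix:
//
//	joinPath(`C:\backups`, veryLongRelativePath) // -> `\\?\C:\backups\...`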
func PrettyNumber(number int64) string {
	G := int64(1024 * 1024 * 1024)
	M := int64(1024 * 1024)
	K := int64(1024)
	if number > 1000*G {
		return fmt.Sprintf("%dG", number/G)
	} else if number > G {
		return fmt.Sprintf("%d,%03dM", number/(1000*M), (number/M)%1000)
	} else if number > M {
		return fmt.Sprintf("%d,%03dK", number/(1000*K), (number/K)%1000)
	} else if number > K {
		return fmt.Sprintf("%dK", number/K)
	} else {
		return fmt.Sprintf("%d", number)
	}
}
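
// Illustrative outputs (values chosen as examples):
//
//	PrettyNumber(5 * 1024 * 1024 * 1024) // "5,120M" (5 GiB expressed in MiB)
//	PrettyNumber(2 * 1024 * 1024)        // "2,048K"
//	PrettyNumber(500)                    // "500"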
func PrettySize(size int64) string {
	if size > 1024*1024 {
		return fmt.Sprintf("%.2fM", float64(size)/(1024.0*1024.0))
	} else if size > 1024 {
		return fmt.Sprintf("%.0fK", float64(size)/1024.0)
	} else {
		return fmt.Sprintf("%d", size)
	}
}
func PrettyTime(seconds int64) string {
	day := int64(3600 * 24)
	if seconds > day*2 {
		return fmt.Sprintf("%d days %02d:%02d:%02d",
			seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > day {
		return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > 0 {
		return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
	} else {
		return "n/a"
	}
}
func AtoSize(sizeString string) int {
	sizeString = strings.ToLower(sizeString)
	sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
	matched := sizeRegex.FindStringSubmatch(sizeString)
	if matched == nil {
		return 0
	}
	size, _ := strconv.Atoi(matched[1])
if matched[2] == "m" {
size *= 1024 * 1024
} else if matched[2] == "k" {
size *= 1024
}
if matched[2] == "m" {
size *= 1024 * 1024
} else if matched[2] == "k" {
size *= 1024
}
	return size
}
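
// Accepted size strings are case-insensitive and may carry a "k" or "m" suffix; anything
// else yields 0. For example:
//
//	AtoSize("4M")   // 4 * 1024 * 1024
//	AtoSize("512k") // 512 * 1024
//	AtoSize("1024") // 1024
//	AtoSize("4MB")  // 0: "mb" is not a recognized suffix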
func MinInt(x, y int) int {
	if x < y {
		return x
	}
	return y
}

package duplicacy
import (
"os"
"bytes"
"syscall"
"path/filepath"
"bytes"
"os"
"path/filepath"
"syscall"
"github.com/gilbertchen/xattr"
"github.com/gilbertchen/xattr"
)
func Readlink(path string) (isRegular bool, s string, err error) {
	s, err = os.Readlink(path)
	return false, s, err
}
func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
	stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
	if ok && stat != nil {
		entry.UID = int(stat.Uid)
		entry.GID = int(stat.Gid)
	} else {
		entry.UID = -1
		entry.GID = -1
	}
}
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
	stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
	if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
		if entry.UID != -1 && entry.GID != -1 {
			err := os.Chown(fullPath, entry.UID, entry.GID)
			if err != nil {
				LOG_ERROR("RESTORE_CHOWN", "Failed to change uid or gid: %v", err)
				return false
			}
		}
	}

	return true
}
func (entry *Entry) ReadAttributes(top string) {
	fullPath := filepath.Join(top, entry.Path)
	attributes, _ := xattr.Listxattr(fullPath)
	if len(attributes) > 0 {
		entry.Attributes = make(map[string][]byte)
		for _, name := range attributes {
			attribute, err := xattr.Getxattr(fullPath, name)
			if err == nil {
				entry.Attributes[name] = attribute
			}
		}
	}
}
func (entry *Entry) SetAttributesToFile(fullPath string) {
	names, _ := xattr.Listxattr(fullPath)

	for _, name := range names {
		newAttribute, found := entry.Attributes[name]
		if found {
			oldAttribute, _ := xattr.Getxattr(fullPath, name)
			// Only rewrite the attribute when the stored value differs from the current one.
			if !bytes.Equal(oldAttribute, newAttribute) {
				xattr.Setxattr(fullPath, name, newAttribute)
			}
			delete(entry.Attributes, name)
		} else {
			xattr.Removexattr(fullPath, name)
		}
	}
	for name, attribute := range entry.Attributes {
		xattr.Setxattr(fullPath, name, attribute)
	}
}

package duplicacy
import (
"io"
"io/ioutil"
"time"
"bytes"
"bytes"
"io"
"io/ioutil"
"time"
crypto_rand "crypto/rand"
"testing"
crypto_rand "crypto/rand"
"testing"
)
func TestMatchPattern(t *testing.T) {
	// Test cases were copied from Matching Wildcards: An Empirical Way to Tame an Algorithm
	// By Kirk J. Krauss, October 07, 2014

	DATA := []struct {
		text    string
		pattern string
		matched bool
	}{
		// Cases with repeating character sequences.
		{"abcccd", "*ccd", true},
		{"mississipissippi", "*issip*ss*", true},
		{"xxxx*zzzzzzzzy*f", "xxxx*zzy*fffff", false},
		{"xxxx*zzzzzzzzy*f", "xxx*zzy*f", true},
		{"xxxxzzzzzzzzyf", "xxxx*zzy*fffff", false},
		{"xxxxzzzzzzzzyf", "xxxx*zzy*f", true},
		{"xyxyxyzyxyz", "xy*z*xyz", true},
		{"mississippi", "*sip*", true},
		{"xyxyxyxyz", "xy*xyz", true},
		{"mississippi", "mi*sip*", true},
		{"ababac", "*abac*", true},
		{"ababac", "*abac*", true},
		{"aaazz", "a*zz*", true},
		{"a12b12", "*12*23", false},
		{"a12b12", "a12b", false},
		{"a12b12", "*12*12*", true},

		// More double wildcard scenarios.
		{"XYXYXYZYXYz", "XY*Z*XYz", true},
		{"missisSIPpi", "*SIP*", true},
		{"mississipPI", "*issip*PI", true},
		{"xyxyxyxyz", "xy*xyz", true},
		{"miSsissippi", "mi*sip*", true},
		{"miSsissippi", "mi*Sip*", false},
		{"abAbac", "*Abac*", true},
		{"abAbac", "*Abac*", true},
		{"aAazz", "a*zz*", true},
		{"A12b12", "*12*23", false},
		{"a12B12", "*12*12*", true},
		{"oWn", "*oWn*", true},

		// Completely tame (no wildcards) cases.
		{"bLah", "bLah", true},
		{"bLah", "bLaH", false},

		// Simple mixed wildcard tests suggested by IBMer Marlin Deckert.
		{"a", "*?", true},
		{"ab", "*?", true},
		{"abc", "*?", true},

		// More mixed wildcard tests including coverage for false positives.
		{"a", "??", false},
		{"ab", "?*?", true},
		{"ab", "*?*?*", true},
		{"abc", "?*?*?", true},
		{"abc", "?*?*&?", false},
		{"abcd", "?b*??", true},
		{"abcd", "?a*??", false},
		{"abcd", "?*?c?", true},
		{"abcd", "?*?d?", false},
		{"abcde", "?*b*?*d*?", true},

		// Single-character-match cases.
		{"bLah", "bL?h", true},
		{"bLaaa", "bLa?", false},
		{"bLah", "bLa?", true},
		{"bLaH", "?Lah", false},
		{"bLaH", "?LaH", true},
	}

	for _, data := range DATA {
		if matchPattern(data.text, data.pattern) != data.matched {
			t.Errorf("text: %s, pattern %s, expected: %t", data.text, data.pattern, data.matched)
		}
	}
}
func TestRateLimit(t *testing.T) {
	content := make([]byte, 100*1024)
	_, err := crypto_rand.Read(content)
	if err != nil {
		t.Errorf("Error generating random content: %v", err)
		return
	}
	expectedRate := 10
	rateLimiter := CreateRateLimitedReader(content, expectedRate)
	startTime := time.Now()
	n, err := io.Copy(ioutil.Discard, rateLimiter)
	if err != nil {
		t.Errorf("Error reading from the rate limited reader: %v", err)
		return
	}
	if int(n) != len(content) {
		t.Errorf("Wrote %d bytes instead of %d", n, len(content))
		return
	}
	elapsed := time.Since(startTime)
	actualRate := float64(len(content)) / elapsed.Seconds() / 1024
	t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate)
	startTime = time.Now()
	n, err = RateLimitedCopy(ioutil.Discard, bytes.NewBuffer(content), expectedRate)
	if err != nil {
		t.Errorf("Error writing with rate limit: %v", err)
		return
	}
	if int(n) != len(content) {
		t.Errorf("Copied %d bytes instead of %d", n, len(content))
		return
	}
	elapsed = time.Since(startTime)
	actualRate = float64(len(content)) / elapsed.Seconds() / 1024
	t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate)
}

package duplicacy
import (
"os"
"fmt"
"syscall"
"unsafe"
"fmt"
"os"
"syscall"
"unsafe"
)
type symbolicLinkReparseBuffer struct {
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
	Flags                uint32
	PathBuffer           [1]uint16
}
type mountPointReparseBuffer struct {
	SubstituteNameOffset uint16
	SubstituteNameLength uint16
	PrintNameOffset      uint16
	PrintNameLength      uint16
	PathBuffer           [1]uint16
}
type reparseDataBuffer struct {
	ReparseTag        uint32
	ReparseDataLength uint16
	Reserved          uint16

	// GenericReparseBuffer
	reparseBuffer byte
}
const (
	FSCTL_GET_REPARSE_POINT          = 0x900A8
	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
	IO_REPARSE_TAG_MOUNT_POINT       = 0xA0000003
	IO_REPARSE_TAG_SYMLINK           = 0xA000000C
	IO_REPARSE_TAG_DEDUP             = 0x80000013
	SYMBOLIC_LINK_FLAG_DIRECTORY     = 0x1
	FILE_READ_ATTRIBUTES             = 0x0080
)
// We copied golang source code for Readlink but made a simple modification here: use FILE_READ_ATTRIBUTES instead of
// GENERIC_READ as the access mode when opening the file.
// Readlink returns the destination of the named symbolic link.
func Readlink(path string) (isRegular bool, s string, err error) {
	fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), FILE_READ_ATTRIBUTES,
		syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING,
		syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if err != nil {
		return false, "", err
	}
	defer syscall.CloseHandle(fd)
	rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
	var bytesReturned uint32
	err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0],
		uint32(len(rdbbuf)), &bytesReturned, nil)
	if err != nil {
		return false, "", err
	}
	rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
	switch rdb.ReparseTag {
	case IO_REPARSE_TAG_SYMLINK:
		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		if data.PrintNameLength > 0 {
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
		} else {
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
		}
	case IO_REPARSE_TAG_MOUNT_POINT:
		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
		if data.PrintNameLength > 0 {
			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
		} else {
			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
		}
	case IO_REPARSE_TAG_DEDUP:
		return true, "", nil
	default:
		// the path is not a symlink or junction but another type of reparse
		// point
		return false, "", fmt.Errorf("Unhandled reparse point type %x", rdb.ReparseTag)
	}
	return false, s, nil
}
func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
	entry.UID = -1
	entry.GID = -1
}
func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
	return true
}
func (entry *Entry) ReadAttributes(top string) {