mirror of https://github.com/gilbertchen/duplicacy, synced 2025-12-10 13:23:17 +00:00
Run goimports on all source files
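goimports applies gofmt formatting and rewrites each file's import block into a single sorted list, which is exactly the kind of change this diff consists of (reordered imports, spaces removed inside composite literals, redundant parentheses dropped). A minimal before/after illustration, with a hypothetical file not taken from this commit:

// before goimports
package demo

import (
    "time"
    "fmt"
    "bytes"
)

func stamp(buf *bytes.Buffer) {
    fmt.Fprintf(buf, "%d", time.Now().Unix())
}

// after goimports: imports sorted, gofmt layout applied
package demo

import (
    "bytes"
    "fmt"
    "time"
)

func stamp(buf *bytes.Buffer) {
    fmt.Fprintf(buf, "%d", time.Now().Unix())
}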
@@ -5,448 +5,448 @@
package duplicacy

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "mime/multipart"
    "net/http"
    "sync"
    "time"

    "golang.org/x/oauth2"
)

type ACDError struct {
    Status  int
    Message string `json:"message"`
}

func (err ACDError) Error() string {
    return fmt.Sprintf("%d %s", err.Status, err.Message)
}

var ACDRefreshTokenURL = "https://duplicacy.com/acd_refresh"

type ACDClient struct {
    HTTPClient *http.Client

    TokenFile string
    Token     *oauth2.Token
    TokenLock *sync.Mutex

    ContentURL  string
    MetadataURL string

    TestMode bool
}

func NewACDClient(tokenFile string) (*ACDClient, error) {

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    token := new(oauth2.Token)
    if err := json.Unmarshal(description, token); err != nil {
        return nil, err
    }

    client := &ACDClient{
        HTTPClient: http.DefaultClient,
        TokenFile:  tokenFile,
        Token:      token,
        TokenLock:  &sync.Mutex{},
    }

    client.GetEndpoint()

    return client, nil
}

func (client *ACDClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {

    LOG_DEBUG("ACD_CALL", "Calling %s", url)

    var response *http.Response

    backoff := 1
    for i := 0; i < 8; i++ {
        var inputReader io.Reader

        switch input.(type) {
        default:
            jsonInput, err := json.Marshal(input)
            if err != nil {
                return nil, 0, err
            }
            inputReader = bytes.NewReader(jsonInput)
        case []byte:
            inputReader = bytes.NewReader(input.([]byte))
        case int:
            inputReader = bytes.NewReader([]byte(""))
        case *bytes.Buffer:
            inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
        case *RateLimitedReader:
            input.(*RateLimitedReader).Reset()
            inputReader = input.(*RateLimitedReader)
        }

        request, err := http.NewRequest(method, url, inputReader)
        if err != nil {
            return nil, 0, err
        }

        if reader, ok := inputReader.(*RateLimitedReader); ok {
            request.ContentLength = reader.Length()
        }

        if url != ACDRefreshTokenURL {
            client.TokenLock.Lock()
            request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
            client.TokenLock.Unlock()
        }
        if contentType != "" {
            request.Header.Set("Content-Type", contentType)
        }

        response, err = client.HTTPClient.Do(request)
        if err != nil {
            return nil, 0, err
        }

        if response.StatusCode < 400 {
            return response.Body, response.ContentLength, nil
        }

        if response.StatusCode == 404 {
            buffer := new(bytes.Buffer)
            buffer.ReadFrom(response.Body)
            response.Body.Close()
            return nil, 0, ACDError{Status: response.StatusCode, Message: buffer.String()}
        }

        if response.StatusCode == 400 {
            defer response.Body.Close()

            e := &ACDError{
                Status: response.StatusCode,
            }

            if err := json.NewDecoder(response.Body).Decode(e); err == nil {
                return nil, 0, e
            } else {
                return nil, 0, ACDError{Status: response.StatusCode, Message: "Bad input parameter"}
            }
        }

        response.Body.Close()

        if response.StatusCode == 401 {

            if url == ACDRefreshTokenURL {
                return nil, 0, ACDError{Status: response.StatusCode, Message: "Unauthorized"}
            }

            err = client.RefreshToken()
            if err != nil {
                return nil, 0, err
            }

            continue
        } else if response.StatusCode == 403 {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Forbidden"}
        } else if response.StatusCode == 404 {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Resource not found"}
        } else if response.StatusCode == 409 {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Conflict"}
        } else if response.StatusCode == 411 {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Length required"}
        } else if response.StatusCode == 412 {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Precondition failed"}
        } else if response.StatusCode == 429 || response.StatusCode == 500 {
            reason := "Too many requests"
            if response.StatusCode == 500 {
                reason = "Internal server error"
            }
            retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
            LOG_INFO("ACD_RETRY", "%s; retry after %d milliseconds", reason, retryAfter)
            time.Sleep(retryAfter * time.Millisecond)
            backoff *= 2
            continue
        } else if response.StatusCode == 503 {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Service unavailable"}
        } else {
            return nil, 0, ACDError{Status: response.StatusCode, Message: "Unknown error"}
        }
    }

    return nil, 0, fmt.Errorf("Maximum number of retries reached")
}

func (client *ACDClient) RefreshToken() (err error) {

    client.TokenLock.Lock()
    defer client.TokenLock.Unlock()

    readCloser, _, err := client.call(ACDRefreshTokenURL, "POST", client.Token, "")
    if err != nil {
        return err
    }

    defer readCloser.Close()

    if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
        return err
    }

    description, err := json.Marshal(client.Token)
    if err != nil {
        return err
    }

    err = ioutil.WriteFile(client.TokenFile, description, 0644)
    if err != nil {
        return err
    }

    return nil
}

type ACDGetEndpointOutput struct {
    CustomerExists bool   `json:"customerExists"`
    ContentURL     string `json:"contentUrl"`
    MetadataURL    string `json:"metadataUrl"`
}

func (client *ACDClient) GetEndpoint() (err error) {

    readCloser, _, err := client.call("https://drive.amazonaws.com/drive/v1/account/endpoint", "GET", 0, "")
    if err != nil {
        return err
    }

    defer readCloser.Close()

    output := &ACDGetEndpointOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return err
    }

    client.ContentURL = output.ContentURL
    client.MetadataURL = output.MetadataURL

    return nil
}

type ACDEntry struct {
    Name string `json:"name"`
    ID   string `json:"id"`
    Size int64  `json:"size"`
    Kind string `json:"kind"`
}

type ACDListEntriesOutput struct {
    Count     int        `json:"count"`
    NextToken string     `json:"nextToken"`
    Entries   []ACDEntry `json:"data"`
}

func (client *ACDClient) ListEntries(parentID string, listFiles bool) ([]ACDEntry, error) {

    startToken := ""

    entries := []ACDEntry{}

    for {

        url := client.MetadataURL + "nodes/" + parentID + "/children?filters="

        if listFiles {
            url += "kind:FILE"
        } else {
            url += "kind:FOLDER"
        }

        if startToken != "" {
            url += "&startToken=" + startToken
        }

        if client.TestMode {
            url += "&limit=8"
        }

        readCloser, _, err := client.call(url, "GET", 0, "")
        if err != nil {
            return nil, err
        }

        defer readCloser.Close()

        output := &ACDListEntriesOutput{}

        if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
            return nil, err
        }

        entries = append(entries, output.Entries...)

        startToken = output.NextToken
        if startToken == "" {
            break
        }
    }

    return entries, nil
}

func (client *ACDClient) ListByName(parentID string, name string) (string, bool, int64, error) {

    url := client.MetadataURL + "nodes"

    if parentID == "" {
        url += "?filters=Kind:FOLDER+AND+isRoot:true"
    } else {
        url += "/" + parentID + "/children?filters=name:" + name
    }

    readCloser, _, err := client.call(url, "GET", 0, "")
    if err != nil {
        return "", false, 0, err
    }

    defer readCloser.Close()

    output := &ACDListEntriesOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return "", false, 0, err
    }

    if len(output.Entries) == 0 {
        return "", false, 0, nil
    }

    return output.Entries[0].ID, output.Entries[0].Kind == "FOLDER", output.Entries[0].Size, nil
}

func (client *ACDClient) DownloadFile(fileID string) (io.ReadCloser, int64, error) {

    url := client.ContentURL + "nodes/" + fileID + "/content"

    return client.call(url, "GET", 0, "")
}

func (client *ACDClient) UploadFile(parentID string, name string, content []byte, rateLimit int) (fileID string, err error) {

    url := client.ContentURL + "nodes?suppress=deduplication"

    body := &bytes.Buffer{}
    writer := multipart.NewWriter(body)

    metadata := make(map[string]interface{})
    metadata["name"] = name
    metadata["kind"] = "FILE"
    metadata["parents"] = []string{parentID}

    metadataJSON, err := json.Marshal(metadata)
    if err != nil {
        return "", err
    }

    err = writer.WriteField("metadata", string(metadataJSON))
    if err != nil {
        return "", err
    }

    part, err := writer.CreateFormFile("content", name)
    if err != nil {
        return "", err
    }

    _, err = part.Write(content)
    if err != nil {
        return "", err
    }

    writer.Close()

    var input interface{}
    input = body
    if rateLimit > 0 {
        input = CreateRateLimitedReader(body.Bytes(), rateLimit)
    }

    readCloser, _, err := client.call(url, "POST", input, writer.FormDataContentType())

    if err != nil {
        return "", err
    }

    defer readCloser.Close()

    entry := ACDEntry{}
    if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
        return "", err
    }

    return entry.ID, nil
}

func (client *ACDClient) DeleteFile(fileID string) error {

    url := client.MetadataURL + "trash/" + fileID

    readCloser, _, err := client.call(url, "PUT", 0, "")
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *ACDClient) MoveFile(fileID string, fromParentID string, toParentID string) error {

    url := client.MetadataURL + "nodes/" + toParentID + "/children"

    parameters := make(map[string]string)
    parameters["fromParent"] = fromParentID
    parameters["childId"] = fileID

    readCloser, _, err := client.call(url, "POST", parameters, "")
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *ACDClient) CreateDirectory(parentID string, name string) (string, error) {

    url := client.MetadataURL + "nodes"

    parameters := make(map[string]interface{})
    parameters["name"] = name
    parameters["kind"] = "FOLDER"
    parameters["parents"] = []string{parentID}

    readCloser, _, err := client.call(url, "POST", parameters, "")
    if err != nil {
        return "", err
    }

    defer readCloser.Close()

    entry := ACDEntry{}
    if err = json.NewDecoder(readCloser).Decode(&entry); err != nil {
        return "", err
    }

    return entry.ID, nil
}
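The call method above retries 429 and 500 responses with randomized exponential backoff: it sleeps a random duration of up to backoff milliseconds, then doubles backoff, for at most eight attempts. A self-contained sketch of that same pattern, with names that are illustrative rather than part of duplicacy:

package main

import (
    "errors"
    "fmt"
    "math/rand"
    "time"
)

// retry runs op up to maxTries times, sleeping a random duration of up to
// backoff milliseconds between attempts and doubling backoff each time --
// the same shape as ACDClient.call's handling of 429/500 responses.
func retry(maxTries int, op func() error) error {
    backoff := 1
    for i := 0; i < maxTries; i++ {
        if err := op(); err == nil {
            return nil
        }
        sleep := time.Duration(rand.Float32()*1000.0*float32(backoff)) * time.Millisecond
        time.Sleep(sleep)
        backoff *= 2
    }
    return errors.New("Maximum number of retries reached")
}

func main() {
    attempts := 0
    err := retry(8, func() error {
        attempts++
        if attempts < 3 {
            return errors.New("transient failure")
        }
        return nil
    })
    fmt.Println(attempts, err) // 3 <nil>
}

The randomization spreads concurrent clients apart so they do not retry in lockstep after a throttling response.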
@@ -5,149 +5,149 @@
package duplicacy

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "testing"

    crypto_rand "crypto/rand"
    "math/rand"
)

func TestACDClient(t *testing.T) {

    acdClient, err := NewACDClient("acd-token.json")
    if err != nil {
        t.Errorf("Failed to create the ACD client: %v", err)
        return
    }

    acdClient.TestMode = true

    rootID, _, _, err := acdClient.ListByName("", "")
    if err != nil {
        t.Errorf("Failed to get the root node: %v", err)
        return
    }

    if rootID == "" {
        t.Errorf("No root node")
        return
    }

    testID, _, _, err := acdClient.ListByName(rootID, "test")
    if err != nil {
        t.Errorf("Failed to list the test directory: %v", err)
        return
    }
    if testID == "" {
        testID, err = acdClient.CreateDirectory(rootID, "test")
        if err != nil {
            t.Errorf("Failed to create the test directory: %v", err)
            return
        }
    }

    test1ID, _, _, err := acdClient.ListByName(testID, "test1")
    if err != nil {
        t.Errorf("Failed to list the test1 directory: %v", err)
        return
    }
    if test1ID == "" {
        test1ID, err = acdClient.CreateDirectory(testID, "test1")
        if err != nil {
            t.Errorf("Failed to create the test1 directory: %v", err)
            return
        }
    }

    test2ID, _, _, err := acdClient.ListByName(testID, "test2")
    if err != nil {
        t.Errorf("Failed to list the test2 directory: %v", err)
        return
    }
    if test2ID == "" {
        test2ID, err = acdClient.CreateDirectory(testID, "test2")
        if err != nil {
            t.Errorf("Failed to create the test2 directory: %v", err)
            return
        }
    }

    fmt.Printf("test1: %s, test2: %s\n", test1ID, test2ID)

    numberOfFiles := 20
    maxFileSize := 64 * 1024

    for i := 0; i < numberOfFiles; i++ {
        content := make([]byte, rand.Int()%maxFileSize+1)
        _, err = crypto_rand.Read(content)
        if err != nil {
            t.Errorf("Error generating random content: %v", err)
            return
        }

        hasher := sha256.New()
        hasher.Write(content)
        filename := hex.EncodeToString(hasher.Sum(nil))

        fmt.Printf("file: %s\n", filename)

        _, err = acdClient.UploadFile(test1ID, filename, content, 100)
        if err != nil {
            /*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
                t.Errorf("Failed to upload the file %s: %v", filename, err)
                return
            }
        }
    }

    entries, err := acdClient.ListEntries(test1ID, true)
    if err != nil {
        t.Errorf("Error list randomly generated files: %v", err)
        return
    }

    for _, entry := range entries {
        err = acdClient.MoveFile(entry.ID, test1ID, test2ID)
        if err != nil {
            t.Errorf("Failed to move %s: %v", entry.Name, err)
            return
        }
    }

    entries, err = acdClient.ListEntries(test2ID, true)
    if err != nil {
        t.Errorf("Error list randomly generated files: %v", err)
        return
    }

    for _, entry := range entries {
        readCloser, _, err := acdClient.DownloadFile(entry.ID)
        if err != nil {
            t.Errorf("Error downloading file %s: %v", entry.Name, err)
            return
        }

        hasher := sha256.New()
        io.Copy(hasher, readCloser)
        hash := hex.EncodeToString(hasher.Sum(nil))

        if hash != entry.Name {
            t.Errorf("File %s, hash %s", entry.Name, hash)
        }

        readCloser.Close()
    }

    for _, entry := range entries {

        err = acdClient.DeleteFile(entry.ID)
        if err != nil {
            t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
            return
        }
    }

}
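The test above names every uploaded file after the SHA-256 hex digest of its content, so a later download verifies itself: re-hash the downloaded bytes and compare against the file name, with no side table of expected checksums. A minimal standalone version of that check (the verify helper is illustrative, not part of duplicacy):

package main

import (
    "bytes"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
)

// verify re-hashes the content and compares the digest to the file name,
// mirroring how TestACDClient validates each downloaded entry.
func verify(name string, content io.Reader) (bool, error) {
    hasher := sha256.New()
    if _, err := io.Copy(hasher, content); err != nil {
        return false, err
    }
    return hex.EncodeToString(hasher.Sum(nil)) == name, nil
}

func main() {
    data := []byte("example chunk")
    sum := sha256.Sum256(data)
    name := hex.EncodeToString(sum[:])
    ok, _ := verify(name, bytes.NewReader(data))
    fmt.Println(ok) // true
}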
@@ -5,400 +5,398 @@
package duplicacy

import (
    "fmt"
    "path"
    "strings"
    "sync"
)

type ACDStorage struct {
    RateLimitedStorage

    client          *ACDClient
    idCache         map[string]string
    idCacheLock     *sync.Mutex
    numberOfThreads int
}

// CreateACDStorage creates an ACD storage object.
func CreateACDStorage(tokenFile string, storagePath string, threads int) (storage *ACDStorage, err error) {

    client, err := NewACDClient(tokenFile)
    if err != nil {
        return nil, err
    }

    storage = &ACDStorage{
        client:          client,
        idCache:         make(map[string]string),
        idCacheLock:     &sync.Mutex{},
        numberOfThreads: threads,
    }

    storagePathID, _, _, err := storage.getIDFromPath(0, storagePath)
    if err != nil {
        return nil, err
    }

    storage.idCache[""] = storagePathID

    for _, dir := range []string{"chunks", "fossils", "snapshots"} {
        dirID, isDir, _, err := client.ListByName(storagePathID, dir)
        if err != nil {
            return nil, err
        }
        if dirID == "" {
            dirID, err = client.CreateDirectory(storagePathID, dir)
            if err != nil {
                return nil, err
            }
        } else if !isDir {
            return nil, fmt.Errorf("%s/%s is not a directory", storagePath+"/"+dir)
        }
        storage.idCache[dir] = dirID
    }

    return storage, nil

}

func (storage *ACDStorage) getPathID(path string) string {
    storage.idCacheLock.Lock()
    pathID := storage.idCache[path]
    storage.idCacheLock.Unlock()
    return pathID
}

func (storage *ACDStorage) findPathID(path string) (string, bool) {
    storage.idCacheLock.Lock()
    pathID, ok := storage.idCache[path]
    storage.idCacheLock.Unlock()
    return pathID, ok
}

func (storage *ACDStorage) savePathID(path string, pathID string) {
    storage.idCacheLock.Lock()
    storage.idCache[path] = pathID
    storage.idCacheLock.Unlock()
}

func (storage *ACDStorage) deletePathID(path string) {
    storage.idCacheLock.Lock()
    delete(storage.idCache, path)
    storage.idCacheLock.Unlock()
}

func (storage *ACDStorage) convertFilePath(filePath string) string {
    if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
        return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
    }
    return filePath
}

func (storage *ACDStorage) getIDFromPath(threadIndex int, path string) (fileID string, isDir bool, size int64, err error) {

    parentID, ok := storage.findPathID("")
    if !ok {
        parentID, isDir, size, err = storage.client.ListByName("", "")
        if err != nil {
            return "", false, 0, err
        }
    }

    names := strings.Split(path, "/")
    for i, name := range names {
        parentID, isDir, _, err = storage.client.ListByName(parentID, name)
        if err != nil {
            return "", false, 0, err
        }
        if parentID == "" {
            if i == len(names)-1 {
                return "", false, 0, nil
            } else {
                return "", false, 0, fmt.Errorf("File path '%s' does not exist", path)
            }
        }
        if i != len(names)-1 && !isDir {
            return "", false, 0, fmt.Errorf("Invalid path %s", path)
        }
    }

    return parentID, isDir, size, err
}

// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
func (storage *ACDStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
    var err error

    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }

    if dir == "snapshots" {

        entries, err := storage.client.ListEntries(storage.getPathID(dir), false)
        if err != nil {
            return nil, nil, err
        }

        subDirs := []string{}

        for _, entry := range entries {
            storage.savePathID(entry.Name, entry.ID)
            subDirs = append(subDirs, entry.Name+"/")
        }
        return subDirs, nil, nil
    } else if strings.HasPrefix(dir, "snapshots/") {
        name := dir[len("snapshots/"):]
        pathID, ok := storage.findPathID(dir)
        if !ok {
            pathID, _, _, err = storage.client.ListByName(storage.getPathID("snapshots"), name)
            if err != nil {
                return nil, nil, err
            }
            if pathID == "" {
                return nil, nil, nil
            }
        }

        entries, err := storage.client.ListEntries(pathID, true)
        if err != nil {
            return nil, nil, err
        }

        files := []string{}

        for _, entry := range entries {
            storage.savePathID(dir+"/"+entry.Name, entry.ID)
            files = append(files, entry.Name)
        }
        return files, nil, nil
    } else {
        files := []string{}
        sizes := []int64{}
        for _, parent := range []string{"chunks", "fossils"} {
            entries, err := storage.client.ListEntries(storage.getPathID(parent), true)
            if err != nil {
                return nil, nil, err
            }

            for _, entry := range entries {
                name := entry.Name
                if parent == "fossils" {
                    name += ".fsl"
                }
                storage.savePathID(parent+"/"+entry.Name, entry.ID)
                files = append(files, name)
                sizes = append(sizes, entry.Size)
            }
        }
        return files, sizes, nil
    }

}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *ACDStorage) DeleteFile(threadIndex int, filePath string) (err error) {
    filePath = storage.convertFilePath(filePath)
    fileID, ok := storage.findPathID(filePath)
    if !ok {
        fileID, _, _, err = storage.getIDFromPath(threadIndex, filePath)
        if err != nil {
            return err
        }
        if fileID == "" {
            LOG_TRACE("ACD_STORAGE", "File %s has disappeared before deletion", filePath)
            return nil
        }
        storage.savePathID(filePath, fileID)
    }

    err = storage.client.DeleteFile(fileID)
    if e, ok := err.(ACDError); ok && e.Status == 409 {
        LOG_DEBUG("ACD_DELETE", "Ignore 409 conflict error")
        return nil
    }
    return err
}

// MoveFile renames the file.
func (storage *ACDStorage) MoveFile(threadIndex int, from string, to string) (err error) {
    from = storage.convertFilePath(from)
    to = storage.convertFilePath(to)

    fileID, ok := storage.findPathID(from)
    if !ok {
        return fmt.Errorf("Attempting to rename file %s with unknown id", from)
    }

    fromParentID := storage.getPathID("chunks")
    toParentID := storage.getPathID("fossils")

    if strings.HasPrefix(from, "fossils") {
        fromParentID, toParentID = toParentID, fromParentID
    }

    err = storage.client.MoveFile(fileID, fromParentID, toParentID)
    if err != nil {
        if e, ok := err.(ACDError); ok && e.Status == 409 {
            LOG_DEBUG("ACD_MOVE", "Ignore 409 conflict error")
        } else {
            return err
        }
    }

    storage.savePathID(to, storage.getPathID(from))
    storage.deletePathID(from)

    return nil
}

// CreateDirectory creates a new directory.
func (storage *ACDStorage) CreateDirectory(threadIndex int, dir string) (err error) {

    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }

    if dir == "chunks" || dir == "snapshots" {
        return nil
    }

    if strings.HasPrefix(dir, "snapshots/") {
        name := dir[len("snapshots/"):]
        dirID, err := storage.client.CreateDirectory(storage.getPathID("snapshots"), name)
        if err != nil {
            if e, ok := err.(ACDError); ok && e.Status == 409 {
                return nil
            } else {
                return err
            }
        }
        storage.savePathID(dir, dirID)
        return nil

    }

    return nil
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *ACDStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

    for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
        filePath = filePath[:len(filePath)-1]
    }

    filePath = storage.convertFilePath(filePath)
    fileID := ""
    fileID, isDir, size, err = storage.getIDFromPath(threadIndex, filePath)
    if err != nil {
        return false, false, 0, err
    }
    if fileID == "" {
        return false, false, 0, nil
    }

    return true, isDir, size, nil
}

// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *ACDStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
    parentID := ""
    filePath = "chunks/" + chunkID
    realPath := filePath
    if isFossil {
        parentID = storage.getPathID("fossils")
        filePath += ".fsl"
        realPath = "fossils/" + chunkID + ".fsl"
    } else {
        parentID = storage.getPathID("chunks")
    }

    fileID := ""
    fileID, _, size, err = storage.client.ListByName(parentID, chunkID)
    if fileID != "" {
        storage.savePathID(realPath, fileID)
    }
    return filePath, fileID != "", size, err
}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *ACDStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
    fileID, ok := storage.findPathID(filePath)
    if !ok {
        fileID, _, _, err = storage.getIDFromPath(threadIndex, filePath)
        if err != nil {
            return err
        }
        if fileID == "" {
            return fmt.Errorf("File path '%s' does not exist", filePath)
        }
        storage.savePathID(filePath, fileID)
    }

    readCloser, _, err := storage.client.DownloadFile(fileID)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
    return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *ACDStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
    parent := path.Dir(filePath)

    if parent == "." {
        parent = ""
    }

    parentID, ok := storage.findPathID(parent)

    if !ok {
        parentID, _, _, err = storage.getIDFromPath(threadIndex, parent)
        if err != nil {
            return err
        }
        if parentID == "" {
            return fmt.Errorf("File path '%s' does not exist", parent)
        }
        storage.savePathID(parent, parentID)
    }

    fileID, err := storage.client.UploadFile(parentID, path.Base(filePath), content, storage.UploadRateLimit/storage.numberOfThreads)
    if err == nil {
        storage.savePathID(filePath, fileID)
        return nil
    }

    if e, ok := err.(ACDError); ok && e.Status == 409 {
        LOG_TRACE("ACD_UPLOAD", "File %s already exists", filePath)
        return nil
    } else {
        return err
    }
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *ACDStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *ACDStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *ACDStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of files names.
func (storage *ACDStorage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *ACDStorage) EnableTestMode() {}
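ACD addresses nodes by opaque IDs rather than by path, so ACDStorage keeps a mutex-guarded path-to-ID cache and stores fossils (chunks renamed while awaiting deletion) in a separate "fossils" folder. convertFilePath is the pure mapping between the logical ".fsl" naming scheme the rest of duplicacy uses and that folder layout; a standalone copy of it shows the behavior:

package main

import (
    "fmt"
    "strings"
)

// Same mapping as ACDStorage.convertFilePath: a logical fossil path
// "chunks/<id>.fsl" is stored as "fossils/<id>", while every other
// path is left untouched.
func convertFilePath(filePath string) string {
    if strings.HasPrefix(filePath, "chunks/") && strings.HasSuffix(filePath, ".fsl") {
        return "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
    }
    return filePath
}

func main() {
    fmt.Println(convertFilePath("chunks/abc123.fsl")) // fossils/abc123
    fmt.Println(convertFilePath("chunks/abc123"))     // chunks/abc123
    fmt.Println(convertFilePath("snapshots/vm1/7"))   // snapshots/vm1/7
}

This is why MoveFile only swaps the "chunks" and "fossils" parent IDs: turning a chunk into a fossil is a move between those two folders, not a rename of the node itself.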
@@ -5,200 +5,200 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/gilbertchen/azure-sdk-for-go/storage"
|
"github.com/gilbertchen/azure-sdk-for-go/storage"
|
||||||
)
|
)
|
||||||
|
|
||||||
type AzureStorage struct {
|
type AzureStorage struct {
|
||||||
RateLimitedStorage
|
RateLimitedStorage
|
||||||
|
|
||||||
containers []*storage.Container
|
containers []*storage.Container
|
||||||
}
|
}
|
||||||
|
|
||||||
func CreateAzureStorage(accountName string, accountKey string,
    containerName string, threads int) (azureStorage *AzureStorage, err error) {

    var containers []*storage.Container
    for i := 0; i < threads; i++ {

        client, err := storage.NewBasicClient(accountName, accountKey)
        if err != nil {
            return nil, err
        }

        blobService := client.GetBlobService()
        container := blobService.GetContainerReference(containerName)
        containers = append(containers, container)
    }

    exist, err := containers[0].Exists()
    if err != nil {
        return nil, err
    }

    if !exist {
        return nil, fmt.Errorf("container %s does not exist", containerName)
    }

    azureStorage = &AzureStorage{
        containers: containers,
    }

    return
}

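// Illustrative sketch (account name, key and container are placeholders, not
// from this file): one container reference is opened per thread so each
// worker can issue blob requests independently:
//
//     azure, err := CreateAzureStorage("myaccount", "bXlhY2NvdW50a2V5", "backups", 4)
//     if err != nil {
//         return err
//     }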
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (azureStorage *AzureStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {

    type ListBlobsParameters struct {
        Prefix     string
        Delimiter  string
        Marker     string
        Include    string
        MaxResults uint
        Timeout    uint
    }

    if len(dir) > 0 && dir[len(dir)-1] != '/' {
        dir += "/"
    }
    dirLength := len(dir)

    parameters := storage.ListBlobsParameters{
        Prefix:    dir,
        Delimiter: "",
    }

    subDirs := make(map[string]bool)

    for {

        results, err := azureStorage.containers[threadIndex].ListBlobs(parameters)
        if err != nil {
            return nil, nil, err
        }

        if dir == "snapshots/" {
            for _, blob := range results.Blobs {
                name := strings.Split(blob.Name[dirLength:], "/")[0]
                subDirs[name+"/"] = true
            }
        } else {
            for _, blob := range results.Blobs {
                files = append(files, blob.Name[dirLength:])
                sizes = append(sizes, blob.Properties.ContentLength)
            }
        }

        if results.NextMarker == "" {
            break
        }

        parameters.Marker = results.NextMarker
    }

    if dir == "snapshots/" {
        for subDir := range subDirs {
            files = append(files, subDir)
        }
    }

    return files, sizes, nil
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *AzureStorage) DeleteFile(threadIndex int, filePath string) (err error) {
    _, err = storage.containers[threadIndex].GetBlobReference(filePath).DeleteIfExists(nil)
    return err
}

// MoveFile renames the file.
func (storage *AzureStorage) MoveFile(threadIndex int, from string, to string) (err error) {
    source := storage.containers[threadIndex].GetBlobReference(from)
    destination := storage.containers[threadIndex].GetBlobReference(to)
    err = destination.Copy(source.GetURL(), nil)
    if err != nil {
        return err
    }
    return storage.DeleteFile(threadIndex, from)
}

// CreateDirectory creates a new directory.
func (storage *AzureStorage) CreateDirectory(threadIndex int, dir string) (err error) {
    return nil
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *AzureStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
    blob := storage.containers[threadIndex].GetBlobReference(filePath)
    err = blob.GetProperties(nil)
    if err != nil {
        if strings.Contains(err.Error(), "404") {
            return false, false, 0, nil
        } else {
            return false, false, 0, err
        }
    }

    return true, false, blob.Properties.ContentLength, nil
}

// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *AzureStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
    filePath = "chunks/" + chunkID
    if isFossil {
        filePath += ".fsl"
    }

    exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)

    if err != nil {
        return "", false, 0, err
    } else {
        return filePath, exist, size, err
    }
}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *AzureStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
    readCloser, err := storage.containers[threadIndex].GetBlobReference(filePath).Get(nil)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.containers))
    return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *AzureStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
    reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.containers))
    blob := storage.containers[threadIndex].GetBlobReference(filePath)
    return blob.CreateBlockBlobFromReader(reader, nil)
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *AzureStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *AzureStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *AzureStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *AzureStorage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *AzureStorage) EnableTestMode() {}

@@ -5,516 +5,511 @@
package duplicacy

import (
    "bytes"
    "crypto/sha1"
    "encoding/base64"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "strconv"
    "strings"
    "time"
)

type B2Error struct {
    Status  int
    Code    string
    Message string
}

func (err *B2Error) Error() string {
    return fmt.Sprintf("%d %s", err.Status, err.Message)
}

type B2UploadArgument struct {
    URL   string
    Token string
}

var B2AuthorizationURL = "https://api.backblazeb2.com/b2api/v1/b2_authorize_account"

type B2Client struct {
    HTTPClient *http.Client

    AccountID          string
    ApplicationKey     string
    AuthorizationToken string
    APIURL             string
    DownloadURL        string
    BucketName         string
    BucketID           string

    UploadURL   string
    UploadToken string

    TestMode bool
}

func NewB2Client(accountID string, applicationKey string) *B2Client {
    client := &B2Client{
        HTTPClient:     http.DefaultClient,
        AccountID:      accountID,
        ApplicationKey: applicationKey,
    }
    return client
}

func (client *B2Client) retry(backoff int, response *http.Response) int {
    if response != nil {
        if backoffList, found := response.Header["Retry-After"]; found && len(backoffList) > 0 {
            retryAfter, _ := strconv.Atoi(backoffList[0])
            if retryAfter >= 1 {
                time.Sleep(time.Duration(retryAfter) * time.Second)
                return 0
            }
        }
    }
    if backoff == 0 {
        backoff = 1
    } else {
        backoff *= 2
    }
    time.Sleep(time.Duration(backoff) * time.Second)
    return backoff
}

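// Illustrative sketch (not from this file): when the server sends no
// Retry-After header, the sleep doubles on each failed attempt, so the eight
// attempts used below back off for 1, 2, 4, ... up to 128 seconds:
//
//     backoff := 0
//     for i := 0; i < 8; i++ {
//         backoff = client.retry(backoff, nil) // sleeps 1s, 2s, 4s, ...
//     }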
func (client *B2Client) call(url string, input interface{}) (io.ReadCloser, int64, error) {

    var response *http.Response

    backoff := 0
    for i := 0; i < 8; i++ {
        var inputReader *bytes.Reader
        method := "POST"

        switch input.(type) {
        default:
            jsonInput, err := json.Marshal(input)
            if err != nil {
                return nil, 0, err
            }
            inputReader = bytes.NewReader(jsonInput)
        case []byte:
            inputReader = bytes.NewReader(input.([]byte))
        case int:
            method = "GET"
            inputReader = bytes.NewReader([]byte(""))
        }

        request, err := http.NewRequest(method, url, inputReader)
        if err != nil {
            return nil, 0, err
        }

        if url == B2AuthorizationURL {
            request.Header.Set("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(client.AccountID+":"+client.ApplicationKey)))
        } else {
            request.Header.Set("Authorization", client.AuthorizationToken)
        }

        if client.TestMode {
            r := rand.Float32()
            if r < 0.5 {
                request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
            } else {
                request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
            }
        }

        response, err = client.HTTPClient.Do(request)
        if err != nil {
            if url != B2AuthorizationURL {
                LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned an error: %v", url, err)
                backoff = client.retry(backoff, response)
                continue
            }
            return nil, 0, err
        }

        if response.StatusCode < 300 {
            return response.Body, response.ContentLength, nil
        }

        LOG_DEBUG("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)

        io.Copy(ioutil.Discard, response.Body)
        response.Body.Close()
        if response.StatusCode == 401 {
            if url == B2AuthorizationURL {
                return nil, 0, fmt.Errorf("Authorization failure")
            }
            client.AuthorizeAccount()
            continue
        } else if response.StatusCode == 403 {
            if !client.TestMode {
                return nil, 0, fmt.Errorf("B2 cap exceeded")
            }
            continue
        } else if response.StatusCode == 429 || response.StatusCode == 408 {
            backoff = client.retry(backoff, response)
            continue
        } else if response.StatusCode >= 500 && response.StatusCode <= 599 {
            backoff = client.retry(backoff, response)
            continue
        } else {
            LOG_INFO("BACKBLAZE_CALL", "URL request '%s' returned status code %d", url, response.StatusCode)
            backoff = client.retry(backoff, response)
            continue
        }

        defer response.Body.Close()

        e := &B2Error{}

        if err := json.NewDecoder(response.Body).Decode(e); err != nil {
            return nil, 0, err
        }

        return nil, 0, e
    }

    return nil, 0, fmt.Errorf("Maximum backoff reached")
}

type B2AuthorizeAccountOutput struct {
    AccountID          string
    AuthorizationToken string
    APIURL             string
    DownloadURL        string
}

func (client *B2Client) AuthorizeAccount() (err error) {

    readCloser, _, err := client.call(B2AuthorizationURL, make(map[string]string))
    if err != nil {
        return err
    }

    defer readCloser.Close()

    output := &B2AuthorizeAccountOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return err
    }

    client.AuthorizationToken = output.AuthorizationToken
    client.APIURL = output.APIURL
    client.DownloadURL = output.DownloadURL

    return nil
}

type ListBucketOutput struct {
    AccountID  string
    BucketID   string
    BucketName string
    BucketType string
}

func (client *B2Client) FindBucket(bucketName string) (err error) {

    input := make(map[string]string)
    input["accountId"] = client.AccountID

    url := client.APIURL + "/b2api/v1/b2_list_buckets"

    readCloser, _, err := client.call(url, input)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    output := make(map[string][]ListBucketOutput, 0)

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return err
    }

    for _, bucket := range output["buckets"] {
        if bucket.BucketName == bucketName {
            client.BucketName = bucket.BucketName
            client.BucketID = bucket.BucketID
            break
        }
    }

    if client.BucketID == "" {
        return fmt.Errorf("Bucket %s not found", bucketName)
    }

    return nil
}

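// Illustrative sketch (account, key and bucket are placeholders): a client
// must be authorized and bound to a bucket before any file operation, which
// is the same sequence CreateB2Storage performs for each thread:
//
//     client := NewB2Client("account_id", "application_key")
//     if err := client.AuthorizeAccount(); err != nil {
//         return err
//     }
//     if err := client.FindBucket("my-bucket"); err != nil {
//         return err
//     }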
type B2Entry struct {
    FileID          string
    FileName        string
    Action          string
    Size            int64
    UploadTimestamp int64
}

type B2ListFileNamesOutput struct {
    Files        []*B2Entry
    NextFileName string
    NextFileId   string
}

func (client *B2Client) ListFileNames(startFileName string, singleFile bool, includeVersions bool) (files []*B2Entry, err error) {

    maxFileCount := 1000
    if singleFile {
        if includeVersions {
            maxFileCount = 4
            if client.TestMode {
                maxFileCount = 1
            }
        } else {
            maxFileCount = 1
        }
    } else if client.TestMode {
        maxFileCount = 10
    }

    input := make(map[string]interface{})
    input["bucketId"] = client.BucketID
    input["startFileName"] = startFileName
    input["maxFileCount"] = maxFileCount

    for {
        url := client.APIURL + "/b2api/v1/b2_list_file_names"
        if includeVersions {
            url = client.APIURL + "/b2api/v1/b2_list_file_versions"
        }
        readCloser, _, err := client.call(url, input)
        if err != nil {
            return nil, err
        }

        defer readCloser.Close()

        output := B2ListFileNamesOutput{}

        if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
            return nil, err
        }

        ioutil.ReadAll(readCloser)

        if startFileName == "" {
            files = append(files, output.Files...)
        } else {
            for _, file := range output.Files {
                if singleFile {
                    if file.FileName == startFileName {
                        files = append(files, file)
                        if !includeVersions {
                            output.NextFileName = ""
                            break
                        }
                    } else {
                        output.NextFileName = ""
                        break
                    }
                } else {
                    if strings.HasPrefix(file.FileName, startFileName) {
                        files = append(files, file)
                    } else {
                        output.NextFileName = ""
                        break
                    }
                }
            }
        }

        if len(output.NextFileName) == 0 {
            break
        }

        input["startFileName"] = output.NextFileName
        if includeVersions {
            input["startFileId"] = output.NextFileId
        }
    }

    return files, nil
}

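// Illustrative sketch (the prefix is a placeholder): listing resumes through
// NextFileName/NextFileId, so a single call can walk an arbitrarily large
// prefix; code that needs the hidden versions of chunk files asks for
// versions explicitly:
//
//     entries, err := client.ListFileNames("chunks/", false, true)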
func (client *B2Client) DeleteFile(fileName string, fileID string) (err error) {

    input := make(map[string]string)
    input["fileName"] = fileName
    input["fileId"] = fileID

    url := client.APIURL + "/b2api/v1/b2_delete_file_version"
    readCloser, _, err := client.call(url, input)
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

type B2HideFileOutput struct {
    FileID string
}

func (client *B2Client) HideFile(fileName string) (fileID string, err error) {

    input := make(map[string]string)
    input["bucketId"] = client.BucketID
    input["fileName"] = fileName

    url := client.APIURL + "/b2api/v1/b2_hide_file"
    readCloser, _, err := client.call(url, input)
    if err != nil {
        return "", err
    }

    defer readCloser.Close()

    output := &B2HideFileOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return "", err
    }

    return output.FileID, nil
}

func (client *B2Client) DownloadFile(filePath string) (io.ReadCloser, int64, error) {

    url := client.DownloadURL + "/file/" + client.BucketName + "/" + filePath

    return client.call(url, 0)
}

type B2GetUploadArgumentOutput struct {
    BucketID           string
    UploadURL          string
    AuthorizationToken string
}

func (client *B2Client) getUploadURL() error {
    input := make(map[string]string)
    input["bucketId"] = client.BucketID

    url := client.APIURL + "/b2api/v1/b2_get_upload_url"
    readCloser, _, err := client.call(url, input)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    output := &B2GetUploadArgumentOutput{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return err
    }

    client.UploadURL = output.UploadURL
    client.UploadToken = output.AuthorizationToken

    return nil
}

func (client *B2Client) UploadFile(filePath string, content []byte, rateLimit int) (err error) {

    hasher := sha1.New()
    hasher.Write(content)
    hash := hex.EncodeToString(hasher.Sum(nil))

    headers := make(map[string]string)
    headers["X-Bz-File-Name"] = filePath
    headers["Content-Type"] = "application/octet-stream"
    headers["X-Bz-Content-Sha1"] = hash

    var response *http.Response

    backoff := 0
    for i := 0; i < 8; i++ {

        if client.UploadURL == "" || client.UploadToken == "" {
            err = client.getUploadURL()
            if err != nil {
                return err
            }
        }

        request, err := http.NewRequest("POST", client.UploadURL, CreateRateLimitedReader(content, rateLimit))
        if err != nil {
            return err
        }
        request.ContentLength = int64(len(content))

        request.Header.Set("Authorization", client.UploadToken)
        request.Header.Set("X-Bz-File-Name", filePath)
        request.Header.Set("Content-Type", "application/octet-stream")
        request.Header.Set("X-Bz-Content-Sha1", hash)

        for key, value := range headers {
            request.Header.Set(key, value)
        }

        if client.TestMode {
            r := rand.Float32()
            if r < 0.8 {
                request.Header.Set("X-Bz-Test-Mode", "fail_some_uploads")
            } else if r < 0.9 {
                request.Header.Set("X-Bz-Test-Mode", "expire_some_account_authorization_tokens")
            } else {
                request.Header.Set("X-Bz-Test-Mode", "force_cap_exceeded")
            }
        }

        response, err = client.HTTPClient.Do(request)
        if err != nil {
            LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned an error: %v", client.UploadURL, err)
            backoff = client.retry(backoff, response)
            client.UploadURL = ""
            client.UploadToken = ""
            continue
        }

        io.Copy(ioutil.Discard, response.Body)
        response.Body.Close()

        if response.StatusCode < 300 {
            return nil
        }

        LOG_DEBUG("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)

        if response.StatusCode == 401 {
            LOG_INFO("BACKBLAZE_UPLOAD", "Re-authorization required")
            client.UploadURL = ""
            client.UploadToken = ""
            continue
        } else if response.StatusCode == 403 {
            if !client.TestMode {
                return fmt.Errorf("B2 cap exceeded")
            }
            continue
        } else {
            LOG_INFO("BACKBLAZE_UPLOAD", "URL request '%s' returned status code %d", client.UploadURL, response.StatusCode)
            backoff = client.retry(backoff, response)
            client.UploadURL = ""
            client.UploadToken = ""
        }
    }

    return fmt.Errorf("Maximum backoff reached")
}

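// Illustrative sketch (not from this file): the SHA-1 sent in the
// X-Bz-Content-Sha1 header is computed once, outside the retry loop, so every
// retried POST carries the same checksum that B2 verifies server-side:
//
//     sum := sha1.Sum(content)
//     checksum := hex.EncodeToString(sum[:])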
@@ -5,129 +5,129 @@
package duplicacy

import (
    "crypto/sha256"
    "encoding/hex"
    "encoding/json"
    "testing"

    crypto_rand "crypto/rand"
    "io"
    "io/ioutil"
    "math/rand"
)

func createB2ClientForTest(t *testing.T) (*B2Client, string) {
    config, err := ioutil.ReadFile("test_storage.conf")
    if err != nil {
        t.Errorf("Failed to read config file: %v", err)
        return nil, ""
    }

    storages := make(map[string]map[string]string)

    err = json.Unmarshal(config, &storages)
    if err != nil {
        t.Errorf("Failed to parse config file: %v", err)
        return nil, ""
    }

    b2, found := storages["b2"]
    if !found {
        t.Errorf("Failed to find b2 config")
        return nil, ""
    }

    return NewB2Client(b2["account"], b2["key"]), b2["bucket"]
}

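// Illustrative sketch (values are placeholders): test_storage.conf is parsed
// as a JSON map of storage names to string settings; the "b2" entry read
// above needs the keys "account", "key" and "bucket":
//
//     { "b2": { "account": "...", "key": "...", "bucket": "..." } }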
func TestB2Client(t *testing.T) {

    b2Client, bucket := createB2ClientForTest(t)
    if b2Client == nil {
        return
    }

    b2Client.TestMode = true

    err := b2Client.AuthorizeAccount()
    if err != nil {
        t.Errorf("Failed to authorize the b2 account: %v", err)
        return
    }

    err = b2Client.FindBucket(bucket)
    if err != nil {
        t.Errorf("Failed to find bucket '%s': %v", bucket, err)
        return
    }

    testDirectory := "b2client_test/"

    files, err := b2Client.ListFileNames(testDirectory, false, false)
    if err != nil {
        t.Errorf("Failed to list files: %v", err)
        return
    }

    for _, file := range files {
        err = b2Client.DeleteFile(file.FileName, file.FileID)
        if err != nil {
            t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
        }
    }

    maxSize := 10000
    for i := 0; i < 20; i++ {
        size := rand.Int()%maxSize + 1
        content := make([]byte, size)
        _, err := crypto_rand.Read(content)
        if err != nil {
            t.Errorf("Error generating random content: %v", err)
            return
        }

        hash := sha256.Sum256(content)
        name := hex.EncodeToString(hash[:])

        err = b2Client.UploadFile(testDirectory+name, content, 100)
        if err != nil {
            t.Errorf("Error uploading file '%s': %v", name, err)
            return
        }
    }

    files, err = b2Client.ListFileNames(testDirectory, false, false)
    if err != nil {
        t.Errorf("Failed to list files: %v", err)
        return
    }

    for _, file := range files {

        readCloser, _, err := b2Client.DownloadFile(file.FileName)
        if err != nil {
            t.Errorf("Error downloading file '%s': %v", file.FileName, err)
            return
        }

        defer readCloser.Close()

        hasher := sha256.New()
        _, err = io.Copy(hasher, readCloser)

        hash := hex.EncodeToString(hasher.Sum(nil))

        if testDirectory+hash != file.FileName {
            t.Errorf("File %s has hash %s", file.FileName, hash)
        }
    }

    for _, file := range files {
        err = b2Client.DeleteFile(file.FileName, file.FileID)
        if err != nil {
            t.Errorf("Failed to delete file '%s': %v", file.FileName, err)
        }
    }
}

@@ -5,251 +5,251 @@
package duplicacy

import (
    "strings"
)

type B2Storage struct {
    RateLimitedStorage

    clients []*B2Client
}

// CreateB2Storage creates a B2 storage object.
func CreateB2Storage(accountID string, applicationKey string, bucket string, threads int) (storage *B2Storage, err error) {

    var clients []*B2Client

    for i := 0; i < threads; i++ {
        client := NewB2Client(accountID, applicationKey)

        err = client.AuthorizeAccount()
        if err != nil {
            return nil, err
        }

        err = client.FindBucket(bucket)
        if err != nil {
            return nil, err
        }

        clients = append(clients, client)
    }

    storage = &B2Storage{
        clients: clients,
    }
    return storage, nil
}

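// Illustrative sketch (arguments are placeholders): one authorized client is
// created per thread, which is why the rate limits further down are divided
// by len(storage.clients):
//
//     storage, err := CreateB2Storage("account_id", "application_key", "my-bucket", 4)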
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }
    length := len(dir) + 1

    includeVersions := false
    if dir == "chunks" {
        includeVersions = true
    }

    entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)
    if err != nil {
        return nil, nil, err
    }

    if dir == "snapshots" {

        subDirs := make(map[string]bool)

        for _, entry := range entries {
            name := entry.FileName[length:]
            subDir := strings.Split(name, "/")[0]
            subDirs[subDir+"/"] = true
        }

        for subDir := range subDirs {
            files = append(files, subDir)
        }
    } else if dir == "chunks" {
        lastFile := ""
        for _, entry := range entries {
            if entry.FileName == lastFile {
                continue
            }
            lastFile = entry.FileName
            if entry.Action == "hide" {
                files = append(files, entry.FileName[length:]+".fsl")
            } else {
                files = append(files, entry.FileName[length:])
            }
            sizes = append(sizes, entry.Size)
        }
    } else {
        for _, entry := range entries {
            files = append(files, entry.FileName[length:])
        }
    }

    return files, sizes, nil
}

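// Background, inferred from the code above (not a comment from the source):
// B2 has no rename operation, so a chunk hidden via b2_hide_file shows up in
// the version listing with its most recent action set to "hide"; ListFiles
// reports it with a synthesized '.fsl' suffix so the rest of the code can
// treat it as a fossil.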
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *B2Storage) DeleteFile(threadIndex int, filePath string) (err error) {

    if strings.HasSuffix(filePath, ".fsl") {
        filePath = filePath[:len(filePath)-len(".fsl")]
        entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
        if err != nil {
            return err
        }

        toBeDeleted := false

        for _, entry := range entries {
            if entry.FileName != filePath || (!toBeDeleted && entry.Action != "hide") {
                continue
            }

            toBeDeleted = true

            err = storage.clients[threadIndex].DeleteFile(filePath, entry.FileID)
            if err != nil {
                return err
            }
        }

        return nil

    } else {
        entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, false)
        if err != nil {
            return err
        }

        if len(entries) == 0 {
            return nil
        }
        return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
    }
}

// MoveFile renames the file.
func (storage *B2Storage) MoveFile(threadIndex int, from string, to string) (err error) {

    filePath := ""

    if strings.HasSuffix(from, ".fsl") {
        filePath = to
        if from != to+".fsl" {
            filePath = ""
        }
    } else if strings.HasSuffix(to, ".fsl") {
        filePath = from
        if to != from+".fsl" {
            filePath = ""
        }
    }

    if filePath == "" {
        LOG_FATAL("STORAGE_MOVE", "Moving file '%s' to '%s' is not supported", from, to)
        return nil
    }

    if filePath == from {
        _, err = storage.clients[threadIndex].HideFile(from)
        return err
    } else {
        entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, true)
        if err != nil {
            return err
        }
        if len(entries) == 0 || entries[0].FileName != filePath || entries[0].Action != "hide" {
            return nil
        }

        return storage.clients[threadIndex].DeleteFile(filePath, entries[0].FileID)
    }
}

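// Illustrative sketch (the chunk name is a placeholder): MoveFile only
// supports the two fossil transitions, implemented without a real B2 rename:
//
//     MoveFile(i, "chunks/ab12", "chunks/ab12.fsl") // hide the live file
//     MoveFile(i, "chunks/ab12.fsl", "chunks/ab12") // delete the hide marker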
// CreateDirectory creates a new directory.
|
// CreateDirectory creates a new directory.
|
||||||
func (storage *B2Storage) CreateDirectory(threadIndex int, dir string) (err error) {
|
func (storage *B2Storage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
func (storage *B2Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
isFossil := false
|
isFossil := false
|
||||||
if strings.HasSuffix(filePath, ".fsl") {
|
if strings.HasSuffix(filePath, ".fsl") {
|
||||||
isFossil = true
|
isFossil = true
|
||||||
filePath = filePath[:len(filePath) - len(".fsl")]
|
filePath = filePath[:len(filePath)-len(".fsl")]
|
||||||
}
|
}
|
||||||
|
|
||||||
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
|
entries, err := storage.clients[threadIndex].ListFileNames(filePath, true, isFossil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, false, 0, err
|
return false, false, 0, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(entries) == 0 || entries[0].FileName != filePath {
|
if len(entries) == 0 || entries[0].FileName != filePath {
|
||||||
return false, false, 0, nil
|
return false, false, 0, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if isFossil {
|
if isFossil {
|
||||||
if entries[0].Action == "hide" {
|
if entries[0].Action == "hide" {
|
||||||
return true, false, entries[0].Size, nil
|
return true, false, entries[0].Size, nil
|
||||||
} else {
|
} else {
|
||||||
return false, false, 0, nil
|
return false, false, 0, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true, false, entries[0].Size, nil
|
return true, false, entries[0].Size, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||||
// the suffix '.fsl'.
|
// the suffix '.fsl'.
|
||||||
func (storage *B2Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
func (storage *B2Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||||
filePath = "chunks/" + chunkID
|
filePath = "chunks/" + chunkID
|
||||||
if isFossil {
|
if isFossil {
|
||||||
filePath += ".fsl"
|
filePath += ".fsl"
|
||||||
}
|
}
|
||||||
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
|
||||||
return filePath, exist, size, err
|
return filePath, exist, size, err
|
||||||
}
|
}
|
||||||
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *B2Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

    readCloser, _, err := storage.clients[threadIndex].DownloadFile(filePath)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.clients))
    return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *B2Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
    return storage.clients[threadIndex].UploadFile(filePath, content, storage.UploadRateLimit/len(storage.clients))
}
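// Editorial note (hedged): both DownloadFile and UploadFile divide the global
// rate limit evenly across the per-thread clients, so with
// storage.UploadRateLimit == 1000 and four clients each thread is capped at
// 250, in whatever unit the limit uses.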
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *B2Storage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *B2Storage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *B2Storage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *B2Storage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *B2Storage) EnableTestMode() {
    for _, client := range storage.clients {
        client.TestMode = true
    }
}
@@ -5,339 +5,338 @@
package duplicacy

import (
    crypto_rand "crypto/rand"
    "crypto/sha256"
    "encoding/hex"
    "io"
    "math/rand"
    "os"
    "path"
    "testing"
    "time"

    "runtime/debug"
)

func createRandomFile(path string, maxSize int) {
    file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        LOG_ERROR("RANDOM_FILE", "Can't open %s for writing: %v", path, err)
        return
    }

    defer file.Close()

    size := maxSize/2 + rand.Int()%(maxSize/2)

    buffer := make([]byte, 32*1024)
    for size > 0 {
        bytes := size
        if bytes > cap(buffer) {
            bytes = cap(buffer)
        }
        crypto_rand.Read(buffer[:bytes])
        bytes, err = file.Write(buffer[:bytes])
        if err != nil {
            LOG_ERROR("RANDOM_FILE", "Failed to write to %s: %v", path, err)
            return
        }
        size -= bytes
    }
}
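// Editorial note (sketch): the size drawn above is uniform in
// [maxSize/2, maxSize), so createRandomFile(dir+"/file1", 1000000) yields a
// file of 500000 to 999999 bytes filled with crypto/rand data.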
func modifyFile(path string, portion float32) {

    stat, err := os.Stat(path)
    if err != nil {
        LOG_ERROR("MODIFY_FILE", "Can't stat the file %s: %v", path, err)
        return
    }

    modifiedTime := stat.ModTime()

    file, err := os.OpenFile(path, os.O_WRONLY, 0644)
    if err != nil {
        LOG_ERROR("MODIFY_FILE", "Can't open %s for writing: %v", path, err)
        return
    }

    defer func() {
        if file != nil {
            file.Close()
        }
    }()

    size, err := file.Seek(0, 2)
    if err != nil {
        LOG_ERROR("MODIFY_FILE", "Can't seek to the end of the file %s: %v", path, err)
        return
    }

    length := int(float32(size) * portion)
    start := rand.Int() % (int(size) - length)

    _, err = file.Seek(int64(start), 0)
    if err != nil {
        LOG_ERROR("MODIFY_FILE", "Can't seek to the offset %d: %v", start, err)
        return
    }

    buffer := make([]byte, length)
    crypto_rand.Read(buffer)

    _, err = file.Write(buffer)
    if err != nil {
        LOG_ERROR("MODIFY_FILE", "Failed to write to %s: %v", path, err)
        return
    }

    file.Close()
    file = nil

    // Add 2 seconds to the modified time for the changes to be detectable in quick mode.
    modifiedTime = modifiedTime.Add(time.Second * 2)
    err = os.Chtimes(path, modifiedTime, modifiedTime)

    if err != nil {
        LOG_ERROR("MODIFY_FILE", "Failed to change the modification time of %s: %v", path, err)
        return
    }
}
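// Editorial note (hedged): quick mode decides whether to rescan a file from
// its size and modification time alone, which is why modifyFile bumps the
// mtime by a full 2 seconds; a smaller bump could be lost on filesystems with
// coarse timestamp resolution.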
func checkExistence(t *testing.T, path string, exists bool, isDir bool) {
    stat, err := os.Stat(path)
    if exists {
        if err != nil {
            t.Errorf("%s does not exist: %v", path, err)
        } else if isDir {
            if !stat.Mode().IsDir() {
                t.Errorf("%s is not a directory", path)
            }
        } else {
            if stat.Mode().IsDir() {
                t.Errorf("%s is not a file", path)
            }
        }
    } else {
        if err == nil || !os.IsNotExist(err) {
            t.Errorf("%s may exist: %v", path, err)
        }
    }
}
func truncateFile(path string) {
    file, err := os.OpenFile(path, os.O_WRONLY, 0644)
    if err != nil {
        LOG_ERROR("TRUNCATE_FILE", "Can't open %s for writing: %v", path, err)
        return
    }

    defer file.Close()

    oldSize, err := file.Seek(0, 2)
    if err != nil {
        LOG_ERROR("TRUNCATE_FILE", "Can't seek to the end of the file %s: %v", path, err)
        return
    }

    newSize := rand.Int63() % oldSize

    err = file.Truncate(newSize)
    if err != nil {
        LOG_ERROR("TRUNCATE_FILE", "Can't truncate the file %s to size %d: %v", path, newSize, err)
        return
    }
}
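// Editorial caveat (hedged): rand.Int63() % oldSize panics when oldSize is 0,
// so truncateFile assumes it is only called on the non-empty files produced by
// createRandomFile above.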
func getFileHash(path string) (hash string) {

    file, err := os.Open(path)
    if err != nil {
        LOG_ERROR("FILE_HASH", "Can't open %s for reading: %v", path, err)
        return ""
    }

    defer file.Close()

    hasher := sha256.New()
    _, err = io.Copy(hasher, file)
    if err != nil {
        LOG_ERROR("FILE_HASH", "Can't read file %s: %v", path, err)
        return ""
    }

    return hex.EncodeToString(hasher.Sum(nil))
}
func TestBackupManager(t *testing.T) {

    rand.Seed(time.Now().UnixNano())
    setTestingT(t)
    SetLoggingLevel(INFO)

    defer func() {
        if r := recover(); r != nil {
            switch e := r.(type) {
            case Exception:
                t.Errorf("%s %s", e.LogID, e.Message)
                debug.PrintStack()
            default:
                t.Errorf("%v", e)
                debug.PrintStack()
            }
        }
    }()

    testDir := path.Join(os.TempDir(), "duplicacy_test")
    os.RemoveAll(testDir)
    os.MkdirAll(testDir, 0700)

    os.Mkdir(testDir+"/repository1", 0700)
    os.Mkdir(testDir+"/repository1/dir1", 0700)
    os.Mkdir(testDir+"/repository1/.duplicacy", 0700)
    os.Mkdir(testDir+"/repository2", 0700)
    os.Mkdir(testDir+"/repository2/.duplicacy", 0700)

    maxFileSize := 1000000
    //maxFileSize := 200000

    createRandomFile(testDir+"/repository1/file1", maxFileSize)
    createRandomFile(testDir+"/repository1/file2", maxFileSize)
    createRandomFile(testDir+"/repository1/dir1/file3", maxFileSize)

    threads := 1

    storage, err := loadStorage(testDir+"/storage", threads)
    if err != nil {
        t.Errorf("Failed to create storage: %v", err)
        return
    }

    delay := 0
    if _, ok := storage.(*ACDStorage); ok {
        delay = 1
    }
    if _, ok := storage.(*OneDriveStorage); ok {
        delay = 5
    }

    password := "duplicacy"

    cleanStorage(storage)

    time.Sleep(time.Duration(delay) * time.Second)
    if testFixedChunkSize {
        if !ConfigStorage(storage, 100, 64*1024, 64*1024, 64*1024, password, nil) {
            t.Errorf("Failed to initialize the storage")
        }
    } else {
        if !ConfigStorage(storage, 100, 64*1024, 256*1024, 16*1024, password, nil) {
            t.Errorf("Failed to initialize the storage")
        }
    }

    time.Sleep(time.Duration(delay) * time.Second)

    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
    backupManager := CreateBackupManager("host1", storage, testDir, password)
    backupManager.SetupSnapshotCache("default")

    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
    backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "first", false, false)
    time.Sleep(time.Duration(delay) * time.Second)
    SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
    backupManager.Restore(testDir+"/repository2", threads /*inPlace=*/, false /*quickMode=*/, false, threads /*overwrite=*/, true,
        /*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)

    for _, f := range []string{"file1", "file2", "dir1/file3"} {
        if _, err := os.Stat(testDir + "/repository2/" + f); os.IsNotExist(err) {
            t.Errorf("File %s does not exist", f)
            continue
        }

        hash1 := getFileHash(testDir + "/repository1/" + f)
        hash2 := getFileHash(testDir + "/repository2/" + f)
        if hash1 != hash2 {
            t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
        }
    }

    modifyFile(testDir+"/repository1/file1", 0.1)
    modifyFile(testDir+"/repository1/file2", 0.2)
    modifyFile(testDir+"/repository1/dir1/file3", 0.3)

    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
    backupManager.Backup(testDir+"/repository1" /*quickMode=*/, true, threads, "second", false, false)
    time.Sleep(time.Duration(delay) * time.Second)
    SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
    backupManager.Restore(testDir+"/repository2", 2 /*inPlace=*/, true /*quickMode=*/, true, threads /*overwrite=*/, true,
        /*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, nil)

    for _, f := range []string{"file1", "file2", "dir1/file3"} {
        hash1 := getFileHash(testDir + "/repository1/" + f)
        hash2 := getFileHash(testDir + "/repository2/" + f)
        if hash1 != hash2 {
            t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
        }
    }

    // Truncate file2 and add a few empty directories
    truncateFile(testDir + "/repository1/file2")
    os.Mkdir(testDir+"/repository1/dir2", 0700)
    os.Mkdir(testDir+"/repository1/dir2/dir3", 0700)
    os.Mkdir(testDir+"/repository1/dir4", 0700)
    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
    backupManager.Backup(testDir+"/repository1" /*quickMode=*/, false, threads, "third", false, false)
    time.Sleep(time.Duration(delay) * time.Second)

    // Create some directories and files under repository2 that will be deleted during restore
    os.Mkdir(testDir+"/repository2/dir5", 0700)
    os.Mkdir(testDir+"/repository2/dir5/dir6", 0700)
    os.Mkdir(testDir+"/repository2/dir7", 0700)
    createRandomFile(testDir+"/repository2/file4", 100)
    createRandomFile(testDir+"/repository2/dir5/file5", 100)

    SetDuplicacyPreferencePath(testDir + "/repository2/.duplicacy")
    backupManager.Restore(testDir+"/repository2", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
        /*deleteMode=*/ true /*showStatistics=*/, false /*patterns=*/, nil)

    for _, f := range []string{"file1", "file2", "dir1/file3"} {
        hash1 := getFileHash(testDir + "/repository1/" + f)
        hash2 := getFileHash(testDir + "/repository2/" + f)
        if hash1 != hash2 {
            t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
        }
    }

    // These files/dirs should not exist because deleteMode == true
    checkExistence(t, testDir+"/repository2/dir5", false, false)
    checkExistence(t, testDir+"/repository2/dir5/dir6", false, false)
    checkExistence(t, testDir+"/repository2/dir7", false, false)
    checkExistence(t, testDir+"/repository2/file4", false, false)
    checkExistence(t, testDir+"/repository2/dir5/file5", false, false)

    // These empty dirs should exist
    checkExistence(t, testDir+"/repository2/dir2", true, true)
    checkExistence(t, testDir+"/repository2/dir2/dir3", true, true)
    checkExistence(t, testDir+"/repository2/dir4", true, true)

    // Remove file2 and dir1/file3 and restore them from revision 3
    os.Remove(testDir + "/repository1/file2")
    os.Remove(testDir + "/repository1/dir1/file3")
    SetDuplicacyPreferencePath(testDir + "/repository1/.duplicacy")
    backupManager.Restore(testDir+"/repository1", 3 /*inPlace=*/, true /*quickMode=*/, false, threads /*overwrite=*/, true,
        /*deleteMode=*/ false /*showStatistics=*/, false /*patterns=*/, []string{"+file2", "+dir1/file3", "-*"})

    for _, f := range []string{"file1", "file2", "dir1/file3"} {
        hash1 := getFileHash(testDir + "/repository1/" + f)
        hash2 := getFileHash(testDir + "/repository2/" + f)
        if hash1 != hash2 {
            t.Errorf("File %s has different hashes: %s vs %s", f, hash1, hash2)
        }
    }

    /*buf := make([]byte, 1<<16)
    runtime.Stack(buf, true)
    fmt.Printf("%s", buf)*/
}
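// Editorial summary (not part of this commit): the test above drives three
// backups ("first", "second", "third") and validates each restore by comparing
// SHA-256 hashes of every file between repository1 (the source) and
// repository2 (the restore target), including delete-mode cleanup and a
// pattern-filtered restore ("+file2", "+dir1/file3", "-*").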
@@ -5,59 +5,58 @@
package duplicacy

import (
    "bytes"
    "compress/zlib"
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "encoding/hex"
    "fmt"
    "hash"
    "io"
    "runtime"

    "github.com/bkaradzic/go-lz4"
)

// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
// we maintain a pool of previously used buffers.
var chunkBufferPool chan *bytes.Buffer = make(chan *bytes.Buffer, runtime.NumCPU()*16)
func AllocateChunkBuffer() (buffer *bytes.Buffer) {
    select {
    case buffer = <-chunkBufferPool:
    default:
        buffer = new(bytes.Buffer)
    }
    return buffer
}

func ReleaseChunkBuffer(buffer *bytes.Buffer) {
    select {
    case chunkBufferPool <- buffer:
    default:
        LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk buffer due to a full pool")
    }
}
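// Editorial sketch (not part of this commit): the pool is a bounded free list
// built on a buffered channel, so both operations above are non-blocking. A
// typical caller pairs the two calls:
//
//     buffer := AllocateChunkBuffer()
//     defer ReleaseChunkBuffer(buffer)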
// Chunk is the object being passed between the chunk maker, the chunk uploader, and chunk downloader. It can be
// read and written like a bytes.Buffer, and provides convenient functions to calculate the hash and id of the chunk.
type Chunk struct {
    buffer *bytes.Buffer // Where the actual data is stored. It may be nil for hash-only chunks, where chunks
    // are only used to compute the hashes

    size int // The size of data stored. This field is needed if buffer is nil

    hasher hash.Hash // Keeps track of the hash of data stored in the buffer. It may be nil, since sometimes
    // it isn't necessary to compute the hash, for instance, when the encrypted data is being
    // read into the primary buffer

    hash []byte // The hash of the chunk data. It is always in the binary format
    id   string // The id of the chunk data (used as the file name for saving the chunk); always in hex format

    config *Config // Every chunk is associated with a Config object. Which hashing algorithm to use is determined
    // by the config
}

// Magic word to identify a duplicacy format encrypted file, plus a version number.
@@ -66,205 +65,205 @@ var ENCRYPTION_HEADER = "duplicacy\000"
// CreateChunk creates a new chunk.
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {

    var buffer *bytes.Buffer

    if bufferNeeded {
        buffer = AllocateChunkBuffer()
        buffer.Reset()
        if buffer.Cap() < config.MaximumChunkSize {
            buffer.Grow(config.MaximumChunkSize - buffer.Cap())
        }
    }

    return &Chunk{
        buffer: buffer,
        config: config,
    }
}
// GetLength returns the length of available data
func (chunk *Chunk) GetLength() int {
    if chunk.buffer != nil {
        return len(chunk.buffer.Bytes())
    } else {
        return chunk.size
    }
}

// GetBytes returns data available in this chunk
func (chunk *Chunk) GetBytes() []byte {
    return chunk.buffer.Bytes()
}

// Reset makes the chunk reusable by clearing the existing data in the buffers. 'hashNeeded' indicates whether the
// hash of the new data to be read is needed. If the data to be read in is encrypted, there is no need to
// calculate the hash so hashNeeded should be 'false'.
func (chunk *Chunk) Reset(hashNeeded bool) {
    if chunk.buffer != nil {
        chunk.buffer.Reset()
    }
    if hashNeeded {
        chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
    } else {
        chunk.hasher = nil
    }
    chunk.hash = nil
    chunk.id = ""
    chunk.size = 0
}
// Write implements the Writer interface.
func (chunk *Chunk) Write(p []byte) (int, error) {

    // buffer may be nil, when the chunk is used for computing the hash only.
    if chunk.buffer == nil {
        chunk.size += len(p)
    } else {
        chunk.buffer.Write(p)
    }

    // hasher may be nil, when the chunk is used to store encrypted content
    if chunk.hasher != nil {
        chunk.hasher.Write(p)
    }
    return len(p), nil
}
// GetHash returns the chunk hash.
func (chunk *Chunk) GetHash() string {
    if len(chunk.hash) == 0 {
        chunk.hash = chunk.hasher.Sum(nil)
    }

    return string(chunk.hash)
}

// GetID returns the chunk id.
func (chunk *Chunk) GetID() string {
    if len(chunk.id) == 0 {
        if len(chunk.hash) == 0 {
            chunk.hash = chunk.hasher.Sum(nil)
        }

        hasher := chunk.config.NewKeyedHasher(chunk.config.IDKey)
        hasher.Write([]byte(chunk.hash))
        chunk.id = hex.EncodeToString(hasher.Sum(nil))
    }

    return chunk.id
}

func (chunk *Chunk) VerifyID() {
    hasher := chunk.config.NewKeyedHasher(chunk.config.HashKey)
    hasher.Write(chunk.buffer.Bytes())
    hash := hasher.Sum(nil)
    hasher = chunk.config.NewKeyedHasher(chunk.config.IDKey)
    hasher.Write([]byte(hash))
    chunkID := hex.EncodeToString(hasher.Sum(nil))
    if chunkID != chunk.GetID() {
        LOG_ERROR("CHUNK_ID", "The chunk id should be %s instead of %s, length: %d", chunkID, chunk.GetID(), len(chunk.buffer.Bytes()))
    }
}
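// Editorial sketch (hedged): as GetID and VerifyID show, a chunk id is a keyed
// hash of the keyed content hash, roughly id = hex(H(IDKey, H(HashKey, data))),
// so the id can be recomputed from the raw chunk bytes for verification.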
// Encrypt encrypts the plain data stored in the chunk buffer. If derivationKey is not nil, the actual
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err error) {

    var aesBlock cipher.Block
    var gcm cipher.AEAD
    var nonce []byte
    var offset int

    encryptedBuffer := AllocateChunkBuffer()
    encryptedBuffer.Reset()
    defer func() {
        ReleaseChunkBuffer(encryptedBuffer)
    }()

    if len(encryptionKey) > 0 {

        key := encryptionKey

        if len(derivationKey) > 0 {
            hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
            hasher.Write(encryptionKey)
            key = hasher.Sum(nil)
        }

        aesBlock, err = aes.NewCipher(key)
        if err != nil {
            return err
        }

        gcm, err = cipher.NewGCM(aesBlock)
        if err != nil {
            return err
        }

        // Start with the magic number and the version number.
        encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))

        // Followed by the nonce
        nonce = make([]byte, gcm.NonceSize())
        _, err := rand.Read(nonce)
        if err != nil {
            return err
        }
        encryptedBuffer.Write(nonce)
        offset = encryptedBuffer.Len()

    }

    // offset is either 0 or the length of header + nonce

    if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
        deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
        deflater.Write(chunk.buffer.Bytes())
        deflater.Close()
    } else if chunk.config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
        encryptedBuffer.Write([]byte("LZ4 "))
        // Make sure we have enough space in encryptedBuffer
        availableLength := encryptedBuffer.Cap() - len(encryptedBuffer.Bytes())
        maximumLength := lz4.CompressBound(len(chunk.buffer.Bytes()))
        if availableLength < maximumLength {
            encryptedBuffer.Grow(maximumLength - availableLength)
        }
        written, err := lz4.Encode(encryptedBuffer.Bytes()[offset+4:], chunk.buffer.Bytes())
        if err != nil {
            return fmt.Errorf("LZ4 compression error: %v", err)
        }
        // written is actually encryptedBuffer[offset + 4:], but we need to move the write pointer
        // and this seems to be the only way
        encryptedBuffer.Write(written)
    } else {
        return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
    }

    if len(encryptionKey) == 0 {
        chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
        return nil
    }

    // PKCS7 is used. Compressed chunk sizes leak information about the original chunks so we want the padding sizes
    // to be the maximum allowed by PKCS7
    dataLength := encryptedBuffer.Len() - offset
    paddingLength := dataLength % 256
    if paddingLength == 0 {
        paddingLength = 256
    }

    encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
    encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))

    // The encrypted data will be appended to the duplicacy header and the nonce.
    encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
        encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)

    encryptedBuffer.Truncate(len(encryptedBytes))

    chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer

    return nil

}
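// Editorial worked example (hedged): with dataLength == 1000 the code above
// appends 1000 % 256 == 232 padding bytes, each of value 232; an exact
// multiple such as dataLength == 512 receives the full 256 bytes of padding,
// encoded as byte value 0 and decoded back to 256 in Decrypt below.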
@@ -272,111 +271,110 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string) (err err
// encryption key will be HMAC-SHA256(encryptionKey, derivationKey).
func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err error) {

    var offset int

    encryptedBuffer := AllocateChunkBuffer()
    encryptedBuffer.Reset()
    defer func() {
        ReleaseChunkBuffer(encryptedBuffer)
    }()

    chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer

    if len(encryptionKey) > 0 {

        key := encryptionKey

        if len(derivationKey) > 0 {
            hasher := chunk.config.NewKeyedHasher([]byte(derivationKey))
            hasher.Write(encryptionKey)
            key = hasher.Sum(nil)
        }

        aesBlock, err := aes.NewCipher(key)
        if err != nil {
            return err
        }

        gcm, err := cipher.NewGCM(aesBlock)
        if err != nil {
            return err
        }

        headerLength := len(ENCRYPTION_HEADER)
        offset = headerLength + gcm.NonceSize()

        if len(encryptedBuffer.Bytes()) < offset {
            return fmt.Errorf("Not enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
        }

        if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
            return fmt.Errorf("The storage doesn't seem to be encrypted")
        }

        if encryptedBuffer.Bytes()[headerLength-1] != 0 {
            return fmt.Errorf("Unsupported encryption version %d", encryptedBuffer.Bytes()[headerLength-1])
        }

        nonce := encryptedBuffer.Bytes()[headerLength:offset]

        decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
            encryptedBuffer.Bytes()[offset:], nil)

        if err != nil {
            return err
        }

        paddingLength := int(decryptedBytes[len(decryptedBytes)-1])
        if paddingLength == 0 {
            paddingLength = 256
        }
        if len(decryptedBytes) <= paddingLength {
            return fmt.Errorf("Incorrect padding length %d out of %d bytes", paddingLength, len(decryptedBytes))
        }

        for i := 0; i < paddingLength; i++ {
            padding := decryptedBytes[len(decryptedBytes)-1-i]
            if padding != byte(paddingLength) {
                return fmt.Errorf("Incorrect padding of length %d: %x", paddingLength,
                    decryptedBytes[len(decryptedBytes)-paddingLength:])
            }
        }

        encryptedBuffer.Truncate(len(decryptedBytes) - paddingLength)
    }

    encryptedBuffer.Read(encryptedBuffer.Bytes()[:offset])

    compressed := encryptedBuffer.Bytes()
    if len(compressed) > 4 && string(compressed[:4]) == "LZ4 " {
        chunk.buffer.Reset()
        decompressed, err := lz4.Decode(chunk.buffer.Bytes(), encryptedBuffer.Bytes()[4:])
        if err != nil {
            return err
        }

        chunk.buffer.Write(decompressed)
        chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
        chunk.hasher.Write(decompressed)
        chunk.hash = nil
        return nil
    }
    inflater, err := zlib.NewReader(encryptedBuffer)
    if err != nil {
        return err
    }

    defer inflater.Close()

    chunk.buffer.Reset()
    chunk.hasher = chunk.config.NewKeyedHasher(chunk.config.HashKey)
    chunk.hash = nil

    if _, err = io.Copy(chunk, inflater); err != nil {
        return err
    }

    return nil

}
@@ -5,69 +5,68 @@
package duplicacy

import (
    "bytes"
    crypto_rand "crypto/rand"
    "math/rand"
    "testing"
)

func TestChunk(t *testing.T) {

    key := []byte("duplicacydefault")

    config := CreateConfig()
    config.HashKey = key
    config.IDKey = key
    config.MinimumChunkSize = 100
    config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
    maxSize := 1000000

    for i := 0; i < 500; i++ {

        size := rand.Int() % maxSize

        plainData := make([]byte, size)
        crypto_rand.Read(plainData)
        chunk := CreateChunk(config, true)
        chunk.Reset(true)
        chunk.Write(plainData)

        hash := chunk.GetHash()
        id := chunk.GetID()

        err := chunk.Encrypt(key, "")
        if err != nil {
            t.Errorf("Failed to encrypt the data: %v", err)
            continue
        }

        encryptedData := make([]byte, chunk.GetLength())
        copy(encryptedData, chunk.GetBytes())

        chunk.Reset(false)
        chunk.Write(encryptedData)
        err = chunk.Decrypt(key, "")
        if err != nil {
            t.Errorf("Failed to decrypt the data: %v", err)
            continue
        }

        decryptedData := chunk.GetBytes()

        if hash != chunk.GetHash() {
            t.Errorf("Original hash: %x, decrypted hash: %x", hash, chunk.GetHash())
        }

        if id != chunk.GetID() {
            t.Errorf("Original id: %s, decrypted id: %s", id, chunk.GetID())
        }

        if bytes.Compare(plainData, decryptedData) != 0 {
            t.Logf("original length: %d, decrypted length: %d", len(plainData), len(decryptedData))
            t.Errorf("Original data:\n%x\nDecrypted data:\n%x\n", plainData, decryptedData)
        }
    }
}
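// Editorial note (hedged): this encrypt/decrypt round-trip test can be run on
// its own with the standard Go tooling, e.g. "go test -run TestChunk" inside
// this package.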
@@ -5,24 +5,24 @@
package duplicacy

import (
    "sync/atomic"
    "time"
)

// ChunkDownloadTask encapsulates the information needed to download a chunk.
type ChunkDownloadTask struct {
    chunk         *Chunk // The chunk that will be downloaded; initially nil
    chunkIndex    int    // The index of this chunk in the chunk list
    chunkHash     string // The chunk hash
    chunkLength   int    // The length of the chunk; may be zero
    needed        bool   // Whether this chunk can be skipped if a local copy exists
    isDownloading bool   // 'true' means the chunk has been downloaded or is being downloaded
}

// ChunkDownloadCompletion represents the notification when a chunk has been downloaded.
type ChunkDownloadCompletion struct {
    chunkIndex int    // The index of this chunk in the chunk list
    chunk      *Chunk // The chunk that has been downloaded
}
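// Editorial sketch (hedged): a downloading goroutine reports a finished chunk
// by sending a completion on the downloader's channel, e.g.
//
//     downloader.completionChannel <- ChunkDownloadCompletion{chunkIndex: task.chunkIndex, chunk: chunk}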
// ChunkDownloader is capable of performing multi-threaded downloading. Chunks to be downloaded are first organized
@@ -30,348 +30,347 @@ type ChunkDownloadCompletion struct {
// corresponding ChunkDownloadTask is sent to the downloading goroutine. Once a chunk is downloaded, it will be
// inserted in the completed task list.
type ChunkDownloader struct {
    config         *Config      // Associated config
    storage        Storage      // Download from this storage
    snapshotCache  *FileStorage // Used as cache if not nil; usually for downloading snapshot chunks
    showStatistics bool         // Show a stats log for each chunk if true
    threads        int          // Number of threads

    taskList       []ChunkDownloadTask // The list of chunks to be downloaded
    completedTasks map[int]bool        // Store downloaded chunks
    lastChunkIndex int                 // a monotonically increasing number indicating the last chunk to be downloaded

    taskQueue         chan ChunkDownloadTask       // Downloading goroutines are waiting on this channel for input
    stopChannel       chan bool                    // Used to stop the downloading goroutines
    completionChannel chan ChunkDownloadCompletion // A downloading goroutine sends back the chunk via this channel after downloading

    startTime                 int64 // The time it starts downloading
    totalChunkSize            int64 // Total chunk size
    downloadedChunkSize       int64 // Downloaded chunk size
    numberOfDownloadedChunks  int   // The number of chunks that have been downloaded
    numberOfDownloadingChunks int   // The number of chunks still being downloaded
    numberOfActiveChunks      int   // The number of chunks that are being downloaded or have been downloaded but not reclaimed
}

func CreateChunkDownloader(config *Config, storage Storage, snapshotCache *FileStorage, showStatistics bool, threads int) *ChunkDownloader {
    downloader := &ChunkDownloader{
        config:         config,
        storage:        storage,
        snapshotCache:  snapshotCache,
        showStatistics: showStatistics,
        threads:        threads,

        taskList:       nil,
        completedTasks: make(map[int]bool),
        lastChunkIndex: 0,

        taskQueue:         make(chan ChunkDownloadTask, threads),
        stopChannel:       make(chan bool),
        completionChannel: make(chan ChunkDownloadCompletion),

        startTime: time.Now().Unix(),
    }

    // Start the downloading goroutines
    for i := 0; i < downloader.threads; i++ {
        go func(threadIndex int) {
            defer CatchLogException()
            for {
                select {
                case task := <-downloader.taskQueue:
|
||||||
downloader.Download(threadIndex, task)
|
downloader.Download(threadIndex, task)
|
||||||
case <- downloader.stopChannel:
|
case <-downloader.stopChannel:
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} (i)
|
}(i)
|
||||||
}
|
}
|
||||||
|
|
||||||
return downloader
|
return downloader
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddFiles adds chunks needed by the specified files to the download list.
|
// AddFiles adds chunks needed by the specified files to the download list.
|
||||||
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files [] *Entry) {
|
func (downloader *ChunkDownloader) AddFiles(snapshot *Snapshot, files []*Entry) {
|
||||||
|
|
||||||
downloader.taskList = nil
|
downloader.taskList = nil
|
||||||
lastChunkIndex := -1
|
lastChunkIndex := -1
|
||||||
maximumChunks := 0
|
maximumChunks := 0
|
||||||
downloader.totalChunkSize = 0
|
downloader.totalChunkSize = 0
|
||||||
for _, file := range files {
|
for _, file := range files {
|
||||||
if file.Size == 0 {
|
if file.Size == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for i := file.StartChunk; i <= file.EndChunk; i++ {
|
for i := file.StartChunk; i <= file.EndChunk; i++ {
|
||||||
if lastChunkIndex != i {
|
if lastChunkIndex != i {
|
||||||
task := ChunkDownloadTask {
|
task := ChunkDownloadTask{
|
||||||
chunkIndex: len(downloader.taskList),
|
chunkIndex: len(downloader.taskList),
|
||||||
chunkHash: snapshot.ChunkHashes[i],
|
chunkHash: snapshot.ChunkHashes[i],
|
||||||
chunkLength: snapshot.ChunkLengths[i],
|
chunkLength: snapshot.ChunkLengths[i],
|
||||||
needed: false,
|
needed: false,
|
||||||
}
|
}
|
||||||
downloader.taskList = append(downloader.taskList, task)
|
downloader.taskList = append(downloader.taskList, task)
|
||||||
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
|
downloader.totalChunkSize += int64(snapshot.ChunkLengths[i])
|
||||||
} else {
|
} else {
|
||||||
downloader.taskList[len(downloader.taskList) - 1].needed = true
|
downloader.taskList[len(downloader.taskList)-1].needed = true
|
||||||
}
|
}
|
||||||
lastChunkIndex = i
|
lastChunkIndex = i
|
||||||
}
|
}
|
||||||
file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
|
file.StartChunk = len(downloader.taskList) - (file.EndChunk - file.StartChunk) - 1
|
||||||
file.EndChunk = len(downloader.taskList) - 1
|
file.EndChunk = len(downloader.taskList) - 1
|
||||||
if file.EndChunk - file.StartChunk > maximumChunks {
|
if file.EndChunk-file.StartChunk > maximumChunks {
|
||||||
maximumChunks = file.EndChunk - file.StartChunk
|
maximumChunks = file.EndChunk - file.StartChunk
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddChunk adds a single chunk the download list.
|
// AddChunk adds a single chunk the download list.
|
||||||
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
|
func (downloader *ChunkDownloader) AddChunk(chunkHash string) int {
|
||||||
task := ChunkDownloadTask {
|
task := ChunkDownloadTask{
|
||||||
chunkIndex: len(downloader.taskList),
|
chunkIndex: len(downloader.taskList),
|
||||||
chunkHash: chunkHash,
|
chunkHash: chunkHash,
|
||||||
chunkLength: 0,
|
chunkLength: 0,
|
||||||
needed: true,
|
needed: true,
|
||||||
isDownloading: false,
|
isDownloading: false,
|
||||||
}
|
}
|
||||||
downloader.taskList = append(downloader.taskList, task)
|
downloader.taskList = append(downloader.taskList, task)
|
||||||
if downloader.numberOfActiveChunks < downloader.threads {
|
if downloader.numberOfActiveChunks < downloader.threads {
|
||||||
downloader.taskQueue <- task
|
downloader.taskQueue <- task
|
||||||
downloader.numberOfDownloadingChunks++
|
downloader.numberOfDownloadingChunks++
|
||||||
downloader.numberOfActiveChunks++
|
downloader.numberOfActiveChunks++
|
||||||
downloader.taskList[len(downloader.taskList) - 1].isDownloading = true
|
downloader.taskList[len(downloader.taskList)-1].isDownloading = true
|
||||||
}
|
}
|
||||||
return len(downloader.taskList) - 1
|
return len(downloader.taskList) - 1
|
||||||
}
|
}
|
||||||
|
|
||||||
// Prefetch adds up to 'threads' chunks needed by a file to the download list
|
// Prefetch adds up to 'threads' chunks needed by a file to the download list
|
||||||
func (downloader *ChunkDownloader) Prefetch(file *Entry) {
|
func (downloader *ChunkDownloader) Prefetch(file *Entry) {
|
||||||
|
|
||||||
// Any chunks before the first chunk of this filea are not needed any more, so they can be reclaimed.
|
// Any chunks before the first chunk of this filea are not needed any more, so they can be reclaimed.
|
||||||
downloader.Reclaim(file.StartChunk)
|
downloader.Reclaim(file.StartChunk)
|
||||||
|
|
||||||
for i := file.StartChunk; i <= file.EndChunk; i++ {
|
for i := file.StartChunk; i <= file.EndChunk; i++ {
|
||||||
task := &downloader.taskList[i]
|
task := &downloader.taskList[i]
|
||||||
if task.needed {
|
if task.needed {
|
||||||
if !task.isDownloading {
|
if !task.isDownloading {
|
||||||
if downloader.numberOfActiveChunks >= downloader.threads {
|
if downloader.numberOfActiveChunks >= downloader.threads {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
|
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching %s chunk %s", file.Path,
|
||||||
downloader.config.GetChunkIDFromHash(task.chunkHash))
|
downloader.config.GetChunkIDFromHash(task.chunkHash))
|
||||||
downloader.taskQueue <- *task
|
downloader.taskQueue <- *task
|
||||||
task.isDownloading = true
|
task.isDownloading = true
|
||||||
downloader.numberOfDownloadingChunks++
|
downloader.numberOfDownloadingChunks++
|
||||||
downloader.numberOfActiveChunks++
|
downloader.numberOfActiveChunks++
|
||||||
}
|
}
|
||||||
} else{
|
} else {
|
||||||
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
|
LOG_DEBUG("DOWNLOAD_PREFETCH", "%s chunk %s is not needed", file.Path,
|
||||||
downloader.config.GetChunkIDFromHash(task.chunkHash))
|
downloader.config.GetChunkIDFromHash(task.chunkHash))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reclaim releases the downloaded chunk to the chunk pool
|
// Reclaim releases the downloaded chunk to the chunk pool
|
||||||
func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
|
func (downloader *ChunkDownloader) Reclaim(chunkIndex int) {
|
||||||
|
|
||||||
if downloader.lastChunkIndex == chunkIndex {
|
if downloader.lastChunkIndex == chunkIndex {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, _ := range downloader.completedTasks {
|
for i, _ := range downloader.completedTasks {
|
||||||
if i < chunkIndex && downloader.taskList[i].chunk != nil {
|
if i < chunkIndex && downloader.taskList[i].chunk != nil {
|
||||||
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
||||||
downloader.taskList[i].chunk = nil
|
downloader.taskList[i].chunk = nil
|
||||||
delete(downloader.completedTasks, i)
|
delete(downloader.completedTasks, i)
|
||||||
downloader.numberOfActiveChunks--
|
downloader.numberOfActiveChunks--
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
downloader.lastChunkIndex = chunkIndex
|
downloader.lastChunkIndex = chunkIndex
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitForChunk waits until the specified chunk is ready
|
// WaitForChunk waits until the specified chunk is ready
|
||||||
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
|
func (downloader *ChunkDownloader) WaitForChunk(chunkIndex int) (chunk *Chunk) {
|
||||||
|
|
||||||
// Reclain any chunk not needed
|
// Reclain any chunk not needed
|
||||||
downloader.Reclaim(chunkIndex)
|
downloader.Reclaim(chunkIndex)
|
||||||
|
|
||||||
// If we haven't started download the specified chunk, download it now
|
// If we haven't started download the specified chunk, download it now
|
||||||
if !downloader.taskList[chunkIndex].isDownloading {
|
if !downloader.taskList[chunkIndex].isDownloading {
|
||||||
LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
|
LOG_DEBUG("DOWNLOAD_FETCH", "Fetching chunk %s",
|
||||||
downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
|
downloader.config.GetChunkIDFromHash(downloader.taskList[chunkIndex].chunkHash))
|
||||||
downloader.taskQueue <- downloader.taskList[chunkIndex]
|
downloader.taskQueue <- downloader.taskList[chunkIndex]
|
||||||
downloader.taskList[chunkIndex].isDownloading = true
|
downloader.taskList[chunkIndex].isDownloading = true
|
||||||
downloader.numberOfDownloadingChunks++
|
downloader.numberOfDownloadingChunks++
|
||||||
downloader.numberOfActiveChunks++
|
downloader.numberOfActiveChunks++
|
||||||
}
|
}
|
||||||
|
|
||||||
// We also need to look ahead and prefetch other chunks as many as permitted by the number of threads
|
// We also need to look ahead and prefetch other chunks as many as permitted by the number of threads
|
||||||
for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
|
for i := chunkIndex + 1; i < len(downloader.taskList); i++ {
|
||||||
if downloader.numberOfActiveChunks >= downloader.threads {
|
if downloader.numberOfActiveChunks >= downloader.threads {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
task := &downloader.taskList[i]
|
task := &downloader.taskList[i]
|
||||||
if !task.needed {
|
if !task.needed {
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
if !task.isDownloading {
|
if !task.isDownloading {
|
||||||
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
|
LOG_DEBUG("DOWNLOAD_PREFETCH", "Prefetching chunk %s", downloader.config.GetChunkIDFromHash(task.chunkHash))
|
||||||
downloader.taskQueue <- *task
|
downloader.taskQueue <- *task
|
||||||
task.isDownloading = true
|
task.isDownloading = true
|
||||||
downloader.numberOfDownloadingChunks++
|
downloader.numberOfDownloadingChunks++
|
||||||
downloader.numberOfActiveChunks++
|
downloader.numberOfActiveChunks++
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Now wait until the chunk to be downloaded appears in the completed tasks
|
// Now wait until the chunk to be downloaded appears in the completed tasks
|
||||||
for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
|
for _, found := downloader.completedTasks[chunkIndex]; !found; _, found = downloader.completedTasks[chunkIndex] {
|
||||||
completion := <- downloader.completionChannel
|
completion := <-downloader.completionChannel
|
||||||
downloader.completedTasks[completion.chunkIndex] = true
|
downloader.completedTasks[completion.chunkIndex] = true
|
||||||
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||||
downloader.numberOfDownloadedChunks++
|
downloader.numberOfDownloadedChunks++
|
||||||
downloader.numberOfDownloadingChunks--
|
downloader.numberOfDownloadingChunks--
|
||||||
}
|
}
|
||||||
return downloader.taskList[chunkIndex].chunk
|
return downloader.taskList[chunkIndex].chunk
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop terminates all downloading goroutines
|
// Stop terminates all downloading goroutines
|
||||||
func (downloader *ChunkDownloader) Stop() {
|
func (downloader *ChunkDownloader) Stop() {
|
||||||
for downloader.numberOfDownloadingChunks > 0 {
|
for downloader.numberOfDownloadingChunks > 0 {
|
||||||
completion := <- downloader.completionChannel
|
completion := <-downloader.completionChannel
|
||||||
downloader.completedTasks[completion.chunkIndex] = true
|
downloader.completedTasks[completion.chunkIndex] = true
|
||||||
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
downloader.taskList[completion.chunkIndex].chunk = completion.chunk
|
||||||
downloader.numberOfDownloadedChunks++
|
downloader.numberOfDownloadedChunks++
|
||||||
downloader.numberOfDownloadingChunks--
|
downloader.numberOfDownloadingChunks--
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, _ := range downloader.completedTasks {
|
for i, _ := range downloader.completedTasks {
|
||||||
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
downloader.config.PutChunk(downloader.taskList[i].chunk)
|
||||||
downloader.taskList[i].chunk = nil
|
downloader.taskList[i].chunk = nil
|
||||||
downloader.numberOfActiveChunks--
|
downloader.numberOfActiveChunks--
|
||||||
}
|
}
|
||||||
|
|
||||||
for i := 0; i < downloader.threads; i++ {
|
for i := 0; i < downloader.threads; i++ {
|
||||||
downloader.stopChannel <- true
|
downloader.stopChannel <- true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Download downloads a chunk from the storage.
|
// Download downloads a chunk from the storage.
|
||||||
func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadTask) bool {
|
func (downloader *ChunkDownloader) Download(threadIndex int, task ChunkDownloadTask) bool {
|
||||||
|
|
||||||
cachedPath := ""
|
cachedPath := ""
|
||||||
chunk := downloader.config.GetChunk()
|
chunk := downloader.config.GetChunk()
|
||||||
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
|
chunkID := downloader.config.GetChunkIDFromHash(task.chunkHash)
|
||||||
|
|
||||||
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
|
if downloader.snapshotCache != nil && downloader.storage.IsCacheNeeded() {
|
||||||
|
|
||||||
var exist bool
|
var exist bool
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
// Reset the chunk with a hasher -- we're reading from the cache where chunk are not encrypted or compressed
|
// Reset the chunk with a hasher -- we're reading from the cache where chunk are not encrypted or compressed
|
||||||
chunk.Reset(true)
|
chunk.Reset(true)
|
||||||
|
|
||||||
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
cachedPath, exist, _, err = downloader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
|
LOG_WARN("DOWNLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
|
||||||
} else if exist {
|
} else if exist {
|
||||||
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
|
err = downloader.snapshotCache.DownloadFile(0, cachedPath, chunk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
|
LOG_WARN("DOWNLOAD_CACHE", "Failed to load the chunk %s from the snapshot cache: %v", chunkID, err)
|
||||||
} else {
|
} else {
|
||||||
actualChunkID := chunk.GetID()
|
actualChunkID := chunk.GetID()
|
||||||
if actualChunkID != chunkID {
|
if actualChunkID != chunkID {
|
||||||
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
|
LOG_WARN("DOWNLOAD_CACHE_CORRUPTED",
|
||||||
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
|
"The chunk %s load from the snapshot cache has a hash id of %s", chunkID, actualChunkID)
|
||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
|
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been loaded from the snapshot cache", chunkID)
|
||||||
|
|
||||||
downloader.completionChannel <- ChunkDownloadCompletion{ chunk: chunk, chunkIndex:task.chunkIndex }
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
|
// Reset the chunk without a hasher -- the downloaded content will be encrypted and/or compressed and the hasher
|
||||||
// will be set up before the encryption
|
// will be set up before the encryption
|
||||||
chunk.Reset(false)
|
chunk.Reset(false)
|
||||||
|
|
||||||
// Find the chunk by ID first.
|
// Find the chunk by ID first.
|
||||||
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
|
chunkPath, exist, _, err := downloader.storage.FindChunk(threadIndex, chunkID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if !exist {
|
if !exist {
|
||||||
// No chunk is found. Have to find it in the fossil pool again.
|
// No chunk is found. Have to find it in the fossil pool again.
|
||||||
chunkPath, exist, _, err = downloader.storage.FindChunk(threadIndex, chunkID, true)
|
chunkPath, exist, _, err = downloader.storage.FindChunk(threadIndex, chunkID, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
LOG_ERROR("DOWNLOAD_CHUNK", "Failed to find the chunk %s: %v", chunkID, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if !exist {
|
if !exist {
|
||||||
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
// A chunk is not found. This is a serious error and hopefully it will never happen.
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found: %v", chunkID, err)
|
||||||
} else {
|
} else {
|
||||||
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
LOG_FATAL("DOWNLOAD_CHUNK", "Chunk %s can't be found", chunkID)
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
|
LOG_DEBUG("CHUNK_FOSSIL", "Chunk %s has been marked as a fossil", chunkID)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
|
err = downloader.storage.DownloadFile(threadIndex, chunkPath, chunk)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
|
LOG_ERROR("UPLOAD_FATAL", "Failed to download the chunk %s: %v", chunkID, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
||||||
|
if err != nil {
|
||||||
|
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
err = chunk.Decrypt(downloader.config.ChunkKey, task.chunkHash)
|
actualChunkID := chunk.GetID()
|
||||||
if err != nil {
|
if actualChunkID != chunkID {
|
||||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to decrypt the chunk %s: %v", chunkID, err)
|
LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
actualChunkID := chunk.GetID()
|
if len(cachedPath) > 0 {
|
||||||
if actualChunkID != chunkID {
|
// Save a copy to the local snapshot cache
|
||||||
LOG_FATAL("UPLOAD_CORRUPTED", "The chunk %s has a hash id of %s", chunkID, actualChunkID)
|
err = downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
|
||||||
return false
|
if err != nil {
|
||||||
}
|
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if len(cachedPath) > 0 {
|
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
|
||||||
// Save a copy to the local snapshot cache
|
|
||||||
err = downloader.snapshotCache.UploadFile(threadIndex, cachedPath, chunk.GetBytes())
|
|
||||||
if err != nil {
|
|
||||||
LOG_WARN("DOWNLOAD_CACHE", "Failed to add the chunk %s to the snapshot cache: %v", chunkID, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
downloadedChunkSize := atomic.AddInt64(&downloader.downloadedChunkSize, int64(chunk.GetLength()))
|
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
|
||||||
|
|
||||||
if (downloader.showStatistics || IsTracing()) && downloader.totalChunkSize > 0 {
|
now := time.Now().Unix()
|
||||||
|
if now <= downloader.startTime {
|
||||||
|
now = downloader.startTime + 1
|
||||||
|
}
|
||||||
|
speed := downloadedChunkSize / (now - downloader.startTime)
|
||||||
|
remainingTime := int64(0)
|
||||||
|
if speed > 0 {
|
||||||
|
remainingTime = (downloader.totalChunkSize-downloadedChunkSize)/speed + 1
|
||||||
|
}
|
||||||
|
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
||||||
|
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
||||||
|
task.chunkIndex+1, chunk.GetLength(),
|
||||||
|
PrettySize(speed), PrettyTime(remainingTime), percentage/10)
|
||||||
|
} else {
|
||||||
|
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
|
||||||
|
}
|
||||||
|
|
||||||
now := time.Now().Unix()
|
downloader.completionChannel <- ChunkDownloadCompletion{chunk: chunk, chunkIndex: task.chunkIndex}
|
||||||
if now <= downloader.startTime {
|
return true
|
||||||
now = downloader.startTime + 1
|
|
||||||
}
|
|
||||||
speed := downloadedChunkSize / (now - downloader.startTime)
|
|
||||||
remainingTime := int64(0)
|
|
||||||
if speed > 0 {
|
|
||||||
remainingTime = (downloader.totalChunkSize - downloadedChunkSize) / speed + 1
|
|
||||||
}
|
|
||||||
percentage := float32(downloadedChunkSize * 1000 / downloader.totalChunkSize)
|
|
||||||
LOG_INFO("DOWNLOAD_PROGRESS", "Downloaded chunk %d size %d, %sB/s %s %.1f%%",
|
|
||||||
task.chunkIndex + 1, chunk.GetLength(),
|
|
||||||
PrettySize(speed), PrettyTime(remainingTime), percentage / 10)
|
|
||||||
} else {
|
|
||||||
LOG_DEBUG("CHUNK_DOWNLOAD", "Chunk %s has been downloaded", chunkID)
|
|
||||||
}
|
|
||||||
|
|
||||||
downloader.completionChannel <- ChunkDownloadCompletion{ chunk: chunk, chunkIndex:task.chunkIndex }
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|||||||
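The downloader above is a bounded worker pool: tasks go out on taskQueue, results come back on completionChannel, and completedTasks buffers out-of-order arrivals until the index that WaitForChunk is waiting for shows up. The following standalone sketch is not part of this commit; all names (miniTask, miniResult, fakeFetch) are invented for illustration. It shows the same pattern in miniature, including the drain-before-stop step that Stop performs.

package main

import (
	"fmt"
	"time"
)

type miniTask struct{ index int }
type miniResult struct {
	index int
	data  string
}

// fakeFetch stands in for the real Storage.DownloadFile call.
func fakeFetch(index int) string {
	time.Sleep(10 * time.Millisecond)
	return fmt.Sprintf("chunk-%d", index)
}

func main() {
	threads := 4
	tasks := make(chan miniTask, threads)
	results := make(chan miniResult)
	stop := make(chan bool)

	// Worker pool: each goroutine loops on the task queue until told to stop.
	for i := 0; i < threads; i++ {
		go func() {
			for {
				select {
				case t := <-tasks:
					results <- miniResult{index: t.index, data: fakeFetch(t.index)}
				case <-stop:
					return
				}
			}
		}()
	}

	// Enqueue a window of tasks, then wait for a specific index, buffering any
	// completions that arrive out of order -- the same idea as completedTasks.
	for i := 0; i < threads; i++ {
		tasks <- miniTask{index: i}
	}
	outstanding := threads
	completed := map[int]string{}
	want := 2
	for completed[want] == "" {
		r := <-results
		completed[r.index] = r.data
		outstanding--
	}
	fmt.Println("got", completed[want])

	// Drain in-flight completions before stopping, mirroring ChunkDownloader.Stop;
	// otherwise a worker blocked on the results channel could never see the stop signal.
	for ; outstanding > 0; outstanding-- {
		<-results
	}
	for i := 0; i < threads; i++ {
		stop <- true
	}
}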
@@ -5,294 +5,293 @@
package duplicacy

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"io"
)

// ChunkMaker breaks data into chunks using buzhash. To save memory, the chunk maker only uses a circular buffer
// whose size is double the minimum chunk size.
type ChunkMaker struct {
	maximumChunkSize int
	minimumChunkSize int
	bufferCapacity   int

	hashMask    uint64
	randomTable [256]uint64

	buffer      []byte
	bufferSize  int
	bufferStart int

	config *Config

	hashOnly      bool
	hashOnlyChunk *Chunk
}

// CreateChunkMaker creates a chunk maker. 'randomSeed' is used to generate the character-to-integer table needed by
// buzhash.
func CreateChunkMaker(config *Config, hashOnly bool) *ChunkMaker {
	size := 1
	for size*2 <= config.AverageChunkSize {
		size *= 2
	}

	if size != config.AverageChunkSize {
		LOG_FATAL("CHUNK_SIZE", "Invalid average chunk size: %d is not a power of 2", config.AverageChunkSize)
		return nil
	}

	maker := &ChunkMaker{
		hashMask:         uint64(config.AverageChunkSize - 1),
		maximumChunkSize: config.MaximumChunkSize,
		minimumChunkSize: config.MinimumChunkSize,
		bufferCapacity:   2 * config.MinimumChunkSize,
		config:           config,
		hashOnly:         hashOnly,
	}

	if hashOnly {
		maker.hashOnlyChunk = CreateChunk(config, false)
	}

	randomData := sha256.Sum256(config.ChunkSeed)

	for i := 0; i < 64; i++ {
		for j := 0; j < 4; j++ {
			maker.randomTable[4*i+j] = binary.LittleEndian.Uint64(randomData[8*j : 8*j+8])
		}
		randomData = sha256.Sum256(randomData[:])
	}

	maker.buffer = make([]byte, 2*config.MinimumChunkSize)

	return maker
}

func rotateLeft(value uint64, bits uint) uint64 {
	return (value << (bits & 0x3f)) | (value >> (64 - (bits & 0x3f)))
}

func rotateLeftByOne(value uint64) uint64 {
	return (value << 1) | (value >> 63)
}

func (maker *ChunkMaker) buzhashSum(sum uint64, data []byte) uint64 {
	for i := 0; i < len(data); i++ {
		sum = rotateLeftByOne(sum) ^ maker.randomTable[data[i]]
	}
	return sum
}

func (maker *ChunkMaker) buzhashUpdate(sum uint64, out byte, in byte, length int) uint64 {
	return rotateLeftByOne(sum) ^ rotateLeft(maker.randomTable[out], uint(length)) ^ maker.randomTable[in]
}

// ForEachChunk reads data from 'reader'. If EOF is encountered, it will call 'nextReader' to ask for the next file. If
// 'nextReader' returns false, it will process the remaining data in the buffer and then quit. When a chunk is identified,
// it will call 'endOfChunk' to return the chunk size and a boolean flag indicating if it is the last chunk.
func (maker *ChunkMaker) ForEachChunk(reader io.Reader, endOfChunk func(chunk *Chunk, final bool),
	nextReader func(size int64, hash string) (io.Reader, bool)) {

	maker.bufferStart = 0
	maker.bufferSize = 0

	var minimumReached bool
	var hashSum uint64
	var chunk *Chunk

	fileSize := int64(0)
	fileHasher := maker.config.NewFileHasher()

	// Start a new chunk.
	startNewChunk := func() {
		hashSum = 0
		minimumReached = false
		if maker.hashOnly {
			chunk = maker.hashOnlyChunk
			chunk.Reset(true)
		} else {
			chunk = maker.config.GetChunk()
			chunk.Reset(true)
		}
	}

	// Move data from the buffer to the chunk.
	fill := func(count int) {
		if maker.bufferStart+count < maker.bufferCapacity {
			chunk.Write(maker.buffer[maker.bufferStart : maker.bufferStart+count])
			maker.bufferStart += count
			maker.bufferSize -= count
		} else {
			chunk.Write(maker.buffer[maker.bufferStart:])
			chunk.Write(maker.buffer[:count-(maker.bufferCapacity-maker.bufferStart)])
			maker.bufferStart = count - (maker.bufferCapacity - maker.bufferStart)
			maker.bufferSize -= count
		}
	}

	startNewChunk()

	var err error

	isEOF := false

	if maker.minimumChunkSize == maker.maximumChunkSize {

		if maker.bufferCapacity < maker.minimumChunkSize {
			maker.buffer = make([]byte, maker.minimumChunkSize)
		}

		for {
			maker.bufferStart = 0
			for maker.bufferStart < maker.minimumChunkSize && !isEOF {
				count, err := reader.Read(maker.buffer[maker.bufferStart:maker.minimumChunkSize])

				if err != nil {
					if err != io.EOF {
						LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
						return
					} else {
						isEOF = true
					}
				}
				maker.bufferStart += count
			}

			fileHasher.Write(maker.buffer[:maker.bufferStart])
			fileSize += int64(maker.bufferStart)
			chunk.Write(maker.buffer[:maker.bufferStart])

			if isEOF {
				var ok bool
				reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
				if !ok {
					endOfChunk(chunk, true)
					return
				} else {
					endOfChunk(chunk, false)
					startNewChunk()
					fileSize = 0
					fileHasher = maker.config.NewFileHasher()
					isEOF = false
				}
			} else {
				endOfChunk(chunk, false)
				startNewChunk()
			}
		}

	}

	for {

		// If the buffer still has some space left and EOF is not seen, read more data.
		for maker.bufferSize < maker.bufferCapacity && !isEOF {
			start := maker.bufferStart + maker.bufferSize
			count := maker.bufferCapacity - start
			if start >= maker.bufferCapacity {
				start -= maker.bufferCapacity
				count = maker.bufferStart - start
			}

			count, err = reader.Read(maker.buffer[start : start+count])

			if err != nil && err != io.EOF {
				LOG_ERROR("CHUNK_MAKER", "Failed to read %d bytes: %s", count, err.Error())
				return
			}

			maker.bufferSize += count
			fileHasher.Write(maker.buffer[start : start+count])
			fileSize += int64(count)

			// if EOF is seen, try to switch to the next file and continue
			if err == io.EOF {
				var ok bool
				reader, ok = nextReader(fileSize, hex.EncodeToString(fileHasher.Sum(nil)))
				if !ok {
					isEOF = true
				} else {
					fileSize = 0
					fileHasher = maker.config.NewFileHasher()
					isEOF = false
				}
			}
		}

		// Not enough data to meet the minimum chunk size requirement, so just return it as a chunk.
		if maker.bufferSize < maker.minimumChunkSize {
			fill(maker.bufferSize)
			endOfChunk(chunk, true)
			return
		}

		// Minimum chunk size has been reached. Calculate the buzhash for the minimum size chunk.
		if !minimumReached {

			bytes := maker.minimumChunkSize

			if maker.bufferStart+bytes < maker.bufferCapacity {
				hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:maker.bufferStart+bytes])
			} else {
				hashSum = maker.buzhashSum(0, maker.buffer[maker.bufferStart:])
				hashSum = maker.buzhashSum(hashSum,
					maker.buffer[:bytes-(maker.bufferCapacity-maker.bufferStart)])
			}

			if (hashSum & maker.hashMask) == 0 {
				// This is a minimum size chunk
				fill(bytes)
				endOfChunk(chunk, false)
				startNewChunk()
				continue
			}

			minimumReached = true
		}

		// Now check the buzhash of the data in the buffer, shifting one byte at a time.
		bytes := maker.bufferSize - maker.minimumChunkSize
		isEOC := false
		maxSize := maker.maximumChunkSize - chunk.GetLength()
		for i := 0; i < maker.bufferSize-maker.minimumChunkSize; i++ {
			out := maker.bufferStart + i
			if out >= maker.bufferCapacity {
				out -= maker.bufferCapacity
			}
			in := maker.bufferStart + i + maker.minimumChunkSize
			if in >= maker.bufferCapacity {
				in -= maker.bufferCapacity
			}

			hashSum = maker.buzhashUpdate(hashSum, maker.buffer[out], maker.buffer[in], maker.minimumChunkSize)
			if (hashSum&maker.hashMask) == 0 || i == maxSize-maker.minimumChunkSize-1 {
				// A chunk is completed.
				bytes = i + 1 + maker.minimumChunkSize
				isEOC = true
				break
			}
		}

		fill(bytes)

		if isEOC {
			if isEOF && maker.bufferSize == 0 {
				endOfChunk(chunk, true)
				return
			}
			endOfChunk(chunk, false)
			startNewChunk()
			continue
		}

		if isEOF {
			fill(maker.bufferSize)
			endOfChunk(chunk, true)
			return
		}
	}
}
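buzhashUpdate relies on a standard rolling-hash identity: rotating the window hash left by one, XOR-ing out the departing byte's table entry (rotated by the window length), and XOR-ing in the arriving byte's entry yields exactly the hash of the window shifted by one byte. The following self-contained check is not part of this commit; it re-implements the same rotate/XOR definitions under local names (rotl, sum, update) and fills the table with arbitrary demo values, whereas duplicacy derives its table from SHA-256 of the chunk seed.

package main

import "fmt"

var table [256]uint64

func rotl(v uint64, bits uint) uint64 { return (v << (bits & 0x3f)) | (v >> (64 - (bits & 0x3f))) }

// sum computes the buzhash of a byte slice from scratch, like buzhashSum.
func sum(data []byte) uint64 {
	var s uint64
	for _, b := range data {
		s = rotl(s, 1) ^ table[b]
	}
	return s
}

// update slides the window by one byte in O(1), like buzhashUpdate.
func update(s uint64, out, in byte, length int) uint64 {
	return rotl(s, 1) ^ rotl(table[out], uint(length)) ^ table[in]
}

func main() {
	for i := range table {
		table[i] = uint64(i+1) * 0x9e3779b97f4a7c15 // arbitrary values for the demo
	}
	data := []byte("the quick brown fox jumps over the lazy dog")
	w := 16 // window size, playing the role of minimumChunkSize
	s := sum(data[:w])
	for i := 0; i+w < len(data); i++ {
		s = update(s, data[i], data[i+w], w)
		if s != sum(data[i+1 : i+1+w]) {
			fmt.Println("mismatch at offset", i)
			return
		}
	}
	fmt.Println("rolling hash matches full recomputation at every offset")
}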
@@ -5,128 +5,127 @@
package duplicacy

import (
	"bytes"
	crypto_rand "crypto/rand"
	"io"
	"math/rand"
	"sort"
	"testing"
)

func splitIntoChunks(content []byte, n, averageChunkSize, maxChunkSize, minChunkSize,
	bufferCapacity int) ([]string, int) {

	config := CreateConfig()

	config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
	config.AverageChunkSize = averageChunkSize
	config.MaximumChunkSize = maxChunkSize
	config.MinimumChunkSize = minChunkSize
	config.ChunkSeed = []byte("duplicacy")

	config.HashKey = DEFAULT_KEY
	config.IDKey = DEFAULT_KEY

	maker := CreateChunkMaker(config, false)

	var chunks []string
	totalChunkSize := 0
	totalFileSize := int64(0)

	//LOG_INFO("CHUNK_SPLIT", "bufferCapacity: %d", bufferCapacity)

	buffers := make([]*bytes.Buffer, n)
	sizes := make([]int, n)
	sizes[0] = 0
	for i := 1; i < n; i++ {
		same := true
		for same {
			same = false
			sizes[i] = rand.Int() % n
			for j := 0; j < i; j++ {
				if sizes[i] == sizes[j] {
					same = true
					break
				}
			}
		}
	}

	sort.Sort(sort.IntSlice(sizes))

	for i := 0; i < n-1; i++ {
		buffers[i] = bytes.NewBuffer(content[sizes[i]:sizes[i+1]])
	}
	buffers[n-1] = bytes.NewBuffer(content[sizes[n-1]:])

	i := 0

	maker.ForEachChunk(buffers[0],
		func(chunk *Chunk, final bool) {
			//LOG_INFO("CHUNK_SPLIT", "i: %d, chunk: %s, size: %d", i, chunk.GetHash(), size)
			chunks = append(chunks, chunk.GetHash())
			totalChunkSize += chunk.GetLength()
		},
		func(size int64, hash string) (io.Reader, bool) {
			totalFileSize += size
			i++
			if i >= len(buffers) {
				return nil, false
			}
			return buffers[i], true
		})

	if totalFileSize != int64(totalChunkSize) {
		LOG_ERROR("CHUNK_SPLIT", "total chunk size: %d, total file size: %d", totalChunkSize, totalFileSize)
	}
	return chunks, totalChunkSize
}

func TestChunkMaker(t *testing.T) {

	//sizes := [...] int { 64 }
	sizes := [...]int{64, 256, 1024, 1024 * 10}

	for _, size := range sizes {

		content := make([]byte, size)
		_, err := crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			continue
		}

		chunkArray1, totalSize1 := splitIntoChunks(content, 10, 32, 64, 16, 32)

		capacities := [...]int{32, 33, 34, 61, 62, 63, 64, 65, 66, 126, 127, 128, 129, 130,
			255, 256, 257, 511, 512, 513, 1023, 1024, 1025,
			32, 48, 64, 128, 256, 512, 1024, 2048}

		//capacities := [...]int { 32 }

		for _, capacity := range capacities {

			for _, n := range [...]int{6, 7, 8, 9, 10} {
				chunkArray2, totalSize2 := splitIntoChunks(content, n, 32, 64, 16, capacity)

				if totalSize1 != totalSize2 {
					t.Errorf("[size %d, capacity %d] total size is %d instead of %d",
						size, capacity, totalSize2, totalSize1)
				}

				if len(chunkArray1) != len(chunkArray2) {
					t.Errorf("[size %d, capacity %d] number of chunks is %d instead of %d",
						size, capacity, len(chunkArray2), len(chunkArray1))
				} else {
					for i := 0; i < len(chunkArray1); i++ {
						if chunkArray1[i] != chunkArray2[i] {
							t.Errorf("[size %d, capacity %d, chunk %d] chunk is different", size, capacity, i)
						}
					}
				}
			}
		}
	}
}
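The test above sweeps buffer capacities and reader splits to confirm that chunk boundaries depend only on content. The boundary test itself is (hashSum & hashMask) == 0 with hashMask = AverageChunkSize - 1, which is why CreateChunkMaker insists on a power of two: each byte position then has a 1/AverageChunkSize chance of ending a chunk, so boundaries land on average every AverageChunkSize bytes. A quick standalone simulation of that rate follows; it is illustrative only and not part of this commit.

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	avg := uint64(1024) // a power of two, like config.AverageChunkSize
	mask := avg - 1
	boundaries := 0
	trials := 1_000_000
	// Treat each position's buzhash as a uniformly random 64-bit value.
	for i := 0; i < trials; i++ {
		if rand.Uint64()&mask == 0 {
			boundaries++
		}
	}
	fmt.Printf("boundary rate: 1 in %.0f bytes (expected %d)\n",
		float64(trials)/float64(boundaries), avg)
}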
@@ -5,14 +5,14 @@
package duplicacy

import (
	"sync/atomic"
	"time"
)

// ChunkUploadTask represents a chunk to be uploaded.
type ChunkUploadTask struct {
	chunk      *Chunk
	chunkIndex int
}

// ChunkUploader uploads chunks to the storage using one or more uploading goroutines. Chunks are added
@@ -20,132 +20,132 @@ type ChunkUploadTask struct {
// called when the uploading is completed. Note that ChunkUploader does not release chunks to the
// chunk pool; instead
type ChunkUploader struct {
	config        *Config              // Associated config
	storage       Storage              // Upload to this storage
	snapshotCache *FileStorage         // Used as cache if not nil; usually for uploading snapshot chunks
	threads       int                  // Number of uploading goroutines
	taskQueue     chan ChunkUploadTask // Uploading goroutines are listening on this channel for upload jobs
	stopChannel   chan bool            // Used to terminate uploading goroutines

	numberOfUploadingTasks int32 // The number of uploading tasks

	// Uploading goroutines call this function after having uploaded chunks
	completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)
}

// CreateChunkUploader creates a chunk uploader.
func CreateChunkUploader(config *Config, storage Storage, snapshotCache *FileStorage, threads int,
	completionFunc func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int)) *ChunkUploader {
	uploader := &ChunkUploader{
		config:         config,
		storage:        storage,
		snapshotCache:  snapshotCache,
		threads:        threads,
		taskQueue:      make(chan ChunkUploadTask, 1),
		stopChannel:    make(chan bool),
		completionFunc: completionFunc,
	}

	return uploader
}

// Start starts the uploading goroutines.
func (uploader *ChunkUploader) Start() {
	for i := 0; i < uploader.threads; i++ {
		go func(threadIndex int) {
			defer CatchLogException()
			for {
				select {
				case task := <-uploader.taskQueue:
					uploader.Upload(threadIndex, task)
				case <-uploader.stopChannel:
					return
				}
			}
		}(i)
	}
}

// StartChunk sends a chunk to be uploaded to a waiting uploading goroutine. It may block if all uploading goroutines are busy.
func (uploader *ChunkUploader) StartChunk(chunk *Chunk, chunkIndex int) {
	atomic.AddInt32(&uploader.numberOfUploadingTasks, 1)
	uploader.taskQueue <- ChunkUploadTask{
		chunk:      chunk,
		chunkIndex: chunkIndex,
	}
}

// Stop stops all uploading goroutines.
func (uploader *ChunkUploader) Stop() {
	for atomic.LoadInt32(&uploader.numberOfUploadingTasks) > 0 {
		time.Sleep(100 * time.Millisecond)
	}
	for i := 0; i < uploader.threads; i++ {
		uploader.stopChannel <- false
	}
}
||||||
// Upload is called by the uploading goroutines to perform the actual uploading
|
// Upload is called by the uploading goroutines to perform the actual uploading
|
||||||
func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) bool {
|
func (uploader *ChunkUploader) Upload(threadIndex int, task ChunkUploadTask) bool {
|
||||||
|
|
||||||
chunk := task.chunk
|
chunk := task.chunk
|
||||||
chunkSize := chunk.GetLength()
|
chunkSize := chunk.GetLength()
|
||||||
chunkID := chunk.GetID()
|
chunkID := chunk.GetID()
|
||||||
|
|
||||||
// For a snapshot chunk, verify that its chunk id is correct
|
// For a snapshot chunk, verify that its chunk id is correct
|
||||||
if uploader.snapshotCache != nil {
|
if uploader.snapshotCache != nil {
|
||||||
chunk.VerifyID()
|
chunk.VerifyID()
|
||||||
}
|
}
|
||||||
|
|
||||||
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
|
if uploader.snapshotCache != nil && uploader.storage.IsCacheNeeded() {
|
||||||
// Save a copy to the local snapshot.
|
// Save a copy to the local snapshot.
|
||||||
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
chunkPath, exist, _, err := uploader.snapshotCache.FindChunk(threadIndex, chunkID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
|
LOG_WARN("UPLOAD_CACHE", "Failed to find the cache path for the chunk %s: %v", chunkID, err)
|
||||||
} else if exist {
|
} else if exist {
|
||||||
LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
|
LOG_DEBUG("CHUNK_CACHE", "Chunk %s already exists in the snapshot cache", chunkID)
|
||||||
} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
|
} else if err = uploader.snapshotCache.UploadFile(threadIndex, chunkPath, chunk.GetBytes()); err != nil {
|
||||||
LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
|
LOG_WARN("UPLOAD_CACHE", "Failed to save the chunk %s to the snapshot cache: %v", chunkID, err)
|
||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
|
LOG_DEBUG("CHUNK_CACHE", "Chunk %s has been saved to the snapshot cache", chunkID)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// This returns the path the chunk file should be at.
|
// This returns the path the chunk file should be at.
|
||||||
chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
|
chunkPath, exist, _, err := uploader.storage.FindChunk(threadIndex, chunkID, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
|
LOG_ERROR("UPLOAD_CHUNK", "Failed to find the path for the chunk %s: %v", chunkID, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if exist {
|
if exist {
|
||||||
// Chunk deduplication by name in effect here.
|
// Chunk deduplication by name in effect here.
|
||||||
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
|
LOG_DEBUG("CHUNK_DUPLICATE", "Chunk %s already exists", chunkID)
|
||||||
|
|
||||||
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
|
uploader.completionFunc(chunk, task.chunkIndex, true, chunkSize, 0)
|
||||||
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
// Encrypt the chunk only after we know that it must be uploaded.
|
// Encrypt the chunk only after we know that it must be uploaded.
|
||||||
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
|
err = chunk.Encrypt(uploader.config.ChunkKey, chunk.GetHash())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
|
LOG_ERROR("UPLOAD_CHUNK", "Failed to encrypt the chunk %s: %v", chunkID, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
if !uploader.config.dryRun {
|
if !uploader.config.dryRun {
|
||||||
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
|
err = uploader.storage.UploadFile(threadIndex, chunkPath, chunk.GetBytes())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
|
LOG_ERROR("UPLOAD_CHUNK", "Failed to upload the chunk %s: %v", chunkID, err)
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
|
LOG_DEBUG("CHUNK_UPLOAD", "Chunk %s has been uploaded", chunkID)
|
||||||
} else {
|
} else {
|
||||||
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
|
LOG_DEBUG("CHUNK_UPLOAD", "Uploading was skipped for chunk %s", chunkID)
|
||||||
}
|
}
|
||||||
|
|
||||||
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
|
uploader.completionFunc(chunk, task.chunkIndex, false, chunkSize, chunk.GetLength())
|
||||||
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
atomic.AddInt32(&uploader.numberOfUploadingTasks, -1)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|||||||
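Upload reports every outcome through completionFunc, passing the original chunk size and the post-encryption upload size (zero when the chunk was deduplicated). A caller can aggregate these into transfer statistics; the callback below is a hypothetical example of such an aggregator, with the *Chunk argument dropped since that type lives inside duplicacy.

package main

import "fmt"

type chunkStats struct{ totalBytes, uploadedBytes, skippedChunks int }

func main() {
    stats := &chunkStats{}
    // Mirrors the completionFunc parameters used by CreateChunkUploader:
    // (chunkIndex, skipped, chunkSize, uploadSize).
    completion := func(chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
        stats.totalBytes += chunkSize
        if skipped {
            stats.skippedChunks++ // deduplicated by name: uploadSize is 0
        } else {
            stats.uploadedBytes += uploadSize // post-encryption size
        }
    }
    completion(0, false, 1000, 1016)
    completion(1, true, 2000, 0)
    fmt.Printf("%+v\n", *stats)
}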
@@ -5,126 +5,124 @@
 package duplicacy

 import (
     "os"
-    "time"
     "path"
+    "runtime/debug"
     "testing"
-    "runtime/debug"
+    "time"

     crypto_rand "crypto/rand"
     "math/rand"
 )

 func TestUploaderAndDownloader(t *testing.T) {

     rand.Seed(time.Now().UnixNano())
     setTestingT(t)
     SetLoggingLevel(INFO)

     defer func() {
         if r := recover(); r != nil {
             switch e := r.(type) {
             case Exception:
                 t.Errorf("%s %s", e.LogID, e.Message)
                 debug.PrintStack()
             default:
                 t.Errorf("%v", e)
                 debug.PrintStack()
             }
         }
-    } ()
+    }()

     testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
     os.RemoveAll(testDir)
     os.MkdirAll(testDir, 0700)

     t.Logf("storage: %s", testStorageName)

     storage, err := loadStorage(testDir, 1)
     if err != nil {
         t.Errorf("Failed to create storage: %v", err)
         return
     }
     storage.EnableTestMode()
     storage.SetRateLimits(testRateLimit, testRateLimit)

-    for _, dir := range []string { "chunks", "snapshots" } {
+    for _, dir := range []string{"chunks", "snapshots"} {
         err = storage.CreateDirectory(0, dir)
         if err != nil {
             t.Errorf("Failed to create directory %s: %v", dir, err)
             return
         }
     }

-
-
     numberOfChunks := 100
     maxChunkSize := 64 * 1024

     if testQuickMode {
         numberOfChunks = 10
     }

     var chunks []*Chunk

     config := CreateConfig()
     config.MinimumChunkSize = 100
-    config.chunkPool = make(chan *Chunk, numberOfChunks * 2)
+    config.chunkPool = make(chan *Chunk, numberOfChunks*2)
     totalFileSize := 0

     for i := 0; i < numberOfChunks; i++ {
-        content := make([]byte, rand.Int() % maxChunkSize + 1)
+        content := make([]byte, rand.Int()%maxChunkSize+1)
         _, err = crypto_rand.Read(content)
         if err != nil {
             t.Errorf("Error generating random content: %v", err)
             return
         }

         chunk := CreateChunk(config, true)
         chunk.Reset(true)
         chunk.Write(content)
         chunks = append(chunks, chunk)

         t.Logf("Chunk: %s, size: %d", chunk.GetID(), chunk.GetLength())
         totalFileSize += chunk.GetLength()
     }

     completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
         t.Logf("Chunk %s size %d (%d/%d) uploaded", chunk.GetID(), chunkSize, chunkIndex, len(chunks))
     }

     chunkUploader := CreateChunkUploader(config, storage, nil, testThreads, nil)
     chunkUploader.completionFunc = completionFunc
     chunkUploader.Start()

     for i, chunk := range chunks {
         chunkUploader.StartChunk(chunk, i)
     }

     chunkUploader.Stop()

     chunkDownloader := CreateChunkDownloader(config, storage, nil, true, testThreads)
     chunkDownloader.totalChunkSize = int64(totalFileSize)

     for _, chunk := range chunks {
         chunkDownloader.AddChunk(chunk.GetHash())
     }

     for i, chunk := range chunks {
         downloaded := chunkDownloader.WaitForChunk(i)
         if downloaded.GetID() != chunk.GetID() {
             t.Errorf("Uploaded: %s, downloaded: %s", chunk.GetID(), downloaded.GetID())
         }
     }

     chunkDownloader.Stop()

     for _, file := range listChunks(storage) {
-        err = storage.DeleteFile(0, "chunks/" + file)
+        err = storage.DeleteFile(0, "chunks/"+file)
         if err != nil {
             t.Errorf("Failed to delete the file %s: %v", file, err)
             return
         }
     }
 }
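The test above fills each chunk with crypto/rand bytes at a math/rand-chosen size, so that no two chunks deduplicate by accident while the sizes still vary. A minimal standalone version of that generator, with illustrative names:

package main

import (
    crypto_rand "crypto/rand"
    "fmt"
    "math/rand"
)

// randomContent mirrors the test fixture above: math/rand picks an uneven
// size, crypto/rand supplies bytes that will not collide across chunks.
func randomContent(maxSize int) ([]byte, error) {
    content := make([]byte, rand.Intn(maxSize)+1)
    if _, err := crypto_rand.Read(content); err != nil {
        return nil, err
    }
    return content, nil
}

func main() {
    c, err := randomContent(64 * 1024)
    if err != nil {
        panic(err)
    }
    fmt.Println("generated", len(c), "bytes")
}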
@@ -5,20 +5,20 @@
 package duplicacy

 import (
-    "encoding/json"
-    "bytes"
-    "os"
-    "fmt"
-    "hash"
-    "runtime"
-    "runtime/debug"
-    "sync/atomic"
-    "crypto/rand"
-    "crypto/hmac"
-    "crypto/sha256"
-    "encoding/hex"
+    "bytes"
+    "crypto/hmac"
+    "crypto/rand"
+    "crypto/sha256"
+    "encoding/hex"
+    "encoding/json"
+    "fmt"
+    "hash"
+    "os"
+    "runtime"
+    "runtime/debug"
+    "sync/atomic"

     blake2 "github.com/minio/blake2b-simd"
 )

 // If encryption is turned off, use this key for HMAC-SHA256 or chunk ID generation etc.
@@ -29,209 +29,209 @@ var DEFAULT_KEY = []byte("duplicacy")
 var DEFAULT_COMPRESSION_LEVEL = 100

 type Config struct {
     CompressionLevel int `json:"compression-level"`
     AverageChunkSize int `json:"average-chunk-size"`
     MaximumChunkSize int `json:"max-chunk-size"`
     MinimumChunkSize int `json:"min-chunk-size"`

     ChunkSeed []byte `json:"chunk-seed"`

     // Use HMAC-SHA256(hashKey, plaintext) as the chunk hash.
     // Use HMAC-SHA256(idKey, chunk hash) as the file name of the chunk
     // For chunks, use HMAC-SHA256(chunkKey, chunk hash) as the encryption key
     // For files, use HMAC-SHA256(fileKey, file path) as the encryption key

     // the HMAC-SHA256 key of the chunk data
     HashKey []byte `json:"-"`

     // used to generate an id from the chunk hash
     IDKey []byte `json:"-"`

     // for encrypting a chunk
     ChunkKey []byte `json:"-"`

     // for encrypting a non-chunk file
     FileKey []byte `json:"-"`

     chunkPool      chan *Chunk `json:"-"`
     numberOfChunks int32
     dryRun         bool
 }

 // Create an alias to avoid recursive calls on Config.MarshalJSON
 type aliasedConfig Config

 type jsonableConfig struct {
     *aliasedConfig
     ChunkSeed string `json:"chunk-seed"`
     HashKey   string `json:"hash-key"`
     IDKey     string `json:"id-key"`
     ChunkKey  string `json:"chunk-key"`
     FileKey   string `json:"file-key"`
 }

-func (config *Config) MarshalJSON() ([] byte, error) {
+func (config *Config) MarshalJSON() ([]byte, error) {

-    return json.Marshal(&jsonableConfig {
+    return json.Marshal(&jsonableConfig{
         aliasedConfig: (*aliasedConfig)(config),
         ChunkSeed:     hex.EncodeToString(config.ChunkSeed),
         HashKey:       hex.EncodeToString(config.HashKey),
         IDKey:         hex.EncodeToString(config.IDKey),
         ChunkKey:      hex.EncodeToString(config.ChunkKey),
         FileKey:       hex.EncodeToString(config.FileKey),
     })
 }

 func (config *Config) UnmarshalJSON(description []byte) (err error) {

-    aliased := &jsonableConfig {
+    aliased := &jsonableConfig{
         aliasedConfig: (*aliasedConfig)(config),
     }

     if err = json.Unmarshal(description, &aliased); err != nil {
         return err
     }

     if config.ChunkSeed, err = hex.DecodeString(aliased.ChunkSeed); err != nil {
         return fmt.Errorf("Invalid representation of the chunk seed in the config")
     }
     if config.HashKey, err = hex.DecodeString(aliased.HashKey); err != nil {
         return fmt.Errorf("Invalid representation of the hash key in the config")
     }
     if config.IDKey, err = hex.DecodeString(aliased.IDKey); err != nil {
         return fmt.Errorf("Invalid representation of the id key in the config")
     }
     if config.ChunkKey, err = hex.DecodeString(aliased.ChunkKey); err != nil {
         return fmt.Errorf("Invalid representation of the chunk key in the config")
     }
     if config.FileKey, err = hex.DecodeString(aliased.FileKey); err != nil {
         return fmt.Errorf("Invalid representation of the file key in the config")
     }

     return nil
 }
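MarshalJSON and UnmarshalJSON above rely on the type alias aliasedConfig to serialize the struct without re-entering themselves: the alias copies the fields but not the methods, so marshalling it does not recurse. A self-contained sketch of the same idiom, using hypothetical types rather than duplicacy's:

package main

import (
    "encoding/json"
    "fmt"
)

type secretConfig struct {
    Level int    `json:"level"`
    Key   []byte `json:"-"`
}

// The alias has the same fields but none of the methods, so marshalling
// it does not re-enter MarshalJSON -- the same trick as aliasedConfig.
type aliasedSecretConfig secretConfig

type jsonableSecretConfig struct {
    *aliasedSecretConfig
    Key string `json:"key"` // encoded override of the hidden field
}

func (c *secretConfig) MarshalJSON() ([]byte, error) {
    return json.Marshal(&jsonableSecretConfig{
        aliasedSecretConfig: (*aliasedSecretConfig)(c),
        Key:                 fmt.Sprintf("%x", c.Key),
    })
}

func main() {
    out, _ := json.Marshal(&secretConfig{Level: 100, Key: []byte("k")})
    fmt.Println(string(out)) // {"level":100,"key":"6b"}
}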

 func (config *Config) IsCompatiableWith(otherConfig *Config) bool {

     return config.CompressionLevel == otherConfig.CompressionLevel &&
         config.AverageChunkSize == otherConfig.AverageChunkSize &&
         config.MaximumChunkSize == otherConfig.MaximumChunkSize &&
         config.MinimumChunkSize == otherConfig.MinimumChunkSize &&
         bytes.Equal(config.ChunkSeed, otherConfig.ChunkSeed) &&
         bytes.Equal(config.HashKey, otherConfig.HashKey)
 }

 func (config *Config) Print() {

     LOG_INFO("CONFIG_INFO", "Compression level: %d", config.CompressionLevel)
     LOG_INFO("CONFIG_INFO", "Average chunk size: %d", config.AverageChunkSize)
     LOG_INFO("CONFIG_INFO", "Maximum chunk size: %d", config.MaximumChunkSize)
     LOG_INFO("CONFIG_INFO", "Minimum chunk size: %d", config.MinimumChunkSize)
     LOG_INFO("CONFIG_INFO", "Chunk seed: %x", config.ChunkSeed)
 }

 func CreateConfigFromParameters(compressionLevel int, averageChunkSize int, maximumChunkSize int, minimumChunkSize int,
     isEncrypted bool, copyFrom *Config) (config *Config) {

-    config = &Config {
+    config = &Config{
         CompressionLevel: compressionLevel,
         AverageChunkSize: averageChunkSize,
         MaximumChunkSize: maximumChunkSize,
         MinimumChunkSize: minimumChunkSize,
     }

     if isEncrypted {
         // Randomly generate keys
-        keys := make([]byte, 32 * 5)
+        keys := make([]byte, 32*5)
         _, err := rand.Read(keys)
         if err != nil {
             LOG_ERROR("CONFIG_KEY", "Failed to generate random keys: %v", err)
             return nil
         }

         config.ChunkSeed = keys[:32]
         config.HashKey = keys[32:64]
         config.IDKey = keys[64:96]
         config.ChunkKey = keys[96:128]
         config.FileKey = keys[128:]
     } else {
         config.ChunkSeed = DEFAULT_KEY
         config.HashKey = DEFAULT_KEY
         config.IDKey = DEFAULT_KEY
     }

     if copyFrom != nil {
         config.CompressionLevel = copyFrom.CompressionLevel

         config.AverageChunkSize = copyFrom.AverageChunkSize
         config.MaximumChunkSize = copyFrom.MaximumChunkSize
         config.MinimumChunkSize = copyFrom.MinimumChunkSize

         config.ChunkSeed = copyFrom.ChunkSeed
         config.HashKey = copyFrom.HashKey
     }

-    config.chunkPool = make(chan *Chunk, runtime.NumCPU() * 16)
+    config.chunkPool = make(chan *Chunk, runtime.NumCPU()*16)

     return config
 }

 func CreateConfig() (config *Config) {
-    return &Config {
+    return &Config{
         HashKey:          DEFAULT_KEY,
         IDKey:            DEFAULT_KEY,
         CompressionLevel: DEFAULT_COMPRESSION_LEVEL,
-        chunkPool: make(chan *Chunk, runtime.NumCPU() * 16),
+        chunkPool:        make(chan *Chunk, runtime.NumCPU()*16),
     }
 }

 func (config *Config) GetChunk() (chunk *Chunk) {
     select {
-    case chunk = <- config.chunkPool :
+    case chunk = <-config.chunkPool:
     default:
         numberOfChunks := atomic.AddInt32(&config.numberOfChunks, 1)
-        if numberOfChunks >= int32(runtime.NumCPU() * 16) {
+        if numberOfChunks >= int32(runtime.NumCPU()*16) {
             LOG_WARN("CONFIG_CHUNK", "%d chunks have been allocated", numberOfChunks)
             if _, found := os.LookupEnv("DUPLICACY_CHUNK_DEBUG"); found {
                 debug.PrintStack()
             }
         }
         chunk = CreateChunk(config, true)
     }
     return chunk
 }

-func (config *Config) PutChunk(chunk *Chunk){
+func (config *Config) PutChunk(chunk *Chunk) {

     if chunk == nil {
         return
     }

     select {
     case config.chunkPool <- chunk:
     default:
         LOG_INFO("CHUNK_BUFFER", "Discarding a free chunk due to a full pool")
     }
 }

 func (config *Config) NewKeyedHasher(key []byte) hash.Hash {
     if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
-        hasher, err := blake2.New(&blake2.Config{ Size: 32, Key:key })
+        hasher, err := blake2.New(&blake2.Config{Size: 32, Key: key})
         if err != nil {
             LOG_ERROR("HASH_KEY", "Invalid hash key: %x", key)
         }
         return hasher
     } else {
         return hmac.New(sha256.New, key)
     }
 }

 var SkipFileHash = false

 func init() {
     if value, found := os.LookupEnv("DUPLICACY_SKIP_FILE_HASH"); found && value != "" && value != "0" {
         SkipFileHash = true
     }
 }

 // Implement a dummy hasher to be used when SkipFileHash is true.
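GetChunk and PutChunk above implement a free list as a buffered channel: a non-blocking receive falls back to allocation, and a non-blocking send discards the buffer when the pool is full. A standalone sketch of the idiom, with a plain byte buffer standing in for *Chunk:

package main

import "fmt"

// A buffered channel as a free list, as in Config.GetChunk/PutChunk:
// take from the pool if one is ready, otherwise allocate; on return,
// drop the buffer if the pool is already full.
type bufferPool struct{ pool chan []byte }

func newBufferPool(capacity int) *bufferPool {
    return &bufferPool{pool: make(chan []byte, capacity)}
}

func (p *bufferPool) get() []byte {
    select {
    case b := <-p.pool:
        return b
    default:
        return make([]byte, 0, 4096) // stand-in for CreateChunk
    }
}

func (p *bufferPool) put(b []byte) {
    select {
    case p.pool <- b[:0]:
    default: // pool full: let the GC reclaim it
    }
}

func main() {
    p := newBufferPool(2)
    b := p.get()
    p.put(b)
    fmt.Println("pooled buffers:", len(p.pool))
}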
@@ -239,190 +239,189 @@ type DummyHasher struct {
 }

 func (hasher *DummyHasher) Write(p []byte) (int, error) {
     return len(p), nil
 }

 func (hasher *DummyHasher) Sum(b []byte) []byte {
     return []byte("")
 }

 func (hasher *DummyHasher) Reset() {
 }

 func (hasher *DummyHasher) Size() int {
     return 0
 }

 func (hasher *DummyHasher) BlockSize() int {
     return 0
 }

 func (config *Config) NewFileHasher() hash.Hash {
     if SkipFileHash {
-        return &DummyHasher {}
+        return &DummyHasher{}
     } else if config.CompressionLevel == DEFAULT_COMPRESSION_LEVEL {
-        hasher, _ := blake2.New(&blake2.Config{ Size: 32 })
+        hasher, _ := blake2.New(&blake2.Config{Size: 32})
         return hasher
     } else {
         return sha256.New()
     }
 }

 // Calculate the file hash using the corresponding hasher
 func (config *Config) ComputeFileHash(path string, buffer []byte) string {

     file, err := os.Open(path)
     if err != nil {
         return ""
     }

     hasher := config.NewFileHasher()
     defer file.Close()

     count := 1
     for count > 0 {
         count, err = file.Read(buffer)
         hasher.Write(buffer[:count])
     }

     return hex.EncodeToString(hasher.Sum(nil))
 }

 // GetChunkIDFromHash creates a chunk id from the chunk hash. The chunk id will be used as the name of the chunk
 // file, so it is publicly exposed. The chunk hash is the HMAC-SHA256 of what is contained in the chunk and should
 // never be exposed.
 func (config *Config) GetChunkIDFromHash(hash string) string {
     hasher := config.NewKeyedHasher(config.IDKey)
     hasher.Write([]byte(hash))
     return hex.EncodeToString(hasher.Sum(nil))
 }

 func DownloadConfig(storage Storage, password string) (config *Config, isEncrypted bool, err error) {
     // Although the default key is passed to the function call the key is not actually used since there is no need to
     // calculate the hash or id of the config file.
     configFile := CreateChunk(CreateConfig(), true)

     exist, _, _, err := storage.GetFileInfo(0, "config")
     if err != nil {
         return nil, false, err
     }

     if !exist {
         return nil, false, nil
     }

     err = storage.DownloadFile(0, "config", configFile)
     if err != nil {
         return nil, false, err
     }

     var masterKey []byte

     if len(password) > 0 {
         masterKey = GenerateKeyFromPassword(password)

         // Decrypt the config file. masterKey == nil means no encryption.
         err = configFile.Decrypt(masterKey, "")
         if err != nil {
             return nil, false, fmt.Errorf("Failed to retrieve the config file: %v", err)
         }
     }

     config = CreateConfig()

     err = json.Unmarshal(configFile.GetBytes(), config)

     if err != nil {
         if bytes.Equal(configFile.GetBytes()[:9], []byte("duplicacy")) {
             return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
         } else {
             return nil, false, fmt.Errorf("Failed to parse the config file: %v", err)
         }
     }

     return config, false, nil

 }

-func UploadConfig(storage Storage, config *Config, password string) (bool) {
+func UploadConfig(storage Storage, config *Config, password string) bool {

     // This is the key to encrypt the config file.
     var masterKey []byte

     if len(password) > 0 {

         if len(password) < 8 {
             LOG_ERROR("CONFIG_PASSWORD", "The password must be at least 8 characters")
             return false
         }

         masterKey = GenerateKeyFromPassword(password)
     }

     description, err := json.MarshalIndent(config, "", " ")
     if err != nil {
         LOG_ERROR("CONFIG_MARSHAL", "Failed to marshal the config: %v", err)
         return false
     }

     // Although the default key is passed to the function call the key is not actually used since there is no need to
     // calculate the hash or id of the config file.
     chunk := CreateChunk(CreateConfig(), true)
     chunk.Write(description)

     if len(password) > 0 {
         // Encrypt the config file with masterKey. If masterKey is nil then no encryption is performed.
         err = chunk.Encrypt(masterKey, "")

         if err != nil {
             LOG_ERROR("CONFIG_CREATE", "Failed to create the config file: %v", err)
             return false
         }
     }

     err = storage.UploadFile(0, "config", chunk.GetBytes())
     if err != nil {
         LOG_ERROR("CONFIG_INIT", "Failed to configure the storage: %v", err)
         return false
     }

     if IsTracing() {
         config.Print()
     }

-    for _, subDir := range []string {"chunks", "snapshots"} {
+    for _, subDir := range []string{"chunks", "snapshots"} {
         err = storage.CreateDirectory(0, subDir)
         if err != nil {
             LOG_ERROR("CONFIG_MKDIR", "Failed to create storage subdirectory: %v", err)
         }
     }

     return true
 }

 // ConfigStorage makes the general storage space available for storing duplicacy format snapshots. In essence,
 // it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
 // is enabled.
 func ConfigStorage(storage Storage, compressionLevel int, averageChunkSize int, maximumChunkSize int,
     minimumChunkSize int, password string, copyFrom *Config) bool {

     exist, _, _, err := storage.GetFileInfo(0, "config")
     if err != nil {
         LOG_ERROR("CONFIG_INIT", "Failed to check if there is an existing config file: %v", err)
         return false
     }

     if exist {
         LOG_INFO("CONFIG_EXIST", "The storage has already been configured")
         return false
     }

-
     config := CreateConfigFromParameters(compressionLevel, averageChunkSize, maximumChunkSize, minimumChunkSize, len(password) > 0,
         copyFrom)
     if config == nil {
         return false
     }

     return UploadConfig(storage, config, password)
 }
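The comments in Config above describe a keyed-hash chain: the chunk hash is a keyed hash of the plaintext, and the public chunk ID (the chunk's file name, see GetChunkIDFromHash) is a keyed hash of that hash, so the hash itself is never exposed. A minimal sketch of the chain using the HMAC-SHA256 fallback (at the default compression level the code selects BLAKE2 instead); the keys below are placeholders, not duplicacy's defaults.

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func hmacSHA256(key, data []byte) []byte {
    h := hmac.New(sha256.New, key)
    h.Write(data)
    return h.Sum(nil)
}

func main() {
    hashKey := []byte("hash-key") // placeholder keys
    idKey := []byte("id-key")

    plaintext := []byte("chunk contents")
    chunkHash := hmacSHA256(hashKey, plaintext) // secret, stored only in snapshots
    chunkID := hmacSHA256(idKey, chunkHash)     // public: becomes the file name

    fmt.Println("chunk id:", hex.EncodeToString(chunkID))
}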
@@ -5,292 +5,293 @@
 package duplicacy

 import (
     "fmt"
     "path"
     "strings"
-    "github.com/gilbertchen/go-dropbox"
+
+    "github.com/gilbertchen/go-dropbox"
 )

 type DropboxStorage struct {
     RateLimitedStorage

     clients    []*dropbox.Files
     storageDir string
 }

 // CreateDropboxStorage creates a dropbox storage object.
 func CreateDropboxStorage(accessToken string, storageDir string, threads int) (storage *DropboxStorage, err error) {

     var clients []*dropbox.Files
     for i := 0; i < threads; i++ {
         client := dropbox.NewFiles(dropbox.NewConfig(accessToken))
         clients = append(clients, client)
     }

     if storageDir == "" || storageDir[0] != '/' {
         storageDir = "/" + storageDir
     }

-    if len(storageDir) > 1 && storageDir[len(storageDir) - 1] == '/' {
-        storageDir = storageDir[:len(storageDir) - 1]
+    if len(storageDir) > 1 && storageDir[len(storageDir)-1] == '/' {
+        storageDir = storageDir[:len(storageDir)-1]
     }

-    storage = &DropboxStorage {
+    storage = &DropboxStorage{
         clients:    clients,
         storageDir: storageDir,
     }

     err = storage.CreateDirectory(0, "")
     if err != nil {
         return nil, fmt.Errorf("Can't create storage directory: %v", err)
     }

     return storage, nil
 }

 // ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
 func (storage *DropboxStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {

     if dir != "" && dir[0] != '/' {
         dir = "/" + dir
     }

-    if len(dir) > 1 && dir[len(dir) - 1] == '/' {
-        dir = dir[:len(dir) - 1]
+    if len(dir) > 1 && dir[len(dir)-1] == '/' {
+        dir = dir[:len(dir)-1]
     }

-    input := &dropbox.ListFolderInput {
-        Path : storage.storageDir + dir,
-        Recursive : false,
+    input := &dropbox.ListFolderInput{
+        Path:             storage.storageDir + dir,
+        Recursive:        false,
         IncludeMediaInfo: false,
         IncludeDeleted:   false,
     }

     output, err := storage.clients[threadIndex].ListFolder(input)

     for {

         if err != nil {
             return nil, nil, err
         }

         for _, entry := range output.Entries {
             name := entry.Name
             if entry.Tag == "folder" {
                 name += "/"
             }
             files = append(files, name)
             sizes = append(sizes, int64(entry.Size))
         }

         if output.HasMore {
             output, err = storage.clients[threadIndex].ListFolderContinue(
-                &dropbox.ListFolderContinueInput { Cursor: output.Cursor, })
+                &dropbox.ListFolderContinueInput{Cursor: output.Cursor})
         } else {
             break
         }

     }

     return files, sizes, nil
 }

 // DeleteFile deletes the file or directory at 'filePath'.
 func (storage *DropboxStorage) DeleteFile(threadIndex int, filePath string) (err error) {
     if filePath != "" && filePath[0] != '/' {
         filePath = "/" + filePath
     }

-    input := &dropbox.DeleteInput {
+    input := &dropbox.DeleteInput{
         Path: storage.storageDir + filePath,
     }
     _, err = storage.clients[threadIndex].Delete(input)
     if err != nil {
         if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path_lookup/not_found/") {
             return nil
         }
     }

     return err
 }

 // MoveFile renames the file.
 func (storage *DropboxStorage) MoveFile(threadIndex int, from string, to string) (err error) {
     if from != "" && from[0] != '/' {
         from = "/" + from
     }
     if to != "" && to[0] != '/' {
         to = "/" + to
     }
-    input := &dropbox.MoveInput {
+    input := &dropbox.MoveInput{
         FromPath: storage.storageDir + from,
         ToPath:   storage.storageDir + to,
     }
     _, err = storage.clients[threadIndex].Move(input)
     return err
 }

 // CreateDirectory creates a new directory.
 func (storage *DropboxStorage) CreateDirectory(threadIndex int, dir string) (err error) {
     if dir != "" && dir[0] != '/' {
         dir = "/" + dir
     }

-    if len(dir) > 1 && dir[len(dir) - 1] == '/' {
-        dir = dir[:len(dir) - 1]
+    if len(dir) > 1 && dir[len(dir)-1] == '/' {
+        dir = dir[:len(dir)-1]
     }

-    input := &dropbox.CreateFolderInput {
-        Path : storage.storageDir + dir,
+    input := &dropbox.CreateFolderInput{
+        Path: storage.storageDir + dir,
     }

     _, err = storage.clients[threadIndex].CreateFolder(input)
     if err != nil {
         if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/conflict/") {
             return nil
         }
     }
     return err
 }

 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *DropboxStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

     if filePath != "" && filePath[0] != '/' {
         filePath = "/" + filePath
     }

-    input := &dropbox.GetMetadataInput {
+    input := &dropbox.GetMetadataInput{
         Path:             storage.storageDir + filePath,
         IncludeMediaInfo: false,
     }

     output, err := storage.clients[threadIndex].GetMetadata(input)
     if err != nil {
         if e, ok := err.(*dropbox.Error); ok && strings.HasPrefix(e.Summary, "path/not_found/") {
             return false, false, 0, nil
         } else {
             return false, false, 0, err
         }
     }

     return true, output.Tag == "folder", int64(output.Size), nil
 }

 // FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
 // the suffix '.fsl'.
 func (storage *DropboxStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
     dir := "/chunks"

     suffix := ""
     if isFossil {
         suffix = ".fsl"
     }

     // The minimum level of directories to dive into before searching for the chunk file.
     minimumLevel := 1

-    for level := 0; level * 2 < len(chunkID); level ++ {
+    for level := 0; level*2 < len(chunkID); level++ {
         if level >= minimumLevel {
-            filePath = path.Join(dir, chunkID[2 * level:]) + suffix
+            filePath = path.Join(dir, chunkID[2*level:]) + suffix
             var size int64
             exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)
             if err != nil {
                 return "", false, 0, err
             }
             if exist {
                 return filePath, exist, size, nil
             }
         }

         // Find the subdirectory the chunk file may reside.
-        subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
+        subDir := path.Join(dir, chunkID[2*level:2*level+2])
         exist, _, _, err = storage.GetFileInfo(threadIndex, subDir)
         if err != nil {
             return "", false, 0, err
         }

         if exist {
             dir = subDir
             continue
         }

         if level < minimumLevel {
             // Create the subdirectory if it doesn't exist.
             err = storage.CreateDirectory(threadIndex, subDir)
             if err != nil {
                 return "", false, 0, err
             }

             dir = subDir
             continue
         }

         // The chunk must be under this subdirectory but it doesn't exist.
-        return path.Join(dir, chunkID[2 * level:])[1:] + suffix, false, 0, nil
+        return path.Join(dir, chunkID[2*level:])[1:] + suffix, false, 0, nil

     }

     LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
         chunkID)
     return "", false, 0, nil

 }
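FindChunk above walks a nested layout in which each directory level consumes two hex digits of the chunk ID and the remainder names the file, with '.fsl' appended for fossils. A standalone helper that computes the candidate path for a given level, assuming the same layout:

package main

import (
    "fmt"
    "path"
)

// chunkPathAtLevel mirrors the layout searched by FindChunk: 'level'
// two-digit directory components, then the rest of the ID as the file name.
func chunkPathAtLevel(chunkID string, level int, isFossil bool) string {
    dir := "chunks"
    for l := 0; l < level; l++ {
        dir = path.Join(dir, chunkID[2*l:2*l+2])
    }
    p := path.Join(dir, chunkID[2*level:])
    if isFossil {
        p += ".fsl"
    }
    return p
}

func main() {
    id := "4f9c01ab"
    fmt.Println(chunkPathAtLevel(id, 1, false)) // chunks/4f/9c01ab
    fmt.Println(chunkPathAtLevel(id, 2, true))  // chunks/4f/9c/01ab.fsl
}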

 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *DropboxStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

     if filePath != "" && filePath[0] != '/' {
         filePath = "/" + filePath
     }

-    input := &dropbox.DownloadInput {
+    input := &dropbox.DownloadInput{
         Path: storage.storageDir + filePath,
     }

     output, err := storage.clients[threadIndex].Download(input)
     if err != nil {
         return err
     }

     defer output.Body.Close()

-    _, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit / len(storage.clients))
+    _, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/len(storage.clients))
     return err

 }

 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *DropboxStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
     if filePath != "" && filePath[0] != '/' {
         filePath = "/" + filePath
     }

-    input := &dropbox.UploadInput {
+    input := &dropbox.UploadInput{
         Path:       storage.storageDir + filePath,
         Mode:       dropbox.WriteModeOverwrite,
         AutoRename: false,
         Mute:       true,
-        Reader: CreateRateLimitedReader(content, storage.UploadRateLimit / len(storage.clients)),
+        Reader:     CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.clients)),
     }

     _, err = storage.clients[threadIndex].Upload(input)
     return err
 }

 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *DropboxStorage) IsCacheNeeded() (bool) { return true }
+func (storage *DropboxStorage) IsCacheNeeded() bool { return true }

 // If the 'MoveFile' method is implemented.
-func (storage *DropboxStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *DropboxStorage) IsMoveFileImplemented() bool { return true }

 // If the storage can guarantee strong consistency.
-func (storage *DropboxStorage) IsStrongConsistent() (bool) { return false }
+func (storage *DropboxStorage) IsStrongConsistent() bool { return false }

 // If the storage supports fast listing of file names.
-func (storage *DropboxStorage) IsFastListing() (bool) { return false }
+func (storage *DropboxStorage) IsFastListing() bool { return false }

 // Enable the test mode.
 func (storage *DropboxStorage) EnableTestMode() {}
@@ -4,22 +4,20 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"encoding/base64"
|
||||||
"fmt"
|
"encoding/json"
|
||||||
"path/filepath"
|
"fmt"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"sort"
|
"os"
|
||||||
"regexp"
|
"path/filepath"
|
||||||
"strconv"
|
"regexp"
|
||||||
"time"
|
"runtime"
|
||||||
"encoding/json"
|
"sort"
|
||||||
"encoding/base64"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
"runtime"
|
"time"
|
||||||
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
// This is the hidden directory in the repository for storing various files.
|
// This is the hidden directory in the repository for storing various files.
|
||||||
var DUPLICACY_DIRECTORY = ".duplicacy"
|
var DUPLICACY_DIRECTORY = ".duplicacy"
|
||||||
var DUPLICACY_FILE = ".duplicacy"
|
var DUPLICACY_FILE = ".duplicacy"
|
||||||
@@ -29,546 +27,545 @@ var contentRegex = regexp.MustCompile(`^([0-9]+):([0-9]+):([0-9]+):([0-9]+)`)

// Entry encapsulates information about a file or directory.
type Entry struct {
	Path string
	Size int64
	Time int64
	Mode uint32
	Link string
	Hash string

	UID int
	GID int

	StartChunk  int
	StartOffset int
	EndChunk    int
	EndOffset   int

	Attributes map[string][]byte
}

// CreateEntry creates an entry from file properties.
func CreateEntry(path string, size int64, time int64, mode uint32) *Entry {

	if len(path) > 0 && path[len(path)-1] != '/' && (mode&uint32(os.ModeDir)) != 0 {
		path += "/"
	}

	return &Entry{
		Path: path,
		Size: size,
		Time: time,
		Mode: mode,

		UID: -1,
		GID: -1,
	}
}

// CreateEntryFromFileInfo creates an entry from a 'FileInfo' object.
func CreateEntryFromFileInfo(fileInfo os.FileInfo, directory string) *Entry {
	path := directory + fileInfo.Name()

	mode := fileInfo.Mode()

	if mode&os.ModeDir != 0 && mode&os.ModeSymlink != 0 {
		mode ^= os.ModeDir
	}

	if path[len(path)-1] != '/' && mode&os.ModeDir != 0 {
		path += "/"
	}

	entry := &Entry{
		Path: path,
		Size: fileInfo.Size(),
		Time: fileInfo.ModTime().Unix(),
		Mode: uint32(mode),
	}

	GetOwner(entry, &fileInfo)

	return entry
}

// UnmarshalJSON creates an entry from a json description.
func (entry *Entry) UnmarshalJSON(description []byte) (err error) {

	var object map[string]interface{}

	err = json.Unmarshal(description, &object)
	if err != nil {
		return err
	}

	var value interface{}
	var ok bool

	if value, ok = object["name"]; ok {
		pathInBase64, ok := value.(string)
		if !ok {
			return fmt.Errorf("Name is not a string for a file in the snapshot")
		}
		path, err := base64.StdEncoding.DecodeString(pathInBase64)
		if err != nil {
			return fmt.Errorf("Invalid name '%s' in the snapshot", pathInBase64)
		}
		entry.Path = string(path)
	} else if value, ok = object["path"]; !ok {
		return fmt.Errorf("Path is not specified for a file in the snapshot")
	} else if entry.Path, ok = value.(string); !ok {
		return fmt.Errorf("Path is not a string for a file in the snapshot")
	}

	if value, ok = object["size"]; !ok {
		return fmt.Errorf("Size is not specified for file '%s' in the snapshot", entry.Path)
	} else if _, ok = value.(float64); !ok {
		return fmt.Errorf("Size is not a valid integer for file '%s' in the snapshot", entry.Path)
	}
	entry.Size = int64(value.(float64))

	if value, ok = object["time"]; !ok {
		return fmt.Errorf("Time is not specified for file '%s' in the snapshot", entry.Path)
	} else if _, ok = value.(float64); !ok {
		return fmt.Errorf("Time is not a valid integer for file '%s' in the snapshot", entry.Path)
	}
	entry.Time = int64(value.(float64))

	if value, ok = object["mode"]; !ok {
		return fmt.Errorf("Mode is not specified for file '%s' in the snapshot", entry.Path)
	} else if _, ok = value.(float64); !ok {
		return fmt.Errorf("Mode is not a valid integer for file '%s' in the snapshot", entry.Path)
	}
	entry.Mode = uint32(value.(float64))

	if value, ok = object["hash"]; !ok {
		return fmt.Errorf("Hash is not specified for file '%s' in the snapshot", entry.Path)
	} else if entry.Hash, ok = value.(string); !ok {
		return fmt.Errorf("Hash is not a string for file '%s' in the snapshot", entry.Path)
	}

	if value, ok = object["link"]; ok {
		var link string
		if link, ok = value.(string); !ok {
			return fmt.Errorf("Symlink is not a valid string for file '%s' in the snapshot", entry.Path)
		}
		entry.Link = link
	}

	entry.UID = -1
	if value, ok = object["uid"]; ok {
		if _, ok = value.(float64); ok {
			entry.UID = int(value.(float64))
		}
	}

	entry.GID = -1
	if value, ok = object["gid"]; ok {
		if _, ok = value.(float64); ok {
			entry.GID = int(value.(float64))
		}
	}

	if value, ok = object["attributes"]; ok {
		if attributes, ok := value.(map[string]interface{}); !ok {
			return fmt.Errorf("Attributes are invalid for file '%s' in the snapshot", entry.Path)
		} else {
			entry.Attributes = make(map[string][]byte)
			for name, object := range attributes {
				if object == nil {
					entry.Attributes[name] = []byte("")
				} else if attributeInBase64, ok := object.(string); !ok {
					return fmt.Errorf("Attribute '%s' is invalid for file '%s' in the snapshot", name, entry.Path)
				} else if attribute, err := base64.StdEncoding.DecodeString(attributeInBase64); err != nil {
					return fmt.Errorf("Failed to decode attribute '%s' for file '%s' in the snapshot: %v",
						name, entry.Path, err)
				} else {
					entry.Attributes[name] = attribute
				}
			}
		}
	}

	if entry.IsFile() && entry.Size > 0 {
		if value, ok = object["content"]; !ok {
			return fmt.Errorf("Content is not specified for file '%s' in the snapshot", entry.Path)
		}

		if content, ok := value.(string); !ok {
			return fmt.Errorf("Content is invalid for file '%s' in the snapshot", entry.Path)
		} else {

			matched := contentRegex.FindStringSubmatch(content)
			if matched == nil {
				return fmt.Errorf("Content is specified in a wrong format for file '%s' in the snapshot", entry.Path)
			}

			entry.StartChunk, _ = strconv.Atoi(matched[1])
			entry.StartOffset, _ = strconv.Atoi(matched[2])
			entry.EndChunk, _ = strconv.Atoi(matched[3])
			entry.EndOffset, _ = strconv.Atoi(matched[4])
		}
	}

	return nil
}

func (entry *Entry) convertToObject(encodeName bool) map[string]interface{} {

	object := make(map[string]interface{})

	if encodeName {
		object["name"] = base64.StdEncoding.EncodeToString([]byte(entry.Path))
	} else {
		object["path"] = entry.Path
	}
	object["size"] = entry.Size
	object["time"] = entry.Time
	object["mode"] = entry.Mode
	object["hash"] = entry.Hash

	if entry.IsLink() {
		object["link"] = entry.Link
	}

	if entry.IsFile() && entry.Size > 0 {
		object["content"] = fmt.Sprintf("%d:%d:%d:%d",
			entry.StartChunk, entry.StartOffset, entry.EndChunk, entry.EndOffset)
	}

	if entry.UID != -1 && entry.GID != -1 {
		object["uid"] = entry.UID
		object["gid"] = entry.GID
	}

	if len(entry.Attributes) > 0 {
		object["attributes"] = entry.Attributes
	}

	return object
}

// MarshalJSON returns the json description of an entry.
func (entry *Entry) MarshalJSON() ([]byte, error) {

	object := entry.convertToObject(true)
	description, err := json.Marshal(object)
	return description, err
}
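The JSON shape these two methods agree on is easiest to see in a standalone sketch (the field values below are made up for illustration: "name" holds the base64-encoded path, and "content" packs StartChunk:StartOffset:EndChunk:EndOffset):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// A file entry as it would appear in a snapshot description.
	record := map[string]interface{}{
		"name":    base64.StdEncoding.EncodeToString([]byte("docs/readme.txt")),
		"size":    4096,
		"time":    1500000000,
		"mode":    0644,
		"hash":    "0123abcd",
		"content": "3:128:5:64", // StartChunk:StartOffset:EndChunk:EndOffset
	}
	encoded, _ := json.Marshal(record)
	fmt.Println(string(encoded))
}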
func (entry *Entry) IsFile() bool {
	return entry.Mode&uint32(os.ModeType) == 0
}

func (entry *Entry) IsDir() bool {
	return entry.Mode&uint32(os.ModeDir) != 0
}

func (entry *Entry) IsLink() bool {
	return entry.Mode&uint32(os.ModeSymlink) != 0
}

func (entry *Entry) GetPermissions() os.FileMode {
	return os.FileMode(entry.Mode) & os.ModePerm
}
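A quick in-package illustration of these bit tests (modeExample is a hypothetical helper, not part of this source; the printed FileMode strings are Go's standard rendering):

package duplicacy

import (
	"fmt"
	"os"
)

// modeExample classifies two entries: a mode with os.ModeDir set is a
// directory, and a mode with no type bits at all is a regular file.
func modeExample() {
	file := CreateEntry("notes.txt", 0, 0, 0644)
	dir := CreateEntry("src/", 0, 0, 0755|uint32(os.ModeDir))

	fmt.Println(file.IsFile(), file.GetPermissions()) // true -rw-r--r--
	fmt.Println(dir.IsDir(), dir.GetPermissions())    // true -rwxr-xr-x
}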
func (entry *Entry) IsSameAs(other *Entry) bool {
	return entry.Size == other.Size && entry.Time <= other.Time+1 && entry.Time >= other.Time-1
}

func (entry *Entry) IsSameAsFileInfo(other os.FileInfo) bool {
	time := other.ModTime().Unix()
	return entry.Size == other.Size() && entry.Time <= time+1 && entry.Time >= time-1
}

func (entry *Entry) String(maxSizeDigits int) string {
	modifiedTime := time.Unix(entry.Time, 0).Format("2006-01-02 15:04:05")
	return fmt.Sprintf("%*d %s %64s %s", maxSizeDigits, entry.Size, modifiedTime, entry.Hash, entry.Path)
}

func (entry *Entry) RestoreMetadata(fullPath string, fileInfo *os.FileInfo) bool {

	if fileInfo == nil {
		stat, err := os.Stat(fullPath)
		fileInfo = &stat
		if err != nil {
			LOG_ERROR("RESTORE_STAT", "Failed to retrieve the file info: %v", err)
			return false
		}
	}

	if (*fileInfo).Mode()&os.ModePerm != entry.GetPermissions() {
		err := os.Chmod(fullPath, entry.GetPermissions())
		if err != nil {
			LOG_ERROR("RESTORE_CHMOD", "Failed to set the file permissions: %v", err)
			return false
		}
	}

	if (*fileInfo).ModTime().Unix() != entry.Time {
		modifiedTime := time.Unix(entry.Time, 0)
		err := os.Chtimes(fullPath, modifiedTime, modifiedTime)
		if err != nil {
			LOG_ERROR("RESTORE_CHTIME", "Failed to set the modification time: %v", err)
			return false
		}
	}

	if len(entry.Attributes) > 0 {
		entry.SetAttributesToFile(fullPath)
	}

	return SetOwner(fullPath, entry, fileInfo)
}
// Return -1 if 'left' should appear before 'right', 1 if the opposite, and 0 if they are the same.
// Files are always arranged before subdirectories under the same parent directory.
func (left *Entry) Compare(right *Entry) int {

	path1 := left.Path
	path2 := right.Path

	p := 0
	for ; p < len(path1) && p < len(path2); p++ {
		if path1[p] != path2[p] {
			break
		}
	}

	// c1, c2 are the first bytes that differ
	var c1, c2 byte
	if p < len(path1) {
		c1 = path1[p]
	}
	if p < len(path2) {
		c2 = path2[p]
	}

	// c3, c4 indicate how the current component ends
	// c3 == '/': the current component is a directory
	// c3 != '/': the current component is the last one
	c3 := c1
	for i := p; c3 != '/' && i < len(path1); i++ {
		c3 = path1[i]
	}

	c4 := c2
	for i := p; c4 != '/' && i < len(path2); i++ {
		c4 = path2[i]
	}

	if c3 == '/' {
		if c4 == '/' {
			// We are comparing two directory components
			if c1 == '/' {
				// left is shorter
				// Note that c2 may be smaller than c1, but c1 is '/' which is counted
				// as 0
				return -1
			} else if c2 == '/' {
				// right is shorter
				return 1
			} else {
				return int(c1) - int(c2)
			}
		} else {
			return 1
		}
	} else {
		// We're at the last component of left, and left is a file
		if c4 == '/' {
			// the current component of right is a directory
			return -1
		} else {
			return int(c1) - int(c2)
		}
	}
}

// This is used to sort entries by their names.
type ByName []*Entry

func (entries ByName) Len() int      { return len(entries) }
func (entries ByName) Swap(i, j int) { entries[i], entries[j] = entries[j], entries[i] }
func (entries ByName) Less(i, j int) bool {
	return entries[i].Compare(entries[j]) < 0
}
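The effect of this ordering is easiest to see by sorting a few entries (sortExample is a hypothetical in-package helper; note that the files "ab" and "ab-" sort before the directory "ab/"):

package duplicacy

import (
	"fmt"
	"os"
	"sort"
)

// sortExample sorts one directory and two files sharing the "ab" prefix.
// Compare places files before directories, so the printed order is:
// ab, ab-, ab/
func sortExample() {
	entries := []*Entry{
		CreateEntry("ab/", 0, 0, 0700|uint32(os.ModeDir)),
		CreateEntry("ab-", 0, 0, 0700),
		CreateEntry("ab", 0, 0, 0700),
	}
	sort.Sort(ByName(entries))
	for _, entry := range entries {
		fmt.Println(entry.Path)
	}
}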
// This is used to sort entries by their starting chunks (and starting offsets if the starting chunks are the same).
type ByChunk []*Entry

func (entries ByChunk) Len() int      { return len(entries) }
func (entries ByChunk) Swap(i, j int) { entries[i], entries[j] = entries[j], entries[i] }
func (entries ByChunk) Less(i, j int) bool {
	return entries[i].StartChunk < entries[j].StartChunk ||
		(entries[i].StartChunk == entries[j].StartChunk && entries[i].StartOffset < entries[j].StartOffset)
}

// This is used to sort FileInfo objects.
type FileInfoCompare []os.FileInfo

func (files FileInfoCompare) Len() int      { return len(files) }
func (files FileInfoCompare) Swap(i, j int) { files[i], files[j] = files[j], files[i] }
func (files FileInfoCompare) Less(i, j int) bool {

	left := files[i]
	right := files[j]

	if left.IsDir() && left.Mode()&os.ModeSymlink == 0 {
		if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
			return left.Name() < right.Name()
		} else {
			return false
		}
	} else {
		if right.IsDir() && right.Mode()&os.ModeSymlink == 0 {
			return true
		} else {
			return left.Name() < right.Name()
		}
	}
}
// ListEntries returns a list of entries representing files and subdirectories under the directory 'path'. Entry paths
// are normalized as relative to 'top'. 'patterns' are used to exclude or include certain files.
func ListEntries(top string, path string, fileList *[]*Entry, patterns []string, discardAttributes bool) (directoryList []*Entry,
	skippedFiles []string, err error) {

	LOG_DEBUG("LIST_ENTRIES", "Listing %s", path)

	fullPath := joinPath(top, path)

	files := make([]os.FileInfo, 0, 1024)

	files, err = ioutil.ReadDir(fullPath)
	if err != nil {
		return directoryList, nil, err
	}

	normalizedPath := path
	if len(normalizedPath) > 0 && normalizedPath[len(normalizedPath)-1] != '/' {
		normalizedPath += "/"
	}

	normalizedTop := top
	if normalizedTop != "" && normalizedTop[len(normalizedTop)-1] != '/' {
		normalizedTop += "/"
	}

	sort.Sort(FileInfoCompare(files))

	entries := make([]*Entry, 0, 4)

	for _, f := range files {
		if f.Name() == DUPLICACY_DIRECTORY {
			continue
		}
		entry := CreateEntryFromFileInfo(f, normalizedPath)
		if len(patterns) > 0 && !MatchPath(entry.Path, patterns) {
			LOG_DEBUG("LIST_EXCLUDE", "%s is excluded", entry.Path)
			continue
		}
		if entry.IsLink() {
			isRegular := false
			isRegular, entry.Link, err = Readlink(filepath.Join(top, entry.Path))
			if err != nil {
				LOG_WARN("LIST_LINK", "Failed to read the symlink %s: %v", entry.Path, err)
				skippedFiles = append(skippedFiles, entry.Path)
				continue
			}

			if isRegular {
				entry.Mode ^= uint32(os.ModeSymlink)
			} else if path == "" && filepath.IsAbs(entry.Link) && !strings.HasPrefix(entry.Link, normalizedTop) {
				stat, err := os.Stat(filepath.Join(top, entry.Path))
				if err != nil {
					LOG_WARN("LIST_LINK", "Failed to read the symlink: %v", err)
					skippedFiles = append(skippedFiles, entry.Path)
					continue
				}

				newEntry := CreateEntryFromFileInfo(stat, "")
				if runtime.GOOS == "windows" {
					// On Windows, stat.Name() is the last component of the target, so we need to construct the correct
					// path from f.Name(); note that a "/" is appended assuming a symbolic link is always a directory
					newEntry.Path = filepath.Join(normalizedPath, f.Name()) + "/"
				}
				entry = newEntry
			}
		}

		if !discardAttributes {
			entry.ReadAttributes(top)
		}

		if f.Mode()&(os.ModeNamedPipe|os.ModeSocket|os.ModeDevice) != 0 {
			LOG_WARN("LIST_SKIP", "Skipped non-regular file %s", entry.Path)
			skippedFiles = append(skippedFiles, entry.Path)
			continue
		}

		entries = append(entries, entry)
	}

	// For the top-level directory we need to sort again because symlinks may have been changed
	if path == "" {
		sort.Sort(ByName(entries))
	}

	for _, entry := range entries {
		if entry.IsDir() {
			directoryList = append(directoryList, entry)
		} else {
			*fileList = append(*fileList, entry)
		}
	}

	for i, j := 0, len(directoryList)-1; i < j; i, j = i+1, j-1 {
		directoryList[i], directoryList[j] = directoryList[j], directoryList[i]
	}

	return directoryList, skippedFiles, nil
}
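Because the returned directoryList is reversed, a caller can treat it directly as a stack for depth-first traversal. A sketch of that driving loop (walkRepository is a hypothetical in-package helper, not part of this source; error handling is minimal):

package duplicacy

// walkRepository performs the traversal callers of ListEntries are expected
// to drive: pop a directory, list it, push its subdirectories.
func walkRepository(top string) ([]*Entry, []string, error) {
	files := make([]*Entry, 0, 1024)
	skipped := []string{}
	stack := []*Entry{CreateEntry("", 0, 0, 0)} // "" is the repository root

	for len(stack) > 0 {
		dir := stack[len(stack)-1]
		stack = stack[:len(stack)-1]

		subdirs, skippedHere, err := ListEntries(top, dir.Path, &files, nil, true)
		if err != nil {
			return nil, nil, err
		}
		skipped = append(skipped, skippedHere...)
		stack = append(stack, subdirs...) // already reversed for stack order
	}
	return files, skipped, nil
}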
// Diff returns how many bytes were modified between two files.
func (entry *Entry) Diff(chunkHashes []string, chunkLengths []int,
	otherHashes []string, otherLengths []int) (modifiedLength int64) {

	var offset1, offset2 int64
	i1 := entry.StartChunk
	i2 := 0
	for i1 <= entry.EndChunk && i2 < len(otherHashes) {

		start := 0
		if i1 == entry.StartChunk {
			start = entry.StartOffset
		}
		end := chunkLengths[i1]
		if i1 == entry.EndChunk {
			end = entry.EndOffset
		}

		if offset1 < offset2 {
			modifiedLength += int64(end - start)
			offset1 += int64(end - start)
			i1++
		} else if offset1 > offset2 {
			offset2 += int64(otherLengths[i2])
			i2++
		} else {
			if chunkHashes[i1] == otherHashes[i2] && end-start == otherLengths[i2] {
				// the chunk is unchanged
			} else {
				modifiedLength += int64(chunkLengths[i1])
			}
			offset1 += int64(end - start)
			offset2 += int64(otherLengths[i2])
			i1++
			i2++
		}
	}

	return modifiedLength
}
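A small worked example of Diff's bookkeeping (diffExample is a hypothetical in-package helper with made-up hashes): for a file spanning two aligned 4-byte chunks, when only the second chunk's hash changes, exactly that chunk's bytes are counted as modified.

package duplicacy

import "fmt"

// diffExample compares a file spanning chunks 0..1 (4 bytes each) against an
// older sequence whose second chunk hash differs.
func diffExample() {
	entry := &Entry{StartChunk: 0, StartOffset: 0, EndChunk: 1, EndOffset: 4}

	hashes := []string{"h1", "h2"} // current chunk hashes
	lengths := []int{4, 4}

	otherHashes := []string{"h1", "h2-old"} // second chunk differs
	otherLengths := []int{4, 4}

	fmt.Println(entry.Diff(hashes, lengths, otherHashes, otherLengths)) // prints 4
}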
@@ -5,216 +5,214 @@
package duplicacy

import (
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sort"
	"testing"
)

func TestEntrySort(t *testing.T) {

	DATA := [...]string{
		"ab",
		"ab-",
		"ab0",
		"ab1",
		"\xBB\xDDfile",
		"\xFF\xDDfile",
		"ab/",
		"ab/c",
		"ab+/c-",
		"ab+/c0",
		"ab+/c/",
		"ab+/c/d",
		"ab+/c+/",
		"ab+/c+/d",
		"ab+/c0/",
		"ab+/c0/d",
		"ab-/",
		"ab-/c",
		"ab0/",
		"ab1/",
		"ab1/c",
		"ab1/\xBB\xDDfile",
		"ab1/\xFF\xDDfile",
	}

	var entry1, entry2 *Entry

	for i, p1 := range DATA {
		if p1[len(p1)-1] == '/' {
			entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
		} else {
			entry1 = CreateEntry(p1, 0, 0, 0700)
		}
		for j, p2 := range DATA {

			if p2[len(p2)-1] == '/' {
				entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
			} else {
				entry2 = CreateEntry(p2, 0, 0, 0700)
			}

			compared := entry1.Compare(entry2)

			if compared < 0 {
				compared = -1
			} else if compared > 0 {
				compared = 1
			}

			var expected int
			if i < j {
				expected = -1
			} else if i > j {
				expected = 1
			} else {
				expected = 0
			}

			if compared != expected {
				t.Errorf("%s vs %s: %d, expected: %d", p1, p2, compared, expected)
			}
		}
	}
}

func TestEntryList(t *testing.T) {

	testDir := filepath.Join(os.TempDir(), "duplicacy_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	DATA := [...]string{
		"ab",
		"ab-",
		"ab0",
		"ab1",
		"ab+/",
		"ab+/c",
		"ab+/c+",
		"ab+/c1",
		"ab+/c-/",
		"ab+/c-/d",
		"ab+/c0/",
		"ab+/c0/d",
		"ab2/",
		"ab2/c",
		"ab3/",
		"ab3/c",
	}

	var entry1, entry2 *Entry

	for i, p1 := range DATA {
		if p1[len(p1)-1] == '/' {
			entry1 = CreateEntry(p1, 0, 0, 0700|uint32(os.ModeDir))
		} else {
			entry1 = CreateEntry(p1, 0, 0, 0700)
		}
		for j, p2 := range DATA {
			if p2[len(p2)-1] == '/' {
				entry2 = CreateEntry(p2, 0, 0, 0700|uint32(os.ModeDir))
			} else {
				entry2 = CreateEntry(p2, 0, 0, 0700)
			}

			compared := entry1.Compare(entry2)
			if compared < 0 {
				compared = -1
			} else if compared > 0 {
				compared = 1
			}

			var expected int
			if i < j {
				expected = -1
			} else if i > j {
				expected = 1
			} else {
				expected = 0
			}

			if compared != expected {
				t.Errorf("%s vs %s: %d, expected: %d", p1, p2, compared, expected)
			}
		}
	}

	for _, file := range DATA {
		fullPath := filepath.Join(testDir, file)
		if file[len(file)-1] == '/' {
			err := os.Mkdir(fullPath, 0700)
			if err != nil {
				t.Errorf("Mkdir(%s) returned an error: %s", fullPath, err)
			}
			continue
		}

		err := ioutil.WriteFile(fullPath, []byte(file), 0700)
		if err != nil {
			t.Errorf("WriteFile(%s) returned an error: %s", fullPath, err)
		}
	}

	directories := make([]*Entry, 0, 4)
	directories = append(directories, CreateEntry("", 0, 0, 0))

	entries := make([]*Entry, 0, 4)

	for len(directories) > 0 {
		directory := directories[len(directories)-1]
		directories = directories[:len(directories)-1]
		entries = append(entries, directory)
		subdirectories, _, err := ListEntries(testDir, directory.Path, &entries, nil, false)
		if err != nil {
			t.Errorf("ListEntries(%s, %s) returned an error: %s", testDir, directory.Path, err)
		}
		directories = append(directories, subdirectories...)
	}

	entries = entries[1:]

	for _, entry := range entries {
		t.Logf("entry: %s", entry.Path)
	}

	if len(entries) != len(DATA) {
		t.Errorf("Got %d entries instead of %d", len(entries), len(DATA))
		return
	}

	for i := 0; i < len(entries); i++ {
		if entries[i].Path != DATA[i] {
			t.Errorf("entry: %s, expected: %s", entries[i].Path, DATA[i])
		}
	}

	t.Logf("shuffling %d entries", len(entries))
	for i := range entries {
		j := rand.Intn(i + 1)
		entries[i], entries[j] = entries[j], entries[i]
	}

	sort.Sort(ByName(entries))

	for i := 0; i < len(entries); i++ {
		if entries[i].Path != DATA[i] {
			t.Errorf("entry: %s, expected: %s", entries[i].Path, DATA[i])
		}
	}

	if !t.Failed() {
		os.RemoveAll(testDir)
	}
}
@@ -5,70 +5,66 @@
package duplicacy

import (
	"os"
)

// FileReader wraps a number of files and turns them into a series of readers.
type FileReader struct {
	top   string
	files []*Entry

	CurrentFile  *os.File
	CurrentIndex int
	CurrentEntry *Entry

	SkippedFiles []string
}

// CreateFileReader creates a file reader.
func CreateFileReader(top string, files []*Entry) *FileReader {

	reader := &FileReader{
		top:          top,
		files:        files,
		CurrentIndex: -1,
	}

	reader.NextFile()

	return reader
}

// NextFile switches to the next file in the file reader.
func (reader *FileReader) NextFile() bool {

	if reader.CurrentFile != nil {
		reader.CurrentFile.Close()
	}

	reader.CurrentIndex++
	for reader.CurrentIndex < len(reader.files) {

		reader.CurrentEntry = reader.files[reader.CurrentIndex]
		if !reader.CurrentEntry.IsFile() || reader.CurrentEntry.Size == 0 {
			reader.CurrentIndex++
			continue
		}

		var err error

		fullPath := joinPath(reader.top, reader.CurrentEntry.Path)
		reader.CurrentFile, err = os.OpenFile(fullPath, os.O_RDONLY, 0)
		if err != nil {
			LOG_WARN("OPEN_FAILURE", "Failed to open file for reading: %v", err)
			reader.CurrentEntry.Size = 0
			reader.SkippedFiles = append(reader.SkippedFiles, reader.CurrentEntry.Path)
			reader.CurrentIndex++
			continue
		}

		return true
	}

	reader.CurrentFile = nil
	return false
}
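A usage sketch for the reader above (readAll is a hypothetical in-package helper; the real backup path chunks and hashes this stream rather than copying it):

package duplicacy

import "io"

// readAll drains every regular, non-empty file wrapped by a FileReader,
// feeding each to a caller-supplied sink.
func readAll(reader *FileReader, sink io.Writer) error {
	for reader.CurrentFile != nil {
		if _, err := io.Copy(sink, reader.CurrentFile); err != nil {
			return err
		}
		reader.NextFile() // closes the current file and opens the next one
	}
	return nil
}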
@@ -5,259 +5,259 @@
package duplicacy

import (
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"path"
	"time"
)

// FileStorage is a local on-disk file storage implementing the Storage interface.
type FileStorage struct {
	RateLimitedStorage

	minimumLevel    int  // The minimum level of directories to dive into before searching for the chunk file.
	isCacheNeeded   bool // Network storages require caching
	storageDir      string
	numberOfThreads int
}

// CreateFileStorage creates a file storage.
func CreateFileStorage(storageDir string, minimumLevel int, isCacheNeeded bool, threads int) (storage *FileStorage, err error) {

	var stat os.FileInfo

	stat, err = os.Stat(storageDir)
	if err != nil {
		if os.IsNotExist(err) {
			err = os.MkdirAll(storageDir, 0744)
			if err != nil {
				return nil, err
			}
		} else {
			return nil, err
		}
	} else {
		if !stat.IsDir() {
			return nil, fmt.Errorf("The storage path %s is a file", storageDir)
		}
	}

	for storageDir[len(storageDir)-1] == '/' {
		storageDir = storageDir[:len(storageDir)-1]
	}

	storage = &FileStorage{
		storageDir:      storageDir,
		minimumLevel:    minimumLevel,
		isCacheNeeded:   isCacheNeeded,
		numberOfThreads: threads,
	}

	// Random number for generating the temporary chunk file suffix.
	rand.Seed(time.Now().UnixNano())

	return storage, nil
}

// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *FileStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {

	fullPath := path.Join(storage.storageDir, dir)

	list, err := ioutil.ReadDir(fullPath)
	if err != nil {
		if os.IsNotExist(err) {
			return nil, nil, nil
		}
		return nil, nil, err
	}

	for _, f := range list {
		name := f.Name()
		if f.IsDir() && name[len(name)-1] != '/' {
			name += "/"
		}
		files = append(files, name)
		sizes = append(sizes, f.Size())
	}

	return files, sizes, nil
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *FileStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	err = os.Remove(path.Join(storage.storageDir, filePath))
	if err == nil || os.IsNotExist(err) {
		return nil
	} else {
		return err
	}
}

// MoveFile renames the file.
func (storage *FileStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	return os.Rename(path.Join(storage.storageDir, from), path.Join(storage.storageDir, to))
}

// CreateDirectory creates a new directory.
func (storage *FileStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	err = os.Mkdir(path.Join(storage.storageDir, dir), 0744)
	if err != nil && os.IsExist(err) {
		return nil
	} else {
		return err
	}
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *FileStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
	stat, err := os.Stat(path.Join(storage.storageDir, filePath))
	if err != nil {
		if os.IsNotExist(err) {
			return false, false, 0, nil
		} else {
			return false, false, 0, err
		}
	}

	return true, stat.IsDir(), stat.Size(), nil
}

// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with the
// suffix '.fsl'.
func (storage *FileStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
	dir := path.Join(storage.storageDir, "chunks")

	suffix := ""
	if isFossil {
		suffix = ".fsl"
	}

	for level := 0; level*2 < len(chunkID); level++ {
		if level >= storage.minimumLevel {
			filePath = path.Join(dir, chunkID[2*level:]) + suffix
			// Use Lstat() instead of Stat() since 1) Stat() doesn't work for deduplicated disks on Windows and 2) there isn't
			// really a need to follow the link if filePath is a link.
			stat, err := os.Lstat(filePath)
			if err != nil {
				LOG_DEBUG("FS_FIND", "File %s can't be found: %v", filePath, err)
			} else if stat.IsDir() {
				return filePath[len(storage.storageDir)+1:], false, 0, fmt.Errorf("The path %s is a directory", filePath)
			} else {
				return filePath[len(storage.storageDir)+1:], true, stat.Size(), nil
			}
		}

		// Find the subdirectory in which the chunk file may reside.
		subDir := path.Join(dir, chunkID[2*level:2*level+2])
		stat, err := os.Stat(subDir)
		if err == nil && stat.IsDir() {
			dir = subDir
			continue
		}

		if level < storage.minimumLevel {
			// Create the subdirectory if it doesn't exist.

			if err == nil && !stat.IsDir() {
				return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
			}

			err = os.Mkdir(subDir, 0744)
			if err != nil {
				// The directory may have been created by other threads so check it again.
				stat, _ := os.Stat(subDir)
				if stat == nil || !stat.IsDir() {
					return "", false, 0, err
				}
			}
			dir = subDir
			continue
		}

		// The chunk must be under this subdirectory but it doesn't exist.
		return path.Join(dir, chunkID[2*level:])[len(storage.storageDir)+1:] + suffix, false, 0, nil

	}

	return "", false, 0, fmt.Errorf("The maximum level of directories searched")
}
|
|
||||||
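// An illustrative sketch of the layout FindChunk walks: each nesting level
// consumes two hex characters of the chunk ID, so with two levels the chunk
// "abcdef..." is stored at "chunks/ab/cd/ef...". The helper below is
// hypothetical (chunkPathAtLevel is not a duplicacy function) and omits the
// directory probing and creation done above:
//
//    func chunkPathAtLevel(storageDir string, chunkID string, level int, isFossil bool) string {
//        dir := path.Join(storageDir, "chunks")
//        for i := 0; i < level; i++ {
//            dir = path.Join(dir, chunkID[2*i:2*i+2])
//        }
//        filePath := path.Join(dir, chunkID[2*level:])
//        if isFossil {
//            filePath += ".fsl"
//        }
//        return filePath
//    }
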
// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *FileStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

    file, err := os.Open(path.Join(storage.storageDir, filePath))
    if err != nil {
        return err
    }

    defer file.Close()
    if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
        return err
    }

    return nil
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *FileStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

    fullPath := path.Join(storage.storageDir, filePath)

    letters := "abcdefghijklmnopqrstuvwxyz"
    suffix := make([]byte, 8)
    for i := range suffix {
        suffix[i] = letters[rand.Intn(len(letters))]
    }

    temporaryFile := fullPath + "." + string(suffix) + ".tmp"

    file, err := os.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }

    reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
    _, err = io.Copy(file, reader)
    if err != nil {
        file.Close()
        return err
    }

    file.Close()

    err = os.Rename(temporaryFile, fullPath)
    if err != nil {
        if _, e := os.Stat(fullPath); e == nil {
            os.Remove(temporaryFile)
            return nil
        } else {
            return err
        }
    }

    return nil
}

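// UploadFile above is an instance of the classic write-temp-then-rename
// pattern: content lands in a randomly named ".tmp" file first, and only a
// successful rename makes it visible under its final name, so concurrent
// readers never see a half-written chunk; if the rename fails but the target
// already exists (another thread won the race, or the platform refuses to
// replace an existing file), the upload is treated as a success. A minimal
// sketch of the same idea, stripped of rate limiting (writeFileAtomic is
// illustrative, not a duplicacy function):
//
//    func writeFileAtomic(fullPath string, content []byte) error {
//        temporaryFile := fullPath + ".tmp"
//        if err := ioutil.WriteFile(temporaryFile, content, 0644); err != nil {
//            return err
//        }
//        return os.Rename(temporaryFile, fullPath)
//    }
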
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *FileStorage) IsCacheNeeded() bool { return storage.isCacheNeeded }

// If the 'MoveFile' method is implemented.
func (storage *FileStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *FileStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *FileStorage) IsFastListing() bool { return false }

// Enable the test mode.
func (storage *FileStorage) EnableTestMode() {}

File diff suppressed because it is too large
Load Diff
@@ -5,65 +5,64 @@
package duplicacy

import (
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net"
    "net/url"
    "time"

    gcs "cloud.google.com/go/storage"
    "golang.org/x/net/context"
    "golang.org/x/oauth2"
    "golang.org/x/oauth2/google"
    "google.golang.org/api/googleapi"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
)

type GCSStorage struct {
    RateLimitedStorage

    bucket     *gcs.BucketHandle
    storageDir string

    numberOfThreads int
    TestMode        bool
}

type GCSConfig struct {
    ClientID     string          `json:"client_id"`
    ClientSecret string          `json:"client_secret"`
    Endpoint     oauth2.Endpoint `json:"end_point"`
    Token        oauth2.Token    `json:"token"`
}

// CreateGCSStorage creates a GCS storage object.
func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, threads int) (storage *GCSStorage, err error) {

    ctx := context.Background()

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    var object map[string]interface{}

    err = json.Unmarshal(description, &object)
    if err != nil {
        return nil, err
    }

    isServiceAccount := false
    if value, ok := object["type"]; ok {
        if authType, ok := value.(string); ok && authType == "service_account" {
            isServiceAccount = true
        }
    }

    var tokenSource oauth2.TokenSource

@@ -74,7 +73,7 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
        }
        tokenSource = config.TokenSource(ctx)
    } else {
        gcsConfig := &GCSConfig{}
        if err := json.Unmarshal(description, gcsConfig); err != nil {
            return nil, err
        }
@@ -87,82 +86,81 @@ func CreateGCSStorage(tokenFile string, bucketName string, storageDir string, th
        tokenSource = config.TokenSource(ctx, &gcsConfig.Token)
    }

    options := option.WithTokenSource(tokenSource)
    client, err := gcs.NewClient(ctx, options)

    bucket := client.Bucket(bucketName)

    if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
        storageDir += "/"
    }

    storage = &GCSStorage{
        bucket:          bucket,
        storageDir:      storageDir,
        numberOfThreads: threads,
    }

    return storage, nil
}

func (storage *GCSStorage) shouldRetry(backoff *int, err error) (bool, error) {

    retry := false
    message := ""
    if err == nil {
        return false, nil
    } else if e, ok := err.(*googleapi.Error); ok {
        if 500 <= e.Code && e.Code < 600 {
            // Retry for 5xx response codes.
            message = fmt.Sprintf("HTTP status code %d", e.Code)
            retry = true
        } else if e.Code == 429 {
            // Too many requests.
            message = "HTTP status code 429"
            retry = true
        } else if e.Code == 403 {
            // User Rate Limit Exceeded
            message = "User Rate Limit Exceeded"
            retry = true
        }
    } else if e, ok := err.(*url.Error); ok {
        message = e.Error()
        retry = true
    } else if err == io.ErrUnexpectedEOF {
        // Retry on unexpected EOFs and temporary network errors.
        message = "Unexpected EOF"
        retry = true
    } else if err, ok := err.(net.Error); ok {
        message = "Temporary network error"
        retry = err.Temporary()
    }

    if !retry || *backoff >= 256 {
        return false, err
    }

    delay := float32(*backoff) * rand.Float32()
    LOG_INFO("GCS_RETRY", "%s; retrying after %.2f seconds", message, delay)
    // Sleep for the jittered delay that was just logged.
    time.Sleep(time.Duration(delay * float32(time.Second)))
    *backoff *= 2
    return true, nil
}

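// A distilled sketch of the retry policy shouldRetry implements above:
// exponential backoff with random jitter, doubling up to a cap of 256
// seconds. The error classification is omitted here; callOnce is a
// hypothetical stand-in for any fallible GCS operation:
//
//    func withBackoff(callOnce func() error) (err error) {
//        for backoff := 1; backoff <= 256; backoff *= 2 {
//            if err = callOnce(); err == nil {
//                return nil
//            }
//            // Sleep a random fraction of the current backoff window.
//            time.Sleep(time.Duration(float32(backoff) * rand.Float32() * float32(time.Second)))
//        }
//        return err
//    }
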
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }

    query := gcs.Query{
        Prefix: storage.storageDir + dir + "/",
    }
    dirOnly := false
    prefixLength := len(query.Prefix)

    if dir == "snapshots" {
        query.Delimiter = "/"
        dirOnly = true
    }
@@ -174,7 +172,7 @@ func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []i
        attributes, err := iter.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return nil, nil, err
        }
@@ -198,7 +196,7 @@ func (storage *GCSStorage) ListFiles(threadIndex int, dir string) ([]string, []i
// DeleteFile deletes the file or directory at 'filePath'.
func (storage *GCSStorage) DeleteFile(threadIndex int, filePath string) (err error) {
    err = storage.bucket.Object(storage.storageDir + filePath).Delete(context.Background())
    if err == gcs.ErrObjectNotExist {
        return nil
    }
    return err
@@ -243,10 +241,10 @@ func (storage *GCSStorage) GetFileInfo(threadIndex int, filePath string) (exist
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *GCSStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
    filePath = "chunks/" + chunkID
    if isFossil {
        filePath += ".fsl"
    }

    exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)

@@ -259,9 +257,9 @@ func (storage *GCSStorage) DownloadFile(threadIndex int, filePath string, chunk
    if err != nil {
        return err
    }
    defer readCloser.Close()
    _, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
    return err
}

// UploadFile writes 'content' to the file at 'filePath'.
@@ -271,7 +269,7 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
    for {
        writeCloser := storage.bucket.Object(storage.storageDir + filePath).NewWriter(context.Background())
        defer writeCloser.Close()
        reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
        _, err = io.Copy(writeCloser, reader)

        if retry, e := storage.shouldRetry(&backoff, err); e == nil && !retry {
@@ -282,22 +280,22 @@ func (storage *GCSStorage) UploadFile(threadIndex int, filePath string, content
            return err
        }
    }

    return err
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *GCSStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *GCSStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *GCSStorage) IsStrongConsistent() bool { return true }

// If the storage supports fast listing of file names.
func (storage *GCSStorage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *GCSStorage) EnableTestMode() { storage.TestMode = true }

@@ -5,456 +5,456 @@
package duplicacy

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net"
    "net/http"
    net_url "net/url"
    "strings"
    "sync"
    "time"

    "golang.org/x/oauth2"
)

type HubicError struct {
    Status  int
    Message string
}

func (err HubicError) Error() string {
    return fmt.Sprintf("%d %s", err.Status, err.Message)
}

var HubicRefreshTokenURL = "https://duplicacy.com/hubic_refresh"
var HubicCredentialURL = "https://api.hubic.com/1.0/account/credentials"

type HubicCredential struct {
    Token    string
    Endpoint string
    Expires  time.Time
}

type HubicClient struct {
    HTTPClient *http.Client

    TokenFile string
    Token     *oauth2.Token
    TokenLock *sync.Mutex

    Credential     HubicCredential
    CredentialLock *sync.Mutex

    TestMode bool
}

func NewHubicClient(tokenFile string) (*HubicClient, error) {

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    token := new(oauth2.Token)
    if err := json.Unmarshal(description, token); err != nil {
        return nil, fmt.Errorf("%v: %s", err, description)
    }

    client := &HubicClient{
        HTTPClient: &http.Client{
            Transport: &http.Transport{
                Dial: (&net.Dialer{
                    Timeout:   30 * time.Second,
                    KeepAlive: 30 * time.Second,
                }).Dial,
                TLSHandshakeTimeout:   60 * time.Second,
                ResponseHeaderTimeout: 30 * time.Second,
                ExpectContinueTimeout: 10 * time.Second,
            },
        },
        TokenFile:      tokenFile,
        Token:          token,
        TokenLock:      &sync.Mutex{},
        CredentialLock: &sync.Mutex{},
    }

    err = client.RefreshToken()
    if err != nil {
        return nil, err
    }

    err = client.GetCredential()
    if err != nil {
        return nil, err
    }

    return client, nil
}

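// Note that the Transport above sets per-phase timeouts (dial, TLS handshake,
// response header) but no overall http.Client.Timeout, so reading a slow
// response body is not itself subject to a deadline.
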
func (client *HubicClient) call(url string, method string, input interface{}, extraHeader map[string]string) (io.ReadCloser, int64, string, error) {

    var response *http.Response

    backoff := 1
    for i := 0; i < 8; i++ {

        LOG_DEBUG("HUBIC_CALL", "%s %s", method, url)

        //fmt.Printf("%s %s\n", method, url)

        var inputReader io.Reader

        switch input.(type) {
        default:
            jsonInput, err := json.Marshal(input)
            if err != nil {
                return nil, 0, "", err
            }
            inputReader = bytes.NewReader(jsonInput)
        case []byte:
            inputReader = bytes.NewReader(input.([]byte))
        case int:
            inputReader = bytes.NewReader([]byte(""))
        case *bytes.Buffer:
            inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
        case *RateLimitedReader:
            input.(*RateLimitedReader).Reset()
            inputReader = input.(*RateLimitedReader)
        }

        request, err := http.NewRequest(method, url, inputReader)
        if err != nil {
            return nil, 0, "", err
        }

        if reader, ok := inputReader.(*RateLimitedReader); ok {
            request.ContentLength = reader.Length()
        }

        if url == HubicCredentialURL {
            client.TokenLock.Lock()
            request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
            client.TokenLock.Unlock()
        } else if url != HubicRefreshTokenURL {
            client.CredentialLock.Lock()
            request.Header.Set("X-Auth-Token", client.Credential.Token)
            client.CredentialLock.Unlock()
        }

        for key, value := range extraHeader {
            request.Header.Set(key, value)
        }

        response, err = client.HTTPClient.Do(request)
        if err != nil {
            return nil, 0, "", err
        }

        contentType := ""
        if len(response.Header["Content-Type"]) > 0 {
            contentType = response.Header["Content-Type"][0]
        }

        if response.StatusCode < 400 {
            return response.Body, response.ContentLength, contentType, nil
        }

        /*buffer := bytes.NewBufferString("")
        io.Copy(buffer, response.Body)
        fmt.Printf("%s\n", buffer.String())*/

        response.Body.Close()

        if response.StatusCode == 401 {

            if url == HubicRefreshTokenURL {
                return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
            }

            if url == HubicCredentialURL {
                return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Authorization error when retrieving credentials"}
            }

            err = client.RefreshToken()
            if err != nil {
                return nil, 0, "", err
            }

            err = client.GetCredential()
            if err != nil {
                return nil, 0, "", err
            }
            continue
        } else if response.StatusCode >= 500 && response.StatusCode < 600 {
            retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
            LOG_INFO("HUBIC_RETRY", "Response status: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
            time.Sleep(retryAfter * time.Millisecond)
            backoff *= 2
            continue
        } else {
            return nil, 0, "", HubicError{Status: response.StatusCode, Message: "Hubic API error"}
        }
    }

    return nil, 0, "", fmt.Errorf("Maximum number of retries reached")
}

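// Usage sketch for call(): the empty-interface 'input' selects how the request
// body is built, mirroring the type switch above. These lines are illustrative
// only ('url', 'content' and 'rateLimit' are placeholders):
//
//    client.call(HubicRefreshTokenURL, "POST", client.Token, nil)           // struct -> JSON body
//    client.call(url, "GET", 0, nil)                                        // int -> empty body
//    client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit),   // rate-limited body
//        map[string]string{"Content-Type": "application/octet-stream"})
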
func (client *HubicClient) RefreshToken() (err error) {
    client.TokenLock.Lock()
    defer client.TokenLock.Unlock()

    if client.Token.Valid() {
        return nil
    }

    readCloser, _, _, err := client.call(HubicRefreshTokenURL, "POST", client.Token, nil)
    if err != nil {
        return err
    }

    defer readCloser.Close()

    if err = json.NewDecoder(readCloser).Decode(&client.Token); err != nil {
        return err
    }

    description, err := json.Marshal(client.Token)
    if err != nil {
        return err
    }

    err = ioutil.WriteFile(client.TokenFile, description, 0644)
    if err != nil {
        return err
    }

    return nil
}

func (client *HubicClient) GetCredential() (err error) {
    client.CredentialLock.Lock()
    defer client.CredentialLock.Unlock()

    readCloser, _, _, err := client.call(HubicCredentialURL, "GET", 0, nil)
    if err != nil {
        return err
    }

    buffer := bytes.NewBufferString("")
    io.Copy(buffer, readCloser)
    readCloser.Close()

    if err = json.NewDecoder(buffer).Decode(&client.Credential); err != nil {
        return fmt.Errorf("%v (response: %s)", err, buffer)
    }

    return nil
}

type HubicEntry struct {
    Name   string `json:"name"`
    Size   int64  `json:"bytes"`
    Type   string `json:"content_type"`
    Subdir string `json:"subdir"`
}

func (client *HubicClient) ListEntries(path string) ([]HubicEntry, error) {

    if len(path) > 0 && path[len(path)-1] != '/' {
        path += "/"
    }

    count := 1000
    if client.TestMode {
        count = 8
    }

    marker := ""

    var entries []HubicEntry

    for {

        client.CredentialLock.Lock()
        url := client.Credential.Endpoint + "/default"
        client.CredentialLock.Unlock()
        url += fmt.Sprintf("?format=json&limit=%d&delimiter=%%2f", count)
        if path != "" {
            url += "&prefix=" + net_url.QueryEscape(path)
        }
        if marker != "" {
            url += "&marker=" + net_url.QueryEscape(marker)
        }

        readCloser, _, _, err := client.call(url, "GET", 0, nil)
        if err != nil {
            return nil, err
        }

        defer readCloser.Close()

        var output []HubicEntry

        if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
            return nil, err
        }

        for _, entry := range output {
            if entry.Subdir == "" {
                marker = entry.Name
            } else {
                marker = entry.Subdir
                for len(entry.Subdir) > 0 && entry.Subdir[len(entry.Subdir)-1] == '/' {
                    entry.Subdir = entry.Subdir[:len(entry.Subdir)-1]
                }
                entry.Name = entry.Subdir
                entry.Type = "application/directory"
            }
            if path != "" && strings.HasPrefix(entry.Name, path) {
                entry.Name = entry.Name[len(path):]
            }
            entries = append(entries, entry)
        }
        if len(output) < count {
            break
        }
    }

    return entries, nil
}

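// A distilled sketch of the marker-based pagination in ListEntries: the
// listing API returns at most 'limit' entries per request, the last name seen
// becomes the 'marker' for the next page, and a short page signals the end.
// fetchPage is a hypothetical stand-in for one listing request:
//
//    func listAll(fetchPage func(marker string) ([]HubicEntry, error), limit int) ([]HubicEntry, error) {
//        var entries []HubicEntry
//        marker := ""
//        for {
//            page, err := fetchPage(marker)
//            if err != nil {
//                return nil, err
//            }
//            entries = append(entries, page...)
//            if len(page) < limit {
//                return entries, nil
//            }
//            marker = page[len(page)-1].Name
//        }
//    }
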
func (client *HubicClient) GetFileInfo(path string) (bool, bool, int64, error) {

    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    readCloser, size, contentType, err := client.call(url, "HEAD", 0, nil)
    if err != nil {
        if e, ok := err.(HubicError); ok && e.Status == 404 {
            return false, false, 0, nil
        } else {
            return false, false, 0, err
        }
    }

    readCloser.Close()

    return true, contentType == "application/directory", size, nil
}

func (client *HubicClient) DownloadFile(path string) (io.ReadCloser, int64, error) {

    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    readCloser, size, _, err := client.call(url, "GET", 0, nil)
    return readCloser, size, err
}

func (client *HubicClient) UploadFile(path string, content []byte, rateLimit int) (err error) {

    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    header := make(map[string]string)
    header["Content-Type"] = "application/octet-stream"

    readCloser, _, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), header)
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *HubicClient) DeleteFile(path string) error {

    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    readCloser, _, _, err := client.call(url, "DELETE", 0, nil)
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *HubicClient) MoveFile(from string, to string) error {

    for len(from) > 0 && from[len(from)-1] == '/' {
        from = from[:len(from)-1]
    }

    for len(to) > 0 && to[len(to)-1] == '/' {
        to = to[:len(to)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + from
    client.CredentialLock.Unlock()

    header := make(map[string]string)
    header["Destination"] = "default/" + to

    readCloser, _, _, err := client.call(url, "COPY", 0, header)
    if err != nil {
        return err
    }

    readCloser.Close()

    return client.DeleteFile(from)
}

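// Note that MoveFile is a server-side COPY followed by a DELETE: the
// Swift-style object API that hubiC exposes has no atomic rename, so a
// failure between the two requests can leave the object under both names.
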
func (client *HubicClient) CreateDirectory(path string) error {

    for len(path) > 0 && path[len(path)-1] == '/' {
        path = path[:len(path)-1]
    }

    client.CredentialLock.Lock()
    url := client.Credential.Endpoint + "/default/" + path
    client.CredentialLock.Unlock()

    header := make(map[string]string)
    header["Content-Type"] = "application/directory"

    readCloser, _, _, err := client.call(url, "PUT", "", header)
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

@@ -5,145 +5,145 @@
package duplicacy

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "testing"

    crypto_rand "crypto/rand"
    "math/rand"
)

func TestHubicClient(t *testing.T) {

    hubicClient, err := NewHubicClient("hubic-token.json")
    if err != nil {
        t.Errorf("Failed to create the Hubic client: %v", err)
        return
    }

    hubicClient.TestMode = true

    existingFiles, err := hubicClient.ListEntries("")
    for _, file := range existingFiles {
        fmt.Printf("name: %s, isDir: %t\n", file.Name, file.Type == "application/directory")
    }

    testExists, _, _, err := hubicClient.GetFileInfo("test")
    if err != nil {
        t.Errorf("Failed to list the test directory: %v", err)
        return
    }
    if !testExists {
        err = hubicClient.CreateDirectory("test")
        if err != nil {
            t.Errorf("Failed to create the test directory: %v", err)
            return
        }
    }

    test1Exists, _, _, err := hubicClient.GetFileInfo("test/test1")
    if err != nil {
        t.Errorf("Failed to list the test1 directory: %v", err)
        return
    }
    if !test1Exists {
        err = hubicClient.CreateDirectory("test/test1")
        if err != nil {
            t.Errorf("Failed to create the test1 directory: %v", err)
            return
        }
    }

    test2Exists, _, _, err := hubicClient.GetFileInfo("test/test2")
    if err != nil {
        t.Errorf("Failed to list the test2 directory: %v", err)
        return
    }
    if !test2Exists {
        err = hubicClient.CreateDirectory("test/test2")
        if err != nil {
            t.Errorf("Failed to create the test2 directory: %v", err)
            return
        }
    }

    numberOfFiles := 20
    maxFileSize := 64 * 1024

    for i := 0; i < numberOfFiles; i++ {
        content := make([]byte, rand.Int()%maxFileSize+1)
        _, err = crypto_rand.Read(content)
        if err != nil {
            t.Errorf("Error generating random content: %v", err)
            return
        }

        hasher := sha256.New()
        hasher.Write(content)
        filename := hex.EncodeToString(hasher.Sum(nil))

        fmt.Printf("file: %s\n", filename)

        err = hubicClient.UploadFile("test/test1/"+filename, content, 100)
        if err != nil {
            /*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
                t.Errorf("Failed to upload the file %s: %v", filename, err)
                return
            }
        }
    }

    entries, err := hubicClient.ListEntries("test/test1")
    if err != nil {
        t.Errorf("Error listing randomly generated files: %v", err)
        return
    }

    for _, entry := range entries {

        exists, isDir, size, err := hubicClient.GetFileInfo("test/test1/" + entry.Name)
        fmt.Printf("%s exists: %t, isDir: %t, size: %d, err: %v\n", "test/test1/"+entry.Name, exists, isDir, size, err)

        err = hubicClient.MoveFile("test/test1/"+entry.Name, "test/test2/"+entry.Name)
        if err != nil {
            t.Errorf("Failed to move %s: %v", entry.Name, err)
            return
        }
    }

    entries, err = hubicClient.ListEntries("test/test2")
    if err != nil {
        t.Errorf("Error listing randomly generated files: %v", err)
        return
    }

    for _, entry := range entries {
        readCloser, _, err := hubicClient.DownloadFile("test/test2/" + entry.Name)
        if err != nil {
            t.Errorf("Error downloading file %s: %v", entry.Name, err)
            return
        }

        hasher := sha256.New()
        io.Copy(hasher, readCloser)
        hash := hex.EncodeToString(hasher.Sum(nil))

        if hash != entry.Name {
            t.Errorf("File %s, hash %s", entry.Name, hash)
        }

        readCloser.Close()
    }

    for _, entry := range entries {

        err = hubicClient.DeleteFile("test/test2/" + entry.Name)
        if err != nil {
            t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
            return
        }
    }

}

@@ -5,203 +5,203 @@
package duplicacy

import (
    "fmt"
    "strings"
)

type HubicStorage struct {
    RateLimitedStorage

    client          *HubicClient
    storageDir      string
    numberOfThreads int
}

// CreateHubicStorage creates a Hubic storage object.
func CreateHubicStorage(tokenFile string, storagePath string, threads int) (storage *HubicStorage, err error) {

    for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
        storagePath = storagePath[:len(storagePath)-1]
    }

    client, err := NewHubicClient(tokenFile)
    if err != nil {
        return nil, err
    }

    exists, isDir, _, err := client.GetFileInfo(storagePath)
    if err != nil {
        return nil, err
    }

    if !exists {
        return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
    }

    if !isDir {
        return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
    }

    storage = &HubicStorage{
        client:          client,
        storageDir:      storagePath,
        numberOfThreads: threads,
    }

    for _, path := range []string{"chunks", "snapshots"} {
        dir := storagePath + "/" + path
        exists, isDir, _, err := client.GetFileInfo(dir)
        if err != nil {
            return nil, err
        }
        if !exists {
            err = client.CreateDirectory(storagePath + "/" + path)
            if err != nil {
                return nil, err
            }
        } else if !isDir {
            return nil, fmt.Errorf("%s is not a directory", dir)
        }
    }

    return storage, nil
}

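// Illustrative usage, mirroring TestHubicClient above; the token file,
// storage path and thread count are examples only:
//
//    storage, err := CreateHubicStorage("hubic-token.json", "backups", 4)
//    if err != nil {
//        // the storage path must already exist and be a directory
//    }
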
// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively).
func (storage *HubicStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
    for len(dir) > 0 && dir[len(dir)-1] == '/' {
        dir = dir[:len(dir)-1]
    }

    if dir == "snapshots" {
        entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
        if err != nil {
            return nil, nil, err
        }

        subDirs := []string{}
        for _, entry := range entries {
            if entry.Type == "application/directory" {
                subDirs = append(subDirs, entry.Name+"/")
            }
        }
        return subDirs, nil, nil
    } else if strings.HasPrefix(dir, "snapshots/") {
        entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
        if err != nil {
            return nil, nil, err
        }

        files := []string{}

        for _, entry := range entries {
            if entry.Type == "application/directory" {
                continue
            }
            files = append(files, entry.Name)
        }
        return files, nil, nil
    } else {
        files := []string{}
        sizes := []int64{}
        entries, err := storage.client.ListEntries(storage.storageDir + "/chunks")
        if err != nil {
            return nil, nil, err
        }

        for _, entry := range entries {
            if entry.Type == "application/directory" {
                continue
            }
            files = append(files, entry.Name)
            sizes = append(sizes, entry.Size)
        }
        return files, sizes, nil
    }
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *HubicStorage) DeleteFile(threadIndex int, filePath string) (err error) {
    err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
    if e, ok := err.(HubicError); ok && e.Status == 404 {
        LOG_DEBUG("HUBIC_DELETE", "Ignore 404 error")
        return nil
    }
    return err
}

// MoveFile renames the file.
func (storage *HubicStorage) MoveFile(threadIndex int, from string, to string) (err error) {
|
||||||
fromPath := storage.storageDir + "/" + from
|
fromPath := storage.storageDir + "/" + from
|
||||||
toPath := storage.storageDir + "/" + to
|
toPath := storage.storageDir + "/" + to
|
||||||
|
|
||||||
return storage.client.MoveFile(fromPath, toPath)
|
return storage.client.MoveFile(fromPath, toPath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateDirectory creates a new directory.
|
// CreateDirectory creates a new directory.
|
||||||
func (storage *HubicStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
func (storage *HubicStorage) CreateDirectory(threadIndex int, dir string) (err error) {
|
||||||
for len(dir) > 0 && dir[len(dir) - 1] == '/' {
|
for len(dir) > 0 && dir[len(dir)-1] == '/' {
|
||||||
dir = dir[:len(dir) - 1]
|
dir = dir[:len(dir)-1]
|
||||||
}
|
}
|
||||||
|
|
||||||
return storage.client.CreateDirectory(storage.storageDir + "/" + dir)
|
return storage.client.CreateDirectory(storage.storageDir + "/" + dir)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
// GetFileInfo returns the information about the file or directory at 'filePath'.
|
||||||
func (storage *HubicStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
func (storage *HubicStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
|
||||||
|
|
||||||
for len(filePath) > 0 && filePath[len(filePath) - 1] == '/' {
|
for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
|
||||||
filePath = filePath[:len(filePath) - 1]
|
filePath = filePath[:len(filePath)-1]
|
||||||
}
|
}
|
||||||
return storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
return storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||||
}
|
}
|
||||||
|
|
||||||
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
|
||||||
// the suffix '.fsl'.
|
// the suffix '.fsl'.
|
||||||
func (storage *HubicStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
func (storage *HubicStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
|
||||||
filePath = "chunks/" + chunkID
|
filePath = "chunks/" + chunkID
|
||||||
if isFossil {
|
if isFossil {
|
||||||
filePath += ".fsl"
|
filePath += ".fsl"
|
||||||
}
|
}
|
||||||
|
|
||||||
exist, _, size, err = storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
exist, _, size, err = storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
|
||||||
return filePath, exist, size, err
|
return filePath, exist, size, err
|
||||||
}
|
}
|
||||||
|
|
||||||
// DownloadFile reads the file at 'filePath' into the chunk.
|
// DownloadFile reads the file at 'filePath' into the chunk.
|
||||||
func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
func (storage *HubicStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
|
||||||
readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
|
readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer readCloser.Close()
|
defer readCloser.Close()
|
||||||
|
|
||||||
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit / storage.numberOfThreads)
|
_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThreads)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// UploadFile writes 'content' to the file at 'filePath'.
|
// UploadFile writes 'content' to the file at 'filePath'.
|
||||||
func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
func (storage *HubicStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
|
||||||
return storage.client.UploadFile(storage.storageDir + "/" + filePath, content, storage.UploadRateLimit / storage.numberOfThreads)
|
return storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThreads)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
|
||||||
// managing snapshots.
|
// managing snapshots.
|
||||||
func (storage *HubicStorage) IsCacheNeeded() (bool) { return true }
|
func (storage *HubicStorage) IsCacheNeeded() bool { return true }
|
||||||
|
|
||||||
// If the 'MoveFile' method is implemented.
|
// If the 'MoveFile' method is implemented.
|
||||||
func (storage *HubicStorage) IsMoveFileImplemented() (bool) { return true }
|
func (storage *HubicStorage) IsMoveFileImplemented() bool { return true }
|
||||||
|
|
||||||
// If the storage can guarantee strong consistency.
|
// If the storage can guarantee strong consistency.
|
||||||
func (storage *HubicStorage) IsStrongConsistent() (bool) { return false }
|
func (storage *HubicStorage) IsStrongConsistent() bool { return false }
|
||||||
|
|
||||||
// If the storage supports fast listing of files names.
|
// If the storage supports fast listing of files names.
|
||||||
func (storage *HubicStorage) IsFastListing() (bool) { return true }
|
func (storage *HubicStorage) IsFastListing() bool { return true }
|
||||||
|
|
||||||
// Enable the test mode.
|
// Enable the test mode.
|
||||||
func (storage *HubicStorage) EnableTestMode() {
|
func (storage *HubicStorage) EnableTestMode() {
|
||||||
storage.client.TestMode = true
|
storage.client.TestMode = true
|
||||||
}
|
}
|
||||||
|
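
A usage sketch (not part of the commit above): creating a Hubic-backed storage and listing its snapshot IDs might look like the following. The token path, storage path, and the example function itself are hypothetical.

// exampleHubicUsage is a hypothetical caller of the Hubic backend above.
// It assumes "hubic-token.json" holds a valid OAuth token; paths are illustrative.
func exampleHubicUsage() error {
    storage, err := CreateHubicStorage("hubic-token.json", "backups/duplicacy", 4)
    if err != nil {
        return err
    }

    // Snapshot IDs are returned as subdirectory names ending in '/'.
    snapshotIDs, _, err := storage.ListFiles(0, "snapshots")
    if err != nil {
        return err
    }
    for _, id := range snapshotIDs {
        fmt.Printf("snapshot: %s\n", id)
    }
    return nil
}
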
@@ -7,24 +7,24 @@
package duplicacy

import (
    "github.com/gilbertchen/keyring"
)

func SetKeyringFile(path string) {
    // We only use keyring file on Windows
}

func keyringGet(key string) (value string) {
    value, err := keyring.Get("duplicacy", key)
    if err != nil {
        LOG_DEBUG("KEYRING_GET", "Failed to get the value from the keyring: %v", err)
    }
    return value
}

func keyringSet(key string, value string) {
    err := keyring.Set("duplicacy", key, value)
    if err != nil {
        LOG_DEBUG("KEYRING_SET", "Failed to store the value to the keyring: %v", err)
    }
}
@@ -5,156 +5,156 @@
package duplicacy

import (
    "encoding/json"
    "io/ioutil"
    "syscall"
    "unsafe"
)

var keyringFile string

var (
    dllcrypt32  = syscall.NewLazyDLL("Crypt32.dll")
    dllkernel32 = syscall.NewLazyDLL("Kernel32.dll")

    procEncryptData = dllcrypt32.NewProc("CryptProtectData")
    procDecryptData = dllcrypt32.NewProc("CryptUnprotectData")
    procLocalFree   = dllkernel32.NewProc("LocalFree")
)

type DATA_BLOB struct {
    cbData uint32
    pbData *byte
}

func SetKeyringFile(path string) {
    keyringFile = path
}

func keyringEncrypt(value []byte) ([]byte, error) {

    dataIn := DATA_BLOB{
        pbData: &value[0],
        cbData: uint32(len(value)),
    }
    dataOut := DATA_BLOB{}

    r, _, err := procEncryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
        0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
    if r == 0 {
        return nil, err
    }

    address := uintptr(unsafe.Pointer(dataOut.pbData))
    defer procLocalFree.Call(address)

    encryptedData := make([]byte, dataOut.cbData)
    for i := 0; i < len(encryptedData); i++ {
        encryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i)))
    }
    return encryptedData, nil
}

func keyringDecrypt(value []byte) ([]byte, error) {

    dataIn := DATA_BLOB{
        pbData: &value[0],
        cbData: uint32(len(value)),
    }
    dataOut := DATA_BLOB{}

    r, _, err := procDecryptData.Call(uintptr(unsafe.Pointer(&dataIn)),
        0, 0, 0, 0, 0, uintptr(unsafe.Pointer(&dataOut)))
    if r == 0 {
        return nil, err
    }

    address := uintptr(unsafe.Pointer(dataOut.pbData))
    defer procLocalFree.Call(address)

    decryptedData := make([]byte, dataOut.cbData)
    for i := 0; i < len(decryptedData); i++ {
        address := int(uintptr(unsafe.Pointer(dataOut.pbData)))
        decryptedData[i] = *(*byte)(unsafe.Pointer(uintptr(int(address) + i)))
    }
    return decryptedData, nil
}

func keyringGet(key string) (value string) {
    if keyringFile == "" {
        LOG_DEBUG("KEYRING_NOT_INITIALIZED", "Keyring file not set")
        return ""
    }

    description, err := ioutil.ReadFile(keyringFile)
    if err != nil {
        LOG_DEBUG("KEYRING_READ", "Keyring file not read: %v", err)
        return ""
    }

    var keyring map[string][]byte
    err = json.Unmarshal(description, &keyring)
    if err != nil {
        LOG_DEBUG("KEYRING_PARSE", "Failed to parse the keyring storage file %s: %v", keyringFile, err)
        return ""
    }

    encryptedValue := keyring[key]

    if len(encryptedValue) == 0 {
        return ""
    }

    valueInBytes, err := keyringDecrypt(encryptedValue)
    if err != nil {
        LOG_DEBUG("KEYRING_DECRYPT", "Failed to decrypt the value: %v", err)
        return ""
    }

    return string(valueInBytes)
}

func keyringSet(key string, value string) bool {
    if value == "" {
        return false
    }
    if keyringFile == "" {
        LOG_DEBUG("KEYRING_NOT_INITIALIZED", "Keyring file not set")
        return false
    }

    keyring := make(map[string][]byte)

    description, err := ioutil.ReadFile(keyringFile)
    if err == nil {
        err = json.Unmarshal(description, &keyring)
        if err != nil {
            LOG_DEBUG("KEYRING_PARSE", "Failed to parse the keyring storage file %s: %v", keyringFile, err)
        }
    }

    if value == "" {
        keyring[key] = nil
    } else {
        encryptedValue, err := keyringEncrypt([]byte(value))
        if err != nil {
            LOG_DEBUG("KEYRING_ENCRYPT", "Failed to encrypt the value: %v", err)
            return false
        }
        keyring[key] = encryptedValue
    }

    description, err = json.MarshalIndent(keyring, "", " ")
    if err != nil {
        LOG_DEBUG("KEYRING_MARSHAL", "Failed to marshal the keyring storage: %v", err)
        return false
    }

    err = ioutil.WriteFile(keyringFile, description, 0600)
    if err != nil {
        LOG_DEBUG("KEYRING_WRITE", "Failed to save the keyring storage to file %s: %v", keyringFile, err)
        return false
    }

    return true
}
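
A minimal round-trip sketch for the DPAPI-backed keyring above, assuming it runs on Windows with a writable keyring path; the file name and the example function are illustrative only.

// exampleKeyringRoundTrip is a hypothetical caller of the keyring above.
func exampleKeyringRoundTrip() {
    SetKeyringFile("keyring.json") // illustrative path
    if keyringSet("storage_password", "secret") {
        // keyringGet returns "" on any failure, so an empty string must be
        // treated as "not available" rather than as a stored value.
        password := keyringGet("storage_password")
        fmt.Printf("recovered %d bytes\n", len(password))
    }
}
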
@@ -5,22 +5,22 @@
package duplicacy

import (
    "fmt"
    "os"
    "runtime/debug"
    "sync"
    "testing"
    "time"
)

const (
    DEBUG  = -2
    TRACE  = -1
    INFO   = 0
    WARN   = 1
    ERROR  = 2
    FATAL  = 3
    ASSERT = 4
)

var LogFunction func(level int, logID string, message string)
@@ -28,161 +28,161 @@ var LogFunction func(level int, logID string, message string)
var printLogHeader = false

func EnableLogHeader() {
    printLogHeader = true
}

var printStackTrace = false

func EnableStackTrace() {
    printStackTrace = true
}

var testingT *testing.T

func setTestingT(t *testing.T) {
    testingT = t
}

func getLevelName(level int) string {
    switch level {
    case DEBUG:
        return "DEBUG"
    case TRACE:
        return "TRACE"
    case INFO:
        return "INFO"
    case WARN:
        return "WARN"
    case ERROR:
        return "ERROR"
    case FATAL:
        return "FATAL"
    case ASSERT:
        return "ASSERT"
    default:
        return fmt.Sprintf("[%d]", level)
    }
}

var loggingLevel int

func IsDebugging() bool {
    return loggingLevel <= DEBUG
}

func IsTracing() bool {
    return loggingLevel <= TRACE
}

func SetLoggingLevel(level int) {
    loggingLevel = level
}

func LOG_DEBUG(logID string, format string, v ...interface{}) {
    logf(DEBUG, logID, format, v...)
}

func LOG_TRACE(logID string, format string, v ...interface{}) {
    logf(TRACE, logID, format, v...)
}

func LOG_INFO(logID string, format string, v ...interface{}) {
    logf(INFO, logID, format, v...)
}

func LOG_WARN(logID string, format string, v ...interface{}) {
    logf(WARN, logID, format, v...)
}

func LOG_ERROR(logID string, format string, v ...interface{}) {
    logf(ERROR, logID, format, v...)
}

func LOG_FATAL(logID string, format string, v ...interface{}) {
    logf(FATAL, logID, format, v...)
}

func LOG_ASSERT(logID string, format string, v ...interface{}) {
    logf(ASSERT, logID, format, v...)
}

type Exception struct {
    Level   int
    LogID   string
    Message string
}

var logMutex sync.Mutex

func logf(level int, logID string, format string, v ...interface{}) {

    message := fmt.Sprintf(format, v...)

    if LogFunction != nil {
        LogFunction(level, logID, message)
        return
    }

    now := time.Now()

    // Uncomment this line to enable unbuffered logging for tests
    // fmt.Printf("%s %s %s %s\n", now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)

    if testingT != nil {
        if level < WARN {
            if level >= loggingLevel {
                testingT.Logf("%s %s %s %s\n",
                    now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
            }
        } else {
            testingT.Errorf("%s %s %s %s\n",
                now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
        }
    } else {
        logMutex.Lock()
        defer logMutex.Unlock()

        if level >= loggingLevel {
            if printLogHeader {
                fmt.Printf("%s %s %s %s\n",
                    now.Format("2006-01-02 15:04:05.000"), getLevelName(level), logID, message)
            } else {
                fmt.Printf("%s\n", message)
            }
        }
    }

    if level > WARN {
        panic(Exception{
            Level:   level,
            LogID:   logID,
            Message: message,
        })
    }
}

const (
    duplicacyExitCode = 100
    otherExitCode     = 101
)

// This is the function to be called before exiting when an error occurs.
var RunAtError func() = func() {}

func CatchLogException() {
    if r := recover(); r != nil {
        switch e := r.(type) {
        case Exception:
            if printStackTrace {
                debug.PrintStack()
            }
            RunAtError()
            os.Exit(duplicacyExitCode)
        default:
            fmt.Fprintf(os.Stderr, "%v\n", e)
            debug.PrintStack()
            RunAtError()
            os.Exit(otherExitCode)
        }
    }
}
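
The logging layer above turns any message above WARN into a panic carrying an Exception, which a deferred CatchLogException converts into a clean process exit. A minimal sketch of that call pattern follows; exampleRun and doWork are placeholders, not part of the commit.

// exampleRun shows the intended pairing: LOG_ERROR panics with an Exception,
// and the deferred CatchLogException recovers it and exits with code 100.
func exampleRun() {
    defer CatchLogException()

    SetLoggingLevel(INFO)
    LOG_INFO("EXAMPLE_START", "starting work")

    if err := doWork(); err != nil { // doWork is a placeholder
        LOG_ERROR("EXAMPLE_FAIL", "work failed: %v", err) // never returns
    }
}
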
@@ -5,372 +5,372 @@
package duplicacy

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "math/rand"
    "net/http"
    "strings"
    "sync"
    "time"

    "golang.org/x/oauth2"
)

type OneDriveError struct {
    Status  int
    Message string
}

func (err OneDriveError) Error() string {
    return fmt.Sprintf("%d %s", err.Status, err.Message)
}

type OneDriveErrorResponse struct {
    Error OneDriveError `json:"error"`
}

var OneDriveRefreshTokenURL = "https://duplicacy.com/one_refresh"
var OneDriveAPIURL = "https://api.onedrive.com/v1.0"

type OneDriveClient struct {
    HTTPClient *http.Client

    TokenFile string
    Token     *oauth2.Token
    TokenLock *sync.Mutex

    IsConnected bool
    TestMode    bool
}

func NewOneDriveClient(tokenFile string) (*OneDriveClient, error) {

    description, err := ioutil.ReadFile(tokenFile)
    if err != nil {
        return nil, err
    }

    token := new(oauth2.Token)
    if err := json.Unmarshal(description, token); err != nil {
        return nil, err
    }

    client := &OneDriveClient{
        HTTPClient: http.DefaultClient,
        TokenFile:  tokenFile,
        Token:      token,
        TokenLock:  &sync.Mutex{},
    }

    return client, nil
}

func (client *OneDriveClient) call(url string, method string, input interface{}, contentType string) (io.ReadCloser, int64, error) {

    var response *http.Response

    backoff := 1
    for i := 0; i < 8; i++ {

        LOG_DEBUG("ONEDRIVE_CALL", "%s %s", method, url)

        var inputReader io.Reader

        switch input.(type) {
        default:
            jsonInput, err := json.Marshal(input)
            if err != nil {
                return nil, 0, err
            }
            inputReader = bytes.NewReader(jsonInput)
        case []byte:
            inputReader = bytes.NewReader(input.([]byte))
        case int:
            inputReader = nil
        case *bytes.Buffer:
            inputReader = bytes.NewReader(input.(*bytes.Buffer).Bytes())
        case *RateLimitedReader:
            input.(*RateLimitedReader).Reset()
            inputReader = input.(*RateLimitedReader)
        }

        request, err := http.NewRequest(method, url, inputReader)
        if err != nil {
            return nil, 0, err
        }

        if reader, ok := inputReader.(*RateLimitedReader); ok {
            request.ContentLength = reader.Length()
        }

        if url != OneDriveRefreshTokenURL {
            client.TokenLock.Lock()
            request.Header.Set("Authorization", "Bearer "+client.Token.AccessToken)
            client.TokenLock.Unlock()
        }
        if contentType != "" {
            request.Header.Set("Content-Type", contentType)
        }

        response, err = client.HTTPClient.Do(request)
        if err != nil {
            if client.IsConnected {
                if strings.Contains(err.Error(), "TLS handshake timeout") {
                    // Give a long timeout regardless of backoff when a TLS timeout happens,
                    // hoping that idle connections are not reused on reconnect.
                    retryAfter := time.Duration(rand.Float32()*60000 + 180000)
                    LOG_INFO("ONEDRIVE_RETRY", "TLS handshake timeout; retry after %d milliseconds", retryAfter)
                    time.Sleep(retryAfter * time.Millisecond)
                } else {
                    // For all other errors just blindly retry until the maximum is reached
                    retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
                    LOG_INFO("ONEDRIVE_RETRY", "%v; retry after %d milliseconds", err, retryAfter)
                    time.Sleep(retryAfter * time.Millisecond)
                }
                backoff *= 2
                continue
            }
            return nil, 0, err
        }

        client.IsConnected = true

        if response.StatusCode < 400 {
            return response.Body, response.ContentLength, nil
        }

        defer response.Body.Close()

        errorResponse := &OneDriveErrorResponse{
            Error: OneDriveError{Status: response.StatusCode},
        }

        if response.StatusCode == 401 {

            if url == OneDriveRefreshTokenURL {
                return nil, 0, OneDriveError{Status: response.StatusCode, Message: "Authorization error when refreshing token"}
            }

            err = client.RefreshToken()
            if err != nil {
                return nil, 0, err
            }
            continue
        } else if response.StatusCode > 401 && response.StatusCode != 404 {
            retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
            LOG_INFO("ONEDRIVE_RETRY", "Response code: %d; retry after %d milliseconds", response.StatusCode, retryAfter)
            time.Sleep(retryAfter * time.Millisecond)
            backoff *= 2
            continue
        } else {
            if err := json.NewDecoder(response.Body).Decode(errorResponse); err != nil {
                return nil, 0, OneDriveError{Status: response.StatusCode, Message: fmt.Sprintf("Unexpected response")}
            }

            errorResponse.Error.Status = response.StatusCode
            return nil, 0, errorResponse.Error
        }
    }

    return nil, 0, fmt.Errorf("Maximum number of retries reached")
}

func (client *OneDriveClient) RefreshToken() (err error) {
    client.TokenLock.Lock()
    defer client.TokenLock.Unlock()

    if client.Token.Valid() {
        return nil
    }

    readCloser, _, err := client.call(OneDriveRefreshTokenURL, "POST", client.Token, "")
    if err != nil {
        return fmt.Errorf("failed to refresh the access token: %v", err)
    }

    defer readCloser.Close()

    if err = json.NewDecoder(readCloser).Decode(client.Token); err != nil {
        return err
    }

    description, err := json.Marshal(client.Token)
    if err != nil {
        return err
    }

    err = ioutil.WriteFile(client.TokenFile, description, 0644)
    if err != nil {
        return err
    }

    return nil
}

type OneDriveEntry struct {
    ID     string
    Name   string
    Folder map[string]interface{}
    Size   int64
}

type OneDriveListEntriesOutput struct {
    Entries  []OneDriveEntry `json:"value"`
    NextLink string          `json:"@odata.nextLink"`
}

func (client *OneDriveClient) ListEntries(path string) ([]OneDriveEntry, error) {

    entries := []OneDriveEntry{}

    url := OneDriveAPIURL + "/drive/root:/" + path + ":/children"
    if path == "" {
        url = OneDriveAPIURL + "/drive/root/children"
    }
    if client.TestMode {
        url += "?top=8"
    } else {
        url += "?top=1000"
    }
    url += "&select=name,size,folder"

    for {
        readCloser, _, err := client.call(url, "GET", 0, "")
        if err != nil {
            return nil, err
        }

        defer readCloser.Close()

        output := &OneDriveListEntriesOutput{}

        if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
            return nil, err
        }

        entries = append(entries, output.Entries...)

        url = output.NextLink
        if url == "" {
            break
        }
    }

    return entries, nil
}

func (client *OneDriveClient) GetFileInfo(path string) (string, bool, int64, error) {

    url := OneDriveAPIURL + "/drive/root:/" + path
    url += "?select=id,name,size,folder"

    readCloser, _, err := client.call(url, "GET", 0, "")
    if err != nil {
        if e, ok := err.(OneDriveError); ok && e.Status == 404 {
            return "", false, 0, nil
        } else {
            return "", false, 0, err
        }
    }

    defer readCloser.Close()

    output := &OneDriveEntry{}

    if err = json.NewDecoder(readCloser).Decode(&output); err != nil {
        return "", false, 0, err
    }

    return output.ID, len(output.Folder) != 0, output.Size, nil
}

func (client *OneDriveClient) DownloadFile(path string) (io.ReadCloser, int64, error) {

    url := OneDriveAPIURL + "/drive/items/root:/" + path + ":/content"

    return client.call(url, "GET", 0, "")
}

func (client *OneDriveClient) UploadFile(path string, content []byte, rateLimit int) (err error) {

    url := OneDriveAPIURL + "/drive/root:/" + path + ":/content"

    readCloser, _, err := client.call(url, "PUT", CreateRateLimitedReader(content, rateLimit), "application/octet-stream")

    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *OneDriveClient) DeleteFile(path string) error {

    url := OneDriveAPIURL + "/drive/root:/" + path

    readCloser, _, err := client.call(url, "DELETE", 0, "")
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *OneDriveClient) MoveFile(path string, parent string) error {

    url := OneDriveAPIURL + "/drive/root:/" + path

    parentReference := make(map[string]string)
    parentReference["path"] = "/drive/root:/" + parent

    parameters := make(map[string]interface{})
    parameters["parentReference"] = parentReference

    readCloser, _, err := client.call(url, "PATCH", parameters, "application/json")
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}

func (client *OneDriveClient) CreateDirectory(path string, name string) error {

    url := OneDriveAPIURL + "/root/children"

    if path != "" {

        parentID, isDir, _, err := client.GetFileInfo(path)
        if err != nil {
            return err
        }

        if parentID == "" {
            return fmt.Errorf("The path '%s' does not exist", path)
        }

        if !isDir {
            return fmt.Errorf("The path '%s' is not a directory", path)
        }

        url = OneDriveAPIURL + "/drive/items/" + parentID + "/children"
    }

    parameters := make(map[string]interface{})
    parameters["name"] = name
    parameters["folder"] = make(map[string]int)

    readCloser, _, err := client.call(url, "POST", parameters, "application/json")
    if err != nil {
        return err
    }

    readCloser.Close()
    return nil
}
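
For reference, the retry loop in call above waits a random duration uniform in [0, 1000*backoff) milliseconds and doubles backoff on every failed attempt, for at most 8 attempts. A standalone sketch of that schedule (illustrative only, not part of the commit):

// exampleBackoffSchedule reproduces the randomized exponential backoff
// used by OneDriveClient.call: attempt i waits up to 1000*2^i milliseconds.
func exampleBackoffSchedule() {
    backoff := 1
    for i := 0; i < 8; i++ {
        retryAfter := time.Duration(rand.Float32() * 1000.0 * float32(backoff))
        fmt.Printf("attempt %d: sleeping %v (max %d ms)\n", i, retryAfter*time.Millisecond, 1000*backoff)
        backoff *= 2
    }
}
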
@@ -5,142 +5,141 @@
package duplicacy

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
    "io"
    "testing"

    crypto_rand "crypto/rand"
    "math/rand"
)

func TestOneDriveClient(t *testing.T) {

    oneDriveClient, err := NewOneDriveClient("one-token.json")
    if err != nil {
        t.Errorf("Failed to create the OneDrive client: %v", err)
        return
    }

    oneDriveClient.TestMode = true

    existingFiles, err := oneDriveClient.ListEntries("")
    for _, file := range existingFiles {
        fmt.Printf("name: %s, isDir: %t\n", file.Name, len(file.Folder) != 0)
    }

    testID, _, _, err := oneDriveClient.GetFileInfo("test")
    if err != nil {
        t.Errorf("Failed to list the test directory: %v", err)
        return
    }
    if testID == "" {
        err = oneDriveClient.CreateDirectory("", "test")
        if err != nil {
            t.Errorf("Failed to create the test directory: %v", err)
            return
        }
    }

    test1ID, _, _, err := oneDriveClient.GetFileInfo("test/test1")
    if err != nil {
        t.Errorf("Failed to list the test1 directory: %v", err)
        return
    }
    if test1ID == "" {
        err = oneDriveClient.CreateDirectory("test", "test1")
        if err != nil {
            t.Errorf("Failed to create the test1 directory: %v", err)
            return
        }
    }

    test2ID, _, _, err := oneDriveClient.GetFileInfo("test/test2")
    if err != nil {
        t.Errorf("Failed to list the test2 directory: %v", err)
        return
    }
    if test2ID == "" {
        err = oneDriveClient.CreateDirectory("test", "test2")
        if err != nil {
            t.Errorf("Failed to create the test2 directory: %v", err)
            return
        }
    }

    numberOfFiles := 20
    maxFileSize := 64 * 1024

    for i := 0; i < numberOfFiles; i++ {
        content := make([]byte, rand.Int()%maxFileSize+1)
        _, err = crypto_rand.Read(content)
        if err != nil {
            t.Errorf("Error generating random content: %v", err)
            return
        }

        hasher := sha256.New()
        hasher.Write(content)
        filename := hex.EncodeToString(hasher.Sum(nil))

        fmt.Printf("file: %s\n", filename)

        err = oneDriveClient.UploadFile("test/test1/"+filename, content, 100)
        if err != nil {
            /*if e, ok := err.(ACDError); !ok || e.Status != 409 */ {
                t.Errorf("Failed to upload the file %s: %v", filename, err)
                return
            }
        }
    }

    entries, err := oneDriveClient.ListEntries("test/test1")
    if err != nil {
        t.Errorf("Error listing randomly generated files: %v", err)
        return
    }

    for _, entry := range entries {
        err = oneDriveClient.MoveFile("test/test1/"+entry.Name, "test/test2")
        if err != nil {
            t.Errorf("Failed to move %s: %v", entry.Name, err)
            return
        }
    }

    entries, err = oneDriveClient.ListEntries("test/test2")
    if err != nil {
        t.Errorf("Error listing randomly generated files: %v", err)
        return
    }

    for _, entry := range entries {
        readCloser, _, err := oneDriveClient.DownloadFile("test/test2/" + entry.Name)
        if err != nil {
            t.Errorf("Error downloading file %s: %v", entry.Name, err)
            return
        }

        hasher := sha256.New()
        io.Copy(hasher, readCloser)
        hash := hex.EncodeToString(hasher.Sum(nil))

        if hash != entry.Name {
            t.Errorf("File %s, hash %s", entry.Name, hash)
        }

        readCloser.Close()
    }

    for _, entry := range entries {
        err = oneDriveClient.DeleteFile("test/test2/" + entry.Name)
        if err != nil {
            t.Errorf("Failed to delete the file %s: %v", entry.Name, err)
            return
        }
    }

}
|||||||
@@ -5,238 +5,238 @@
package duplicacy

import (
	"fmt"
	"path"
	"strings"
)

type OneDriveStorage struct {
	RateLimitedStorage

	client         *OneDriveClient
	storageDir     string
	numberOfThread int
}

// CreateOneDriveStorage creates a OneDrive storage object.
func CreateOneDriveStorage(tokenFile string, storagePath string, threads int) (storage *OneDriveStorage, err error) {

	for len(storagePath) > 0 && storagePath[len(storagePath)-1] == '/' {
		storagePath = storagePath[:len(storagePath)-1]
	}

	client, err := NewOneDriveClient(tokenFile)
	if err != nil {
		return nil, err
	}

	fileID, isDir, _, err := client.GetFileInfo(storagePath)
	if err != nil {
		return nil, err
	}

	if fileID == "" {
		return nil, fmt.Errorf("Path '%s' doesn't exist", storagePath)
	}

	if !isDir {
		return nil, fmt.Errorf("Path '%s' is not a directory", storagePath)
	}

	storage = &OneDriveStorage{
		client:         client,
		storageDir:     storagePath,
		numberOfThread: threads,
	}

	for _, path := range []string{"chunks", "fossils", "snapshots"} {
		dir := storagePath + "/" + path
		dirID, isDir, _, err := client.GetFileInfo(dir)
		if err != nil {
			return nil, err
		}
		if dirID == "" {
			err = client.CreateDirectory(storagePath, path)
			if err != nil {
				return nil, err
			}
		} else if !isDir {
			return nil, fmt.Errorf("%s is not a directory", dir)
		}
	}

	return storage, nil
}

// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *OneDriveStorage) ListFiles(threadIndex int, dir string) ([]string, []int64, error) {
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	if dir == "snapshots" {
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		subDirs := []string{}
		for _, entry := range entries {
			if len(entry.Folder) > 0 {
				subDirs = append(subDirs, entry.Name+"/")
			}
		}
		return subDirs, nil, nil
	} else if strings.HasPrefix(dir, "snapshots/") {
		entries, err := storage.client.ListEntries(storage.storageDir + "/" + dir)
		if err != nil {
			return nil, nil, err
		}

		files := []string{}

		for _, entry := range entries {
			if len(entry.Folder) == 0 {
				files = append(files, entry.Name)
			}
		}
		return files, nil, nil
	} else {
		files := []string{}
		sizes := []int64{}
		for _, parent := range []string{"chunks", "fossils"} {
			entries, err := storage.client.ListEntries(storage.storageDir + "/" + parent)
			if err != nil {
				return nil, nil, err
			}

			for _, entry := range entries {
				name := entry.Name
				if parent == "fossils" {
					name += ".fsl"
				}
				files = append(files, name)
				sizes = append(sizes, entry.Size)
			}
		}
		return files, sizes, nil
	}
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *OneDriveStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	if strings.HasSuffix(filePath, ".fsl") && strings.HasPrefix(filePath, "chunks/") {
		filePath = "fossils/" + filePath[len("chunks/"):len(filePath)-len(".fsl")]
	}

	err = storage.client.DeleteFile(storage.storageDir + "/" + filePath)
	if e, ok := err.(OneDriveError); ok && e.Status == 404 {
		LOG_DEBUG("ONEDRIVE_DELETE", "Ignore 404 error")
		return nil
	}
	return err
}

// MoveFile renames the file.
func (storage *OneDriveStorage) MoveFile(threadIndex int, from string, to string) (err error) {
	fromPath := storage.storageDir + "/" + from
	toParent := storage.storageDir + "/fossils"
	if strings.HasSuffix(from, ".fsl") {
		fromPath = storage.storageDir + "/fossils/" + from[len("chunks/"):len(from)-len(".fsl")]
		toParent = storage.storageDir + "/chunks"
	}

	err = storage.client.MoveFile(fromPath, toParent)
	if err != nil {
		if e, ok := err.(OneDriveError); ok && e.Status == 409 {
			LOG_DEBUG("ONEDRIVE_MOVE", "Ignore 409 conflict error")
		} else {
			return err
		}
	}
	return nil
}

// CreateDirectory creates a new directory.
func (storage *OneDriveStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	for len(dir) > 0 && dir[len(dir)-1] == '/' {
		dir = dir[:len(dir)-1]
	}

	parent := path.Dir(dir)

	if parent == "." {
		return storage.client.CreateDirectory(storage.storageDir, dir)
	} else {
		return storage.client.CreateDirectory(storage.storageDir+"/"+parent, path.Base(dir))
	}
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *OneDriveStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	for len(filePath) > 0 && filePath[len(filePath)-1] == '/' {
		filePath = filePath[:len(filePath)-1]
	}
	fileID, isDir, size, err := storage.client.GetFileInfo(storage.storageDir + "/" + filePath)
	return fileID != "", isDir, size, err
}

// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *OneDriveStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
	filePath = "chunks/" + chunkID
	realPath := storage.storageDir + "/" + filePath
	if isFossil {
		filePath += ".fsl"
		realPath = storage.storageDir + "/fossils/" + chunkID
	}

	fileID, _, size, err := storage.client.GetFileInfo(realPath)
	return filePath, fileID != "", size, err
}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *OneDriveStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
	readCloser, _, err := storage.client.DownloadFile(storage.storageDir + "/" + filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/storage.numberOfThread)
	return err
}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *OneDriveStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {
	err = storage.client.UploadFile(storage.storageDir+"/"+filePath, content, storage.UploadRateLimit/storage.numberOfThread)

	if e, ok := err.(OneDriveError); ok && e.Status == 409 {
		LOG_TRACE("ONEDRIVE_UPLOAD", "File %s already exists", filePath)
		return nil
	} else {
		return err
	}
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *OneDriveStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *OneDriveStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *OneDriveStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of file names.
func (storage *OneDriveStorage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *OneDriveStorage) EnableTestMode() {
	storage.client.TestMode = true
}
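One detail worth calling out in DeleteFile, MoveFile, and FindChunk above: a fossil is addressed logically as "chunks/<id>.fsl", but on OneDrive it is physically stored as "fossils/<id>". A hypothetical helper, not part of this commit, that restates the mapping those methods perform inline:

func fossilRealPath(storageDir string, logicalPath string) string {
	// "chunks/<id>.fsl" is stored as "<storageDir>/fossils/<id>";
	// everything else lives directly under storageDir.
	if strings.HasPrefix(logicalPath, "chunks/") && strings.HasSuffix(logicalPath, ".fsl") {
		id := logicalPath[len("chunks/") : len(logicalPath)-len(".fsl")]
		return storageDir + "/fossils/" + id
	}
	return storageDir + "/" + logicalPath
}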
@@ -5,119 +5,119 @@
package duplicacy

import (
	"encoding/json"
	"io/ioutil"
	"os"
	"path"
	"reflect"
	"strings"
)

// Preference stores options for each storage.
type Preference struct {
	Name              string            `json:"name"`
	SnapshotID        string            `json:"id"`
	StorageURL        string            `json:"storage"`
	Encrypted         bool              `json:"encrypted"`
	BackupProhibited  bool              `json:"no_backup"`
	RestoreProhibited bool              `json:"no_restore"`
	DoNotSavePassword bool              `json:"no_save_password"`
	Keys              map[string]string `json:"keys"`
}

var preferencePath string
var Preferences []Preference

func LoadPreferences(repository string) bool {

	preferencePath = path.Join(repository, DUPLICACY_DIRECTORY)

	stat, err := os.Stat(preferencePath)
	if err != nil {
		LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", repository, err)
		return false
	}

	if !stat.IsDir() {
		content, err := ioutil.ReadFile(preferencePath)
		if err != nil {
			LOG_ERROR("DOT_DUPLICACY_PATH", "Failed to locate the preference path: %v", err)
			return false
		}

		realPreferencePath := strings.TrimSpace(string(content))
		stat, err := os.Stat(realPreferencePath)
		if err != nil {
			LOG_ERROR("PREFERENCE_PATH", "Failed to retrieve the information about the directory %s: %v", content, err)
			return false
		}
		if !stat.IsDir() {
			LOG_ERROR("PREFERENCE_PATH", "The preference path %s is not a directory", realPreferencePath)
		}

		preferencePath = realPreferencePath
	}

	description, err := ioutil.ReadFile(path.Join(preferencePath, "preferences"))
	if err != nil {
		LOG_ERROR("PREFERENCE_OPEN", "Failed to read the preference file from repository %s: %v", repository, err)
		return false
	}

	err = json.Unmarshal(description, &Preferences)
	if err != nil {
		LOG_ERROR("PREFERENCE_PARSE", "Failed to parse the preference file for repository %s: %v", repository, err)
		return false
	}

	if len(Preferences) == 0 {
		LOG_ERROR("PREFERENCE_NONE", "No preference found in the preference file")
		return false
	}

	return true
}

func GetDuplicacyPreferencePath() string {
	if preferencePath == "" {
		LOG_ERROR("PREFERENCE_PATH", "The preference path has not been set")
		return ""
	}
	return preferencePath
}

// Normally 'preferencePath' is set in LoadPreferences; however, if LoadPreferences is not called, this function
// provides another chance to set 'preferencePath'
func SetDuplicacyPreferencePath(p string) {
	preferencePath = p
}

func SavePreferences() bool {
	description, err := json.MarshalIndent(Preferences, "", " ")
	if err != nil {
		LOG_ERROR("PREFERENCE_MARSHAL", "Failed to marshal the repository preferences: %v", err)
		return false
	}
	preferenceFile := path.Join(GetDuplicacyPreferencePath(), "preferences")

	err = ioutil.WriteFile(preferenceFile, description, 0600)
	if err != nil {
		LOG_ERROR("PREFERENCE_WRITE", "Failed to save the preference file %s: %v", preferenceFile, err)
		return false
	}

	return true
}

func FindPreference(name string) *Preference {
	for i, preference := range Preferences {
		if preference.Name == name || preference.StorageURL == name {
			return &Preferences[i]
		}
	}

	return nil
}

func (preference *Preference) Equal(other *Preference) bool {
	return reflect.DeepEqual(preference, other)
}
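Given the struct tags above, the "preferences" file that LoadPreferences parses is a JSON array of these objects. A small sketch of the round trip, wrapped in a hypothetical helper; all values are invented for illustration, not duplicacy defaults:

func examplePreferences() []byte {
	pref := Preference{
		Name:       "default",
		SnapshotID: "my-backups",
		StorageURL: "sftp://user@example.com/backups",
		Encrypted:  true,
	}
	data, _ := json.MarshalIndent([]Preference{pref}, "", " ")
	// 'data' holds the JSON array LoadPreferences expects in the
	// 'preferences' file, e.g. [{"name":"default","id":"my-backups",...}]
	return data
}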
@@ -5,208 +5,209 @@
package duplicacy

import (
	"time"

	"github.com/gilbertchen/goamz/aws"
	"github.com/gilbertchen/goamz/s3"
)

// S3CStorage is a storage backend for s3 compatible storages that require V2 Signing.
type S3CStorage struct {
	RateLimitedStorage

	buckets    []*s3.Bucket
	storageDir string
}

// CreateS3CStorage creates an Amazon S3 storage object.
func CreateS3CStorage(regionName string, endpoint string, bucketName string, storageDir string,
	accessKey string, secretKey string, threads int) (storage *S3CStorage, err error) {

	var region aws.Region

	if endpoint == "" {
		if regionName == "" {
			regionName = "us-east-1"
		}
		region = aws.Regions[regionName]
	} else {
		region = aws.Region{Name: regionName, S3Endpoint: "https://" + endpoint}
	}

	auth := aws.Auth{AccessKey: accessKey, SecretKey: secretKey}

	var buckets []*s3.Bucket
	for i := 0; i < threads; i++ {
		s3Client := s3.New(auth, region)
		s3Client.AttemptStrategy = aws.AttemptStrategy{
			Min:   8,
			Total: 300 * time.Second,
			Delay: 1000 * time.Millisecond,
		}

		bucket := s3Client.Bucket(bucketName)
		buckets = append(buckets, bucket)
	}

	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &S3CStorage{
		buckets:    buckets,
		storageDir: storageDir,
	}

	return storage, nil
}

// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *S3CStorage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if len(dir) > 0 && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	dirLength := len(storage.storageDir + dir)
	if dir == "snapshots/" {
		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "/", "", 100)
		if err != nil {
			return nil, nil, err
		}

		for _, subDir := range results.CommonPrefixes {
			files = append(files, subDir[dirLength:])
		}
		return files, nil, nil
	} else if dir == "chunks/" {
		marker := ""
		for {
			results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", marker, 1000)
			if err != nil {
				return nil, nil, err
			}

			for _, object := range results.Contents {
				files = append(files, object.Key[dirLength:])
				sizes = append(sizes, object.Size)
			}

			if !results.IsTruncated {
				break
			}

			marker = results.Contents[len(results.Contents)-1].Key
		}
		return files, sizes, nil

	} else {

		results, err := storage.buckets[threadIndex].List(storage.storageDir+dir, "", "", 1000)
		if err != nil {
			return nil, nil, err
		}

		for _, object := range results.Contents {
			files = append(files, object.Key[dirLength:])
		}
		return files, nil, nil
	}
}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3CStorage) DeleteFile(threadIndex int, filePath string) (err error) {
	return storage.buckets[threadIndex].Del(storage.storageDir + filePath)
}

// MoveFile renames the file.
func (storage *S3CStorage) MoveFile(threadIndex int, from string, to string) (err error) {

	options := s3.CopyOptions{ContentType: "application/duplicacy"}
	_, err = storage.buckets[threadIndex].PutCopy(storage.storageDir+to, s3.Private, options, storage.buckets[threadIndex].Name+"/"+storage.storageDir+from)
	if err != nil {
		// The copy failed, so the error must be propagated rather than swallowed.
		return err
	}

	return storage.DeleteFile(threadIndex, from)
}

// CreateDirectory creates a new directory.
func (storage *S3CStorage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3CStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	response, err := storage.buckets[threadIndex].Head(storage.storageDir+filePath, nil)
	if err != nil {
		if e, ok := err.(*s3.Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
			return false, false, 0, nil
		} else {
			return false, false, 0, err
		}
	}

	if response.StatusCode == 403 || response.StatusCode == 404 {
		return false, false, 0, nil
	} else {
		return true, false, response.ContentLength, nil
	}
}

// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *S3CStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {

	filePath = "chunks/" + chunkID
	if isFossil {
		filePath += ".fsl"
	}

	exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)

	if err != nil {
		return "", false, 0, err
	} else {
		return filePath, exist, size, err
	}

}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3CStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

	readCloser, err := storage.buckets[threadIndex].GetReader(storage.storageDir + filePath)
	if err != nil {
		return err
	}

	defer readCloser.Close()

	_, err = RateLimitedCopy(chunk, readCloser, storage.DownloadRateLimit/len(storage.buckets))
	return err

}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3CStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	options := s3.Options{}
	reader := CreateRateLimitedReader(content, storage.UploadRateLimit/len(storage.buckets))
	return storage.buckets[threadIndex].PutReader(storage.storageDir+filePath, reader, int64(len(content)), "application/duplicacy", s3.Private, options)
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *S3CStorage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *S3CStorage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *S3CStorage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of file names.
func (storage *S3CStorage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *S3CStorage) EnableTestMode() {}
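Because every constructor parameter above is a plain string or int, wiring up a V2-signing backend is direct. A hypothetical usage sketch with placeholder endpoint and credentials; the rate-limit unit is assumed to be bytes per second, matching how it is divided across the per-thread bucket handles:

func openS3CBackupStorage() (*S3CStorage, error) {
	// 4 threads gives 4 independent bucket handles; the limits below
	// are aggregates that DownloadFile/UploadFile split across them.
	storage, err := CreateS3CStorage("", "s3.example.com", "my-bucket", "backups",
		"ACCESS_KEY", "SECRET_KEY", 4)
	if err != nil {
		return nil, err
	}
	storage.DownloadRateLimit = 2 * 1024 * 1024
	storage.UploadRateLimit = 1 * 1024 * 1024
	return storage, nil
}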
@@ -5,266 +5,266 @@
package duplicacy

import (
	"reflect"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

type S3Storage struct {
	RateLimitedStorage

	client          *s3.S3
	bucket          string
	storageDir      string
	numberOfThreads int
}

// CreateS3Storage creates an Amazon S3 storage object.
func CreateS3Storage(regionName string, endpoint string, bucketName string, storageDir string,
	accessKey string, secretKey string, threads int,
	isSSLSupported bool, isMinioCompatible bool) (storage *S3Storage, err error) {

	token := ""

	auth := credentials.NewStaticCredentials(accessKey, secretKey, token)

	if regionName == "" && endpoint == "" {
		defaultRegionConfig := &aws.Config{
			Region:      aws.String("us-east-1"),
			Credentials: auth,
		}

		s3Client := s3.New(session.New(defaultRegionConfig))

		response, err := s3Client.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String(bucketName)})

		if err != nil {
			return nil, err
		}

		regionName = "us-east-1"
		if response.LocationConstraint != nil {
			regionName = *response.LocationConstraint
		}
	}

	config := &aws.Config{
		Region:           aws.String(regionName),
		Credentials:      auth,
		Endpoint:         aws.String(endpoint),
		S3ForcePathStyle: aws.Bool(isMinioCompatible),
		DisableSSL:       aws.Bool(!isSSLSupported),
	}

	if len(storageDir) > 0 && storageDir[len(storageDir)-1] != '/' {
		storageDir += "/"
	}

	storage = &S3Storage{
		client:          s3.New(session.New(config)),
		bucket:          bucketName,
		storageDir:      storageDir,
		numberOfThreads: threads,
	}

	return storage, nil
}

// ListFiles returns the list of files and subdirectories under 'dir' (non-recursively)
func (storage *S3Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {
	if len(dir) > 0 && dir[len(dir)-1] != '/' {
		dir += "/"
	}

	if dir == "snapshots/" {
		dir = storage.storageDir + dir
		input := s3.ListObjectsInput{
			Bucket:    aws.String(storage.bucket),
			Prefix:    aws.String(dir),
			Delimiter: aws.String("/"),
			MaxKeys:   aws.Int64(1000),
		}

		output, err := storage.client.ListObjects(&input)
		if err != nil {
			return nil, nil, err
		}

		for _, subDir := range output.CommonPrefixes {
			files = append(files, (*subDir.Prefix)[len(dir):])
		}
		return files, nil, nil
	} else {
		dir = storage.storageDir + dir
		marker := ""
		for {
			input := s3.ListObjectsInput{
				Bucket:  aws.String(storage.bucket),
				Prefix:  aws.String(dir),
				MaxKeys: aws.Int64(1000),
				Marker:  aws.String(marker),
			}

			output, err := storage.client.ListObjects(&input)
			if err != nil {
				return nil, nil, err
			}

			for _, object := range output.Contents {
				files = append(files, (*object.Key)[len(dir):])
				sizes = append(sizes, *object.Size)
			}

			if !*output.IsTruncated {
				break
			}

			marker = *output.Contents[len(output.Contents)-1].Key
		}
		return files, sizes, nil
	}

}

// DeleteFile deletes the file or directory at 'filePath'.
func (storage *S3Storage) DeleteFile(threadIndex int, filePath string) (err error) {
	input := &s3.DeleteObjectInput{
		Bucket: aws.String(storage.bucket),
		Key:    aws.String(storage.storageDir + filePath),
	}
	_, err = storage.client.DeleteObject(input)
	return err
}

// MoveFile renames the file.
func (storage *S3Storage) MoveFile(threadIndex int, from string, to string) (err error) {

	input := &s3.CopyObjectInput{
		Bucket:     aws.String(storage.bucket),
		CopySource: aws.String(storage.bucket + "/" + storage.storageDir + from),
		Key:        aws.String(storage.storageDir + to),
	}

	_, err = storage.client.CopyObject(input)
	if err != nil {
		return err
	}

	return storage.DeleteFile(threadIndex, from)

}

// CreateDirectory creates a new directory.
func (storage *S3Storage) CreateDirectory(threadIndex int, dir string) (err error) {
	return nil
}

// GetFileInfo returns the information about the file or directory at 'filePath'.
func (storage *S3Storage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {

	input := &s3.HeadObjectInput{
		Bucket: aws.String(storage.bucket),
		Key:    aws.String(storage.storageDir + filePath),
	}

	output, err := storage.client.HeadObject(input)
	if err != nil {
		if e, ok := err.(awserr.RequestFailure); ok && (e.StatusCode() == 403 || e.StatusCode() == 404) {
			return false, false, 0, nil
		} else {
			return false, false, 0, err
		}
	}

	if output == nil || output.ContentLength == nil {
		return false, false, 0, nil
	} else {
		return true, false, *output.ContentLength, nil
	}
}

// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
// the suffix '.fsl'.
func (storage *S3Storage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {

	filePath = "chunks/" + chunkID
	if isFossil {
		filePath += ".fsl"
	}

	exist, _, size, err = storage.GetFileInfo(threadIndex, filePath)

	if err != nil {
		return "", false, 0, err
	} else {
		return filePath, exist, size, err
	}

}

// DownloadFile reads the file at 'filePath' into the chunk.
func (storage *S3Storage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {

	input := &s3.GetObjectInput{
		Bucket: aws.String(storage.bucket),
		Key:    aws.String(storage.storageDir + filePath),
	}

	output, err := storage.client.GetObject(input)
	if err != nil {
		return err
	}

	defer output.Body.Close()

	// The global download rate limit is split evenly across all threads.
	_, err = RateLimitedCopy(chunk, output.Body, storage.DownloadRateLimit/storage.numberOfThreads)
	return err

}

// UploadFile writes 'content' to the file at 'filePath'.
func (storage *S3Storage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

	attempts := 0

	for {
		input := &s3.PutObjectInput{
			Bucket:      aws.String(storage.bucket),
			Key:         aws.String(storage.storageDir + filePath),
			ACL:         aws.String(s3.ObjectCannedACLPrivate),
			// The global upload rate limit is split evenly across all threads.
			Body:        CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads),
			ContentType: aws.String("application/duplicacy"),
		}

		_, err = storage.client.PutObject(input)
		if err == nil || attempts >= 3 || !strings.Contains(err.Error(), "XAmzContentSHA256Mismatch") {
			return err
		}

		LOG_INFO("S3_RETRY", "Retrying on %s: %v", reflect.TypeOf(err), err)
		attempts += 1
	}

	return err
}

// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
// managing snapshots.
func (storage *S3Storage) IsCacheNeeded() bool { return true }

// If the 'MoveFile' method is implemented.
func (storage *S3Storage) IsMoveFileImplemented() bool { return true }

// If the storage can guarantee strong consistency.
func (storage *S3Storage) IsStrongConsistent() bool { return false }

// If the storage supports fast listing of file names.
func (storage *S3Storage) IsFastListing() bool { return true }

// Enable the test mode.
func (storage *S3Storage) EnableTestMode() {}
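The region-discovery branch above matters for plain AWS use: with both regionName and endpoint empty, the client first connects to us-east-1 and asks GetBucketLocation where the bucket really lives before building the final session. A hypothetical call exercising that path; every value is a placeholder:

func openS3BackupStorage() (*S3Storage, error) {
	// Empty regionName and endpoint trigger the GetBucketLocation lookup;
	// SSL is enabled and Minio path-style addressing is off.
	return CreateS3Storage("", "", "my-bucket", "backups",
		"ACCESS_KEY", "SECRET_KEY", 4, true, false)
}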
@@ -5,305 +5,304 @@
package duplicacy

import (
	"fmt"
	"io"
	"math/rand"
	"net"
	"os"
	"path"
	"runtime"
	"time"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

type SFTPStorage struct {
	RateLimitedStorage

	client          *sftp.Client
	storageDir      string
	numberOfThreads int
}

func CreateSFTPStorageWithPassword(server string, port int, username string, storageDir string,
	password string, threads int) (storage *SFTPStorage, err error) {

	authMethods := []ssh.AuthMethod{ssh.Password(password)}

	hostKeyCallback := func(hostname string, remote net.Addr,
		key ssh.PublicKey) error {
		return nil
	}

	return CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyCallback, threads)
}

func CreateSFTPStorage(server string, port int, username string, storageDir string,
	authMethods []ssh.AuthMethod,
	hostKeyCallback func(hostname string, remote net.Addr,
		key ssh.PublicKey) error, threads int) (storage *SFTPStorage, err error) {

	config := &ssh.ClientConfig{
		User:            username,
		Auth:            authMethods,
		HostKeyCallback: hostKeyCallback,
	}

	if server == "sftp.hidrive.strato.com" {
		config.Ciphers = []string{"aes128-cbc", "aes128-ctr", "aes256-ctr"}
	}

	serverAddress := fmt.Sprintf("%s:%d", server, port)
	connection, err := ssh.Dial("tcp", serverAddress, config)
	if err != nil {
		return nil, err
	}

	client, err := sftp.NewClient(connection)
	if err != nil {
		connection.Close()
		return nil, err
	}

	for storageDir[len(storageDir)-1] == '/' {
		storageDir = storageDir[:len(storageDir)-1]
	}

	fileInfo, err := client.Stat(storageDir)
	if err != nil {
		return nil, fmt.Errorf("Can't access the storage path %s: %v", storageDir, err)
	}

	if !fileInfo.IsDir() {
		return nil, fmt.Errorf("The storage path %s is not a directory", storageDir)
	}

	storage = &SFTPStorage{
		client:          client,
		storageDir:      storageDir,
		numberOfThreads: threads,
	}

	// Random number for generating the temporary chunk file suffix.
	rand.Seed(time.Now().UnixNano())

	runtime.SetFinalizer(storage, CloseSFTPStorage)

	return storage, nil
}

func CloseSFTPStorage(storage *SFTPStorage) {
	storage.client.Close()
}
||||||
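CreateSFTPStorage registers CloseSFTPStorage as a finalizer, so the SSH connection is eventually closed when the storage object becomes unreachable, even if the caller never closes it explicitly. A minimal sketch of the same pattern with a hypothetical resource type (not duplicacy code):

package main

import (
    "fmt"
    "runtime"
    "time"
)

type resource struct{ name string }

func closeResource(r *resource) {
    fmt.Printf("finalizer: closing %s\n", r.name)
}

func main() {
    r := &resource{name: "sftp-connection"}
    // Run closeResource when r becomes unreachable and is collected.
    runtime.SetFinalizer(r, closeResource)
    r = nil

    // Finalizers run after garbage collection, on a best-effort basis;
    // forcing a GC here just makes the effect observable in this sketch.
    runtime.GC()
    time.Sleep(100 * time.Millisecond)
}

Note that finalizers are a safety net, not a substitute for deterministic cleanup: the runtime makes no promise about when, or even whether, they run.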
 // ListFiles returns the list of files and subdirectories under 'dirPath' (non-recursively).
 func (storage *SFTPStorage) ListFiles(threadIndex int, dirPath string) (files []string, sizes []int64, err error) {

     entries, err := storage.client.ReadDir(path.Join(storage.storageDir, dirPath))
     if err != nil {
         return nil, nil, err
     }

     for _, entry := range entries {
         name := entry.Name()
-        if entry.IsDir() && name[len(name) - 1] != '/' {
+        if entry.IsDir() && name[len(name)-1] != '/' {
             name += "/"
         }

         files = append(files, name)
         sizes = append(sizes, entry.Size())
     }

     return files, sizes, nil
 }
 // DeleteFile deletes the file or directory at 'filePath'.
 func (storage *SFTPStorage) DeleteFile(threadIndex int, filePath string) (err error) {
     fullPath := path.Join(storage.storageDir, filePath)
     fileInfo, err := storage.client.Stat(fullPath)
     if err != nil {
         if os.IsNotExist(err) {
             LOG_TRACE("SFTP_STORAGE", "File %s has disappeared before deletion", filePath)
             return nil
         }
         return err
     }
     if fileInfo == nil {
         return nil
     }
     return storage.client.Remove(path.Join(storage.storageDir, filePath))
 }

 // MoveFile renames the file.
 func (storage *SFTPStorage) MoveFile(threadIndex int, from string, to string) (err error) {
     toPath := path.Join(storage.storageDir, to)
     fileInfo, err := storage.client.Stat(toPath)
     if fileInfo != nil {
         return fmt.Errorf("The destination file %s already exists", toPath)
     }
     return storage.client.Rename(path.Join(storage.storageDir, from),
         path.Join(storage.storageDir, to))
 }
 // CreateDirectory creates a new directory.
 func (storage *SFTPStorage) CreateDirectory(threadIndex int, dirPath string) (err error) {
     fullPath := path.Join(storage.storageDir, dirPath)
     fileInfo, err := storage.client.Stat(fullPath)
     if fileInfo != nil && fileInfo.IsDir() {
         return nil
     }
     return storage.client.Mkdir(path.Join(storage.storageDir, dirPath))
 }

 // GetFileInfo returns the information about the file or directory at 'filePath'.
 func (storage *SFTPStorage) GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error) {
     fileInfo, err := storage.client.Stat(path.Join(storage.storageDir, filePath))
     if err != nil {
         if os.IsNotExist(err) {
             return false, false, 0, nil
         } else {
             return false, false, 0, err
         }
     }

     if fileInfo == nil {
         return false, false, 0, nil
     }

     return true, fileInfo.IsDir(), fileInfo.Size(), nil
 }
 // FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
 // the suffix '.fsl'.
 func (storage *SFTPStorage) FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error) {
     dir := path.Join(storage.storageDir, "chunks")

     suffix := ""
     if isFossil {
         suffix = ".fsl"
     }

     // The minimum level of directories to dive into before searching for the chunk file.
     minimumLevel := 2

-    for level := 0; level * 2 < len(chunkID); level ++ {
+    for level := 0; level*2 < len(chunkID); level++ {
         if level >= minimumLevel {
-            filePath = path.Join(dir, chunkID[2 * level:]) + suffix
+            filePath = path.Join(dir, chunkID[2*level:]) + suffix
             if stat, err := storage.client.Stat(filePath); err == nil && !stat.IsDir() {
-                return filePath[len(storage.storageDir) + 1:], true, stat.Size(), nil
+                return filePath[len(storage.storageDir)+1:], true, stat.Size(), nil
             } else if err == nil && stat.IsDir() {
-                return filePath[len(storage.storageDir) + 1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
+                return filePath[len(storage.storageDir)+1:], true, 0, fmt.Errorf("The path %s is a directory", filePath)
             }
         }

         // Find the subdirectory the chunk file may reside.
-        subDir := path.Join(dir, chunkID[2 * level: 2 * level + 2])
+        subDir := path.Join(dir, chunkID[2*level:2*level+2])
         stat, err := storage.client.Stat(subDir)
         if err == nil && stat.IsDir() {
             dir = subDir
             continue
         }

         if level < minimumLevel {
             // Create the subdirectory if it doesn't exist.
             if err == nil && !stat.IsDir() {
                 return "", false, 0, fmt.Errorf("The path %s is not a directory", subDir)
             }

             err = storage.client.Mkdir(subDir)
             if err != nil {
                 // The directory may have been created by other threads so check it again.
                 stat, _ := storage.client.Stat(subDir)
                 if stat == nil || !stat.IsDir() {
                     return "", false, 0, fmt.Errorf("Failed to create the directory %s: %v", subDir, err)
                 }
             }

             dir = subDir
             continue
         }

         // The chunk must be under this subdirectory but it doesn't exist.
-        return path.Join(dir, chunkID[2 * level:])[len(storage.storageDir) + 1:] + suffix, false, 0, nil
+        return path.Join(dir, chunkID[2*level:])[len(storage.storageDir)+1:] + suffix, false, 0, nil
     }

     LOG_FATAL("CHUNK_FIND", "Chunk %s is still not found after having searched a maximum level of directories",
         chunkID)
     return "", false, 0, nil
 }
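FindChunk walks a nested directory layout in which each level consumes two hex characters of the chunk ID, descending at least minimumLevel levels before looking for the file itself; the fan-out keeps any single directory from accumulating millions of entries. A standalone sketch of how such a path is derived from a chunk ID (the two-level depth mirrors minimumLevel above; the function name is ours, not duplicacy's):

package main

import (
    "fmt"
    "path"
)

// chunkPath maps a hex chunk ID to a nested path, two hex characters per
// directory level: "ab12cd..." -> "chunks/ab/12/cd...".
func chunkPath(chunkID string, levels int) string {
    p := "chunks"
    for i := 0; i < levels && (i+1)*2 < len(chunkID); i++ {
        p = path.Join(p, chunkID[2*i:2*i+2])
    }
    return path.Join(p, chunkID[2*levels:])
}

func main() {
    id := "ab12cd34ef5678900000000000000000"
    // With two levels, matching the minimumLevel used by the SFTP backend.
    fmt.Println(chunkPath(id, 2)) // chunks/ab/12/cd34ef5678900000000000000000
}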
 // DownloadFile reads the file at 'filePath' into the chunk.
 func (storage *SFTPStorage) DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error) {
     file, err := storage.client.Open(path.Join(storage.storageDir, filePath))
     if err != nil {
         return err
     }

     defer file.Close()
-    if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit / storage.numberOfThreads); err != nil {
+    if _, err = RateLimitedCopy(chunk, file, storage.DownloadRateLimit/storage.numberOfThreads); err != nil {
         return err
     }

     return nil
 }
 // UploadFile writes 'content' to the file at 'filePath'.
 func (storage *SFTPStorage) UploadFile(threadIndex int, filePath string, content []byte) (err error) {

     fullPath := path.Join(storage.storageDir, filePath)

     letters := "abcdefghijklmnopqrstuvwxyz"
     suffix := make([]byte, 8)
     for i := range suffix {
         suffix[i] = letters[rand.Intn(len(letters))]
     }

     temporaryFile := fullPath + "." + string(suffix) + ".tmp"

-    file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY | os.O_CREATE | os.O_TRUNC)
+    file, err := storage.client.OpenFile(temporaryFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC)
     if err != nil {
         return err
     }

-    reader := CreateRateLimitedReader(content, storage.UploadRateLimit / storage.numberOfThreads)
+    reader := CreateRateLimitedReader(content, storage.UploadRateLimit/storage.numberOfThreads)
     _, err = io.Copy(file, reader)
     if err != nil {
         file.Close()
         return err
     }
     file.Close()

     err = storage.client.Rename(temporaryFile, fullPath)
     if err != nil {

         if _, err = storage.client.Stat(fullPath); err == nil {
             storage.client.Remove(temporaryFile)
             return nil
         } else {
             return fmt.Errorf("Uploaded file but failed to store it at %s: %v", fullPath, err)
         }
     }

     return nil
 }
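UploadFile never writes the final path directly: content goes to a randomly suffixed .tmp file which is then renamed into place, so a concurrent reader either sees the complete file or no file at all. The same write-then-rename pattern for a local filesystem, as a minimal sketch (the helper name is ours; error handling is simplified):

package main

import (
    "fmt"
    "io/ioutil"
    "math/rand"
    "os"
)

// atomicWrite stores content at finalPath by writing a temporary file in the
// same directory and renaming it into place. On POSIX filesystems the rename
// is atomic, so readers never observe a partially written file.
func atomicWrite(finalPath string, content []byte) error {
    tmpPath := fmt.Sprintf("%s.%08x.tmp", finalPath, rand.Uint32())
    if err := ioutil.WriteFile(tmpPath, content, 0644); err != nil {
        return err
    }
    if err := os.Rename(tmpPath, finalPath); err != nil {
        os.Remove(tmpPath) // best-effort cleanup
        return err
    }
    return nil
}

func main() {
    if err := atomicWrite("/tmp/chunk.bin", []byte("chunk data")); err != nil {
        fmt.Println("upload failed:", err)
    }
}

SFTP renames, unlike POSIX ones, may fail when the destination already exists; that is why the method above treats a failed rename as success when a Stat shows the target file is already there, presumably placed by another thread.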
 // If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 // managing snapshots.
-func (storage *SFTPStorage) IsCacheNeeded () (bool) { return true }
+func (storage *SFTPStorage) IsCacheNeeded() bool { return true }

 // If the 'MoveFile' method is implemented.
-func (storage *SFTPStorage) IsMoveFileImplemented() (bool) { return true }
+func (storage *SFTPStorage) IsMoveFileImplemented() bool { return true }

 // If the storage can guarantee strong consistency.
-func (storage *SFTPStorage) IsStrongConsistent() (bool) { return true }
+func (storage *SFTPStorage) IsStrongConsistent() bool { return true }

 // If the storage supports fast listing of file names.
-func (storage *SFTPStorage) IsFastListing() (bool) { return false }
+func (storage *SFTPStorage) IsFastListing() bool { return false }

 // Enable the test mode.
 func (storage *SFTPStorage) EnableTestMode() {}
@@ -7,7 +7,7 @@
 package duplicacy

 func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {
     return top
 }

 func DeleteShadowCopy() {}
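This two-function stub is the portable counterpart of the Windows implementation that follows: Go's build constraints compile exactly one of the two files into any given binary, so callers invoke CreateShadowCopy unconditionally. A minimal sketch of the mechanism with hypothetical file and package names (the `// +build` comment must be followed by a blank line before the package clause):

// greeting_stub.go — built everywhere except Windows.

// +build !windows

package platform

func Greeting() string { return "no shadow copies here" }

// The Windows variant needs no build comment at all: a file named
// greeting_windows.go is restricted to Windows builds by its _windows
// file-name suffix alone, which is how duplicacy's shadow copy files
// are likely paired.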
@@ -5,327 +5,325 @@
 package duplicacy

 import (
-    "syscall"
-    "unsafe"
-    "time"
     "os"
     "runtime"
+    "syscall"
+    "time"
+    "unsafe"

     ole "github.com/gilbertchen/go-ole"
 )
 //507C37B4-CF5B-4e95-B0AF-14EB9767467E
 var IID_IVSS_ASYNC = &ole.GUID{0x507C37B4, 0xCF5B, 0x4e95, [8]byte{0xb0, 0xaf, 0x14, 0xeb, 0x97, 0x67, 0x46, 0x7e}}

 type IVSSAsync struct {
     ole.IUnknown
 }

 type IVSSAsyncVtbl struct {
     ole.IUnknownVtbl
     cancel      uintptr
     wait        uintptr
     queryStatus uintptr
 }

-func (async *IVSSAsync) VTable() * IVSSAsyncVtbl {
+func (async *IVSSAsync) VTable() *IVSSAsyncVtbl {
     return (*IVSSAsyncVtbl)(unsafe.Pointer(async.RawVTable))
 }
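IVSSAsync here, and IVSS below, follow go-ole's convention for calling COM methods without cgo: the interface pointer's vtable is reinterpreted as a Go struct of uintptr slots laid out in exactly the order the COM interface declares its methods, and each slot is invoked through syscall.Syscall with the object pointer as the implicit first argument. A stripped-down sketch of the pattern with a hypothetical interface (Windows-only, and only meaningful once a real object has been obtained via QueryInterface):

// +build windows

package main

import (
    "syscall"
    "unsafe"

    ole "github.com/gilbertchen/go-ole"
)

// comObject embeds IUnknown; its RawVTable field points at the COM vtable.
type comObject struct {
    ole.IUnknown
}

// comObjectVtbl must list method slots in declaration order: the three
// IUnknown methods first, then the interface's own methods.
type comObjectVtbl struct {
    ole.IUnknownVtbl
    doWork uintptr // first method slot of this hypothetical interface
}

func (obj *comObject) vtable() *comObjectVtbl {
    return (*comObjectVtbl)(unsafe.Pointer(obj.RawVTable))
}

// DoWork invokes the slot with 'this' as the implicit first argument,
// mirroring how the IVSS methods below are called.
func (obj *comObject) DoWork(arg uintptr) int {
    ret, _, _ := syscall.Syscall(obj.vtable().doWork, 2,
        uintptr(unsafe.Pointer(obj)), arg, 0)
    return int(ret)
}

func main() {} // sketch only; a real object comes from QueryInterface

Getting the struct layout wrong by even one slot calls the wrong native function, which is why the full IVSSVtbl below spells out every method the interface declares, used or not.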
 var VSS_S_ASYNC_PENDING int32 = 0x00042309
 var VSS_S_ASYNC_FINISHED int32 = 0x0004230A
 var VSS_S_ASYNC_CANCELLED int32 = 0x0004230B

 func (async *IVSSAsync) Wait(seconds int) bool {

     startTime := time.Now().Unix()
     for {
         ret, _, _ := syscall.Syscall(async.VTable().wait, 2, uintptr(unsafe.Pointer(async)), uintptr(1000), 0)
         if ret != 0 {
             LOG_WARN("IVSSASYNC_WAIT", "IVssAsync::Wait returned %d\n", ret)
         }

         var status int32
         ret, _, _ = syscall.Syscall(async.VTable().queryStatus, 3, uintptr(unsafe.Pointer(async)),
             uintptr(unsafe.Pointer(&status)), 0)
         if ret != 0 {
             LOG_WARN("IVSSASYNC_QUERY", "IVssAsync::QueryStatus returned %d\n", ret)
         }

         if status == VSS_S_ASYNC_FINISHED {
             return true
         }
-        if time.Now().Unix() - startTime > int64(seconds) {
+        if time.Now().Unix()-startTime > int64(seconds) {
             LOG_WARN("IVSSASYNC_TIMEOUT", "IVssAsync is pending for more than %d seconds\n", seconds)
             return false
         }
     }
 }
 func getIVSSAsync(unknown *ole.IUnknown, iid *ole.GUID) (async *IVSSAsync) {
     r, _, _ := syscall.Syscall(
         unknown.VTable().QueryInterface,
         3,
         uintptr(unsafe.Pointer(unknown)),
         uintptr(unsafe.Pointer(iid)),
         uintptr(unsafe.Pointer(&async)))

     if r != 0 {
         LOG_WARN("IVSSASYNC_QUERY", "IVSSAsync::QueryInterface returned %d\n", r)
         return nil
     }
     return
 }
 //665c1d5f-c218-414d-a05d-7fef5f9d5c86
 var IID_IVSS = &ole.GUID{0x665c1d5f, 0xc218, 0x414d, [8]byte{0xa0, 0x5d, 0x7f, 0xef, 0x5f, 0x9d, 0x5c, 0x86}}

 type IVSS struct {
     ole.IUnknown
 }

 type IVSSVtbl struct {
     ole.IUnknownVtbl
     getWriterComponentsCount      uintptr
     getWriterComponents           uintptr
     initializeForBackup           uintptr
     setBackupState                uintptr
     initializeForRestore          uintptr
     setRestoreState               uintptr
     gatherWriterMetadata          uintptr
     getWriterMetadataCount        uintptr
     getWriterMetadata             uintptr
     freeWriterMetadata            uintptr
     addComponent                  uintptr
     prepareForBackup              uintptr
     abortBackup                   uintptr
     gatherWriterStatus            uintptr
     getWriterStatusCount          uintptr
     freeWriterStatus              uintptr
     getWriterStatus               uintptr
     setBackupSucceeded            uintptr
     setBackupOptions              uintptr
     setSelectedForRestore         uintptr
     setRestoreOptions             uintptr
     setAdditionalRestores         uintptr
     setPreviousBackupStamp        uintptr
     saveAsXML                     uintptr
     backupComplete                uintptr
     addAlternativeLocationMapping uintptr
     addRestoreSubcomponent        uintptr
     setFileRestoreStatus          uintptr
     addNewTarget                  uintptr
     setRangesFilePath             uintptr
     preRestore                    uintptr
     postRestore                   uintptr
     setContext                    uintptr
     startSnapshotSet              uintptr
     addToSnapshotSet              uintptr
     doSnapshotSet                 uintptr
     deleteSnapshots               uintptr
     importSnapshots               uintptr
     breakSnapshotSet              uintptr
     getSnapshotProperties         uintptr
     query                         uintptr
     isVolumeSupported             uintptr
     disableWriterClasses          uintptr
     enableWriterClasses           uintptr
     disableWriterInstances        uintptr
     exposeSnapshot                uintptr
     revertToSnapshot              uintptr
     queryRevertStatus             uintptr
 }

-func (vss *IVSS) VTable() * IVSSVtbl {
+func (vss *IVSS) VTable() *IVSSVtbl {
     return (*IVSSVtbl)(unsafe.Pointer(vss.RawVTable))
 }
 func (vss *IVSS) InitializeForBackup() int {
     ret, _, _ := syscall.Syscall(vss.VTable().initializeForBackup, 2, uintptr(unsafe.Pointer(vss)), 0, 0)
     return int(ret)
 }

 func (vss *IVSS) GatherWriterMetadata() (int, *IVSSAsync) {
     var unknown *ole.IUnknown
     ret, _, _ := syscall.Syscall(vss.VTable().gatherWriterMetadata, 2,
         uintptr(unsafe.Pointer(vss)),
         uintptr(unsafe.Pointer(&unknown)), 0)

     if ret != 0 {
         return int(ret), nil
     } else {
         return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
     }
 }

 func (vss *IVSS) StartSnapshotSet(snapshotID *ole.GUID) int {
     ret, _, _ := syscall.Syscall(vss.VTable().startSnapshotSet, 2,
         uintptr(unsafe.Pointer(vss)),
         uintptr(unsafe.Pointer(snapshotID)), 0)
     return int(ret)
 }

 func (vss *IVSS) AddToSnapshotSet(drive string, snapshotID *ole.GUID) int {

     volumeName := syscall.StringToUTF16Ptr(drive)

     var ret uintptr
     if runtime.GOARCH == "386" {
         // On 32-bit Windows, GUID is passed by value
         ret, _, _ = syscall.Syscall9(vss.VTable().addToSnapshotSet, 7,
             uintptr(unsafe.Pointer(vss)),
             uintptr(unsafe.Pointer(volumeName)),
             0, 0, 0, 0,
             uintptr(unsafe.Pointer(snapshotID)), 0, 0)
     } else {
         ret, _, _ = syscall.Syscall6(vss.VTable().addToSnapshotSet, 4,
             uintptr(unsafe.Pointer(vss)),
             uintptr(unsafe.Pointer(volumeName)),
             uintptr(unsafe.Pointer(ole.IID_NULL)),
             uintptr(unsafe.Pointer(snapshotID)), 0, 0)
     }
     return int(ret)
 }
 func (vss *IVSS) SetBackupState() int {
     VSS_BT_COPY := 5
     ret, _, _ := syscall.Syscall6(vss.VTable().setBackupState, 4,
         uintptr(unsafe.Pointer(vss)),
         0, 0, uintptr(VSS_BT_COPY), 0, 0)
     return int(ret)
 }

 func (vss *IVSS) PrepareForBackup() (int, *IVSSAsync) {
     var unknown *ole.IUnknown
     ret, _, _ := syscall.Syscall(vss.VTable().prepareForBackup, 2,
         uintptr(unsafe.Pointer(vss)),
         uintptr(unsafe.Pointer(&unknown)), 0)

     if ret != 0 {
         return int(ret), nil
     } else {
         return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
     }
 }

 func (vss *IVSS) DoSnapshotSet() (int, *IVSSAsync) {
     var unknown *ole.IUnknown
     ret, _, _ := syscall.Syscall(vss.VTable().doSnapshotSet, 2,
         uintptr(unsafe.Pointer(vss)),
         uintptr(unsafe.Pointer(&unknown)), 0)

     if ret != 0 {
         return int(ret), nil
     } else {
         return int(ret), getIVSSAsync(unknown, IID_IVSS_ASYNC)
     }
 }
 type SnapshotProperties struct {
     SnapshotID           ole.GUID
     SnapshotSetID        ole.GUID
     SnapshotsCount       uint32
     SnapshotDeviceObject *uint16
     OriginalVolumeName   *uint16
     OriginatingMachine   *uint16
     ServiceMachine       *uint16
     ExposedName          *uint16
     ExposedPath          *uint16
     ProviderId           ole.GUID
     SnapshotAttributes   uint32
     CreationTimestamp    int64
     Status               int
 }

-func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) (int) {
+func (vss *IVSS) GetSnapshotProperties(snapshotSetID ole.GUID, properties *SnapshotProperties) int {
     var ret uintptr
     if runtime.GOARCH == "386" {
         address := uint(uintptr(unsafe.Pointer(&snapshotSetID)))
         ret, _, _ = syscall.Syscall6(vss.VTable().getSnapshotProperties, 6,
             uintptr(unsafe.Pointer(vss)),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
             uintptr(unsafe.Pointer(properties)))
     } else {
         ret, _, _ = syscall.Syscall(vss.VTable().getSnapshotProperties, 3,
             uintptr(unsafe.Pointer(vss)),
             uintptr(unsafe.Pointer(&snapshotSetID)),
             uintptr(unsafe.Pointer(properties)))
     }
     return int(ret)
 }
 func (vss *IVSS) DeleteSnapshots(snapshotID ole.GUID) (int, int, ole.GUID) {

     VSS_OBJECT_SNAPSHOT := 3

     deleted := int32(0)

     var deletedGUID ole.GUID

     var ret uintptr
     if runtime.GOARCH == "386" {
         address := uint(uintptr(unsafe.Pointer(&snapshotID)))
         ret, _, _ = syscall.Syscall9(vss.VTable().deleteSnapshots, 9,
             uintptr(unsafe.Pointer(vss)),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address)))),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 4)))),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 8)))),
             uintptr(*(*uint32)(unsafe.Pointer(uintptr(address + 12)))),
             uintptr(VSS_OBJECT_SNAPSHOT),
             uintptr(1),
             uintptr(unsafe.Pointer(&deleted)),
             uintptr(unsafe.Pointer(&deletedGUID)))
     } else {
         ret, _, _ = syscall.Syscall6(vss.VTable().deleteSnapshots, 6,
             uintptr(unsafe.Pointer(vss)),
             uintptr(unsafe.Pointer(&snapshotID)),
             uintptr(VSS_OBJECT_SNAPSHOT),
             uintptr(1),
             uintptr(unsafe.Pointer(&deleted)),
             uintptr(unsafe.Pointer(&deletedGUID)))
     }

     return int(ret), int(deleted), deletedGUID
 }
-func uint16ArrayToString(p *uint16) (string) {
+func uint16ArrayToString(p *uint16) string {
     if p == nil {
         return ""
     }
     s := make([]uint16, 0)
     address := uintptr(unsafe.Pointer(p))
     for {
         c := *(*uint16)(unsafe.Pointer(address))
         if c == 0 {
             break
         }

         s = append(s, c)
         address = uintptr(int(address) + 2)
     }

     return syscall.UTF16ToString(s)
 }
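uint16ArrayToString walks a NUL-terminated UTF-16 pointer two bytes at a time and hands the collected code units to syscall.UTF16ToString. Away from the unsafe pointer arithmetic, the same conversion can be expressed with the standard UTF-16 helpers alone; a portable sketch (the sample data and helper name are ours):

package main

import (
    "fmt"
    "unicode/utf16"
)

// utf16zToString decodes a NUL-terminated UTF-16 sequence, the wire format
// used by the VSS property strings above.
func utf16zToString(units []uint16) string {
    for i, u := range units {
        if u == 0 {
            units = units[:i]
            break
        }
    }
    return string(utf16.Decode(units))
}

func main() {
    // "C:\" followed by the terminating NUL.
    sample := []uint16{'C', ':', '\\', 0}
    fmt.Println(utf16zToString(sample)) // C:\
}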
 func getIVSS(unknown *ole.IUnknown, iid *ole.GUID) (ivss *IVSS) {
     r, _, _ := syscall.Syscall(
         unknown.VTable().QueryInterface,
         3,
         uintptr(unsafe.Pointer(unknown)),
         uintptr(unsafe.Pointer(iid)),
         uintptr(unsafe.Pointer(&ivss)))

     if r != 0 {
         LOG_WARN("IVSS_QUERY", "IVSS::QueryInterface returned %d\n", r)
         return nil
     }

     return ivss
 }

 var vssBackupComponent *IVSS
@@ -333,193 +331,189 @@ var snapshotID ole.GUID
 var shadowLink string

 func DeleteShadowCopy() {
     if vssBackupComponent != nil {
         defer vssBackupComponent.Release()

         LOG_TRACE("VSS_DELETE", "Deleting the shadow copy used for this backup")
         ret, _, _ := vssBackupComponent.DeleteSnapshots(snapshotID)
         if ret != 0 {
             LOG_WARN("VSS_DELETE", "Failed to delete the shadow copy: %x\n", uint(ret))
         } else {
             LOG_INFO("VSS_DELETE", "The shadow copy has been successfully deleted")
         }
     }

     if shadowLink != "" {
         err := os.Remove(shadowLink)
         if err != nil {
             LOG_WARN("VSS_SYMLINK", "Failed to remove the symbolic link for the shadow copy: %v", err)
         }
     }

     ole.CoUninitialize()
 }
 func CreateShadowCopy(top string, shadowCopy bool) (shadowTop string) {

     if !shadowCopy {
         return top
     }

     ole.CoInitialize(0)
     defer ole.CoUninitialize()

     dllVssApi := syscall.NewLazyDLL("VssApi.dll")
     procCreateVssBackupComponents :=
         dllVssApi.NewProc("?CreateVssBackupComponents@@YAJPEAPEAVIVssBackupComponents@@@Z")
     if runtime.GOARCH == "386" {
         procCreateVssBackupComponents =
             dllVssApi.NewProc("?CreateVssBackupComponents@@YGJPAPAVIVssBackupComponents@@@Z")
     }

     if len(top) < 3 || top[1] != ':' || (top[2] != '/' && top[2] != '\\') {
         LOG_ERROR("VSS_PATH", "Invalid repository path: %s", top)
         return top
     }
     volume := top[:1] + ":\\"

     LOG_INFO("VSS_CREATE", "Creating a shadow copy for %s", volume)

     var unknown *ole.IUnknown
     r, _, err := procCreateVssBackupComponents.Call(uintptr(unsafe.Pointer(&unknown)))

     if r == 0x80070005 {
         LOG_ERROR("VSS_CREATE", "Only administrators can create shadow copies")
         return top
     }

     if r != 0 {
         LOG_ERROR("VSS_CREATE", "Failed to create the VSS backup component: %d", r)
         return top
     }

     vssBackupComponent = getIVSS(unknown, IID_IVSS)
     if vssBackupComponent == nil {
         LOG_ERROR("VSS_CREATE", "Failed to create the VSS backup component")
         return top
     }

     ret := vssBackupComponent.InitializeForBackup()
     if ret != 0 {
         LOG_ERROR("VSS_INIT", "Shadow copy creation failed: InitializeForBackup returned %x", uint(ret))
         return top
     }

     var async *IVSSAsync
     ret, async = vssBackupComponent.GatherWriterMetadata()
     if ret != 0 {
         LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata returned %x", uint(ret))
         return top
     }

     if async == nil {
         LOG_ERROR("VSS_GATHER",
             "Shadow copy creation failed: GatherWriterMetadata failed to return a valid IVssAsync object")
         return top
     }

     if !async.Wait(20) {
         LOG_ERROR("VSS_GATHER", "Shadow copy creation failed: GatherWriterMetadata didn't finish properly")
         return top
     }
     async.Release()

     var snapshotSetID ole.GUID

     ret = vssBackupComponent.StartSnapshotSet(&snapshotSetID)
     if ret != 0 {
         LOG_ERROR("VSS_START", "Shadow copy creation failed: StartSnapshotSet returned %x", uint(ret))
         return top
     }

     ret = vssBackupComponent.AddToSnapshotSet(volume, &snapshotID)
     if ret != 0 {
         LOG_ERROR("VSS_ADD", "Shadow copy creation failed: AddToSnapshotSet returned %x", uint(ret))
         return top
     }

     s, _ := ole.StringFromIID(&snapshotID)
     LOG_DEBUG("VSS_ID", "Creating shadow copy %s", s)

     ret = vssBackupComponent.SetBackupState()
     if ret != 0 {
         LOG_ERROR("VSS_SET", "Shadow copy creation failed: SetBackupState returned %x", uint(ret))
         return top
     }

     ret, async = vssBackupComponent.PrepareForBackup()
     if ret != 0 {
         LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup returned %x", uint(ret))
         return top
     }
     if async == nil {
         LOG_ERROR("VSS_PREPARE",
             "Shadow copy creation failed: PrepareForBackup failed to return a valid IVssAsync object")
         return top
     }

     if !async.Wait(20) {
         LOG_ERROR("VSS_PREPARE", "Shadow copy creation failed: PrepareForBackup didn't finish properly")
         return top
     }
     async.Release()

     ret, async = vssBackupComponent.DoSnapshotSet()
     if ret != 0 {
         LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet returned %x", uint(ret))
         return top
     }
     if async == nil {
         LOG_ERROR("VSS_SNAPSHOT",
             "Shadow copy creation failed: DoSnapshotSet failed to return a valid IVssAsync object")
         return top
     }

     if !async.Wait(60) {
         LOG_ERROR("VSS_SNAPSHOT", "Shadow copy creation failed: DoSnapshotSet didn't finish properly")
         return top
     }
     async.Release()

-    properties := SnapshotProperties {
-    }
+    properties := SnapshotProperties{}

     ret = vssBackupComponent.GetSnapshotProperties(snapshotID, &properties)
     if ret != 0 {
         LOG_ERROR("VSS_PROPERTIES", "GetSnapshotProperties returned %x", ret)
         return top
     }

     SnapshotIDString, _ := ole.StringFromIID(&properties.SnapshotID)
     SnapshotSetIDString, _ := ole.StringFromIID(&properties.SnapshotSetID)

     LOG_DEBUG("VSS_PROPERTY", "SnapshotID: %s", SnapshotIDString)
     LOG_DEBUG("VSS_PROPERTY", "SnapshotSetID: %s", SnapshotSetIDString)

     LOG_DEBUG("VSS_PROPERTY", "SnapshotDeviceObject: %s", uint16ArrayToString(properties.SnapshotDeviceObject))
     LOG_DEBUG("VSS_PROPERTY", "OriginalVolumeName: %s", uint16ArrayToString(properties.OriginalVolumeName))
     LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
     LOG_DEBUG("VSS_PROPERTY", "OriginatingMachine: %s", uint16ArrayToString(properties.OriginatingMachine))
     LOG_DEBUG("VSS_PROPERTY", "ServiceMachine: %s", uint16ArrayToString(properties.ServiceMachine))
     LOG_DEBUG("VSS_PROPERTY", "ExposedName: %s", uint16ArrayToString(properties.ExposedName))
     LOG_DEBUG("VSS_PROPERTY", "ExposedPath: %s", uint16ArrayToString(properties.ExposedPath))

     LOG_INFO("VSS_DONE", "Shadow copy %s created", SnapshotIDString)

     snapshotPath := uint16ArrayToString(properties.SnapshotDeviceObject)

     preferencePath := GetDuplicacyPreferencePath()
     shadowLink = preferencePath + "\\shadow"
     os.Remove(shadowLink)
-    err = os.Symlink(snapshotPath + "\\", shadowLink)
+    err = os.Symlink(snapshotPath+"\\", shadowLink)
     if err != nil {
         LOG_ERROR("VSS_SYMLINK", "Failed to create a symbolic link to the shadow copy just created: %v", err)
         return top
     }

     return shadowLink + "\\" + top[2:]
 }
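The last step above is what makes the snapshot usable by ordinary path-based code: the shadow copy's device object path is exposed through a symbolic link under the preference directory, and the backup then walks `shadowLink + "\\" + top[2:]` instead of the live volume. A sketch of that path rewriting, with an illustrative link location (the real device path comes from GetSnapshotProperties):

package main

import "fmt"

// shadowPath rewrites a repository path such as `C:\Users\data` so that it
// is read through a symlink pointing at the shadow copy's device object.
func shadowPath(top string, shadowLink string) string {
    // top[2:] drops the drive letter and colon, keeping `\Users\data`.
    return shadowLink + "\\" + top[2:]
}

func main() {
    link := `C:\Users\data\.duplicacy\shadow`
    fmt.Println(shadowPath(`C:\Users\data`, link))
    // C:\Users\data\.duplicacy\shadow\\Users\data
}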
@@ -5,419 +5,417 @@
 package duplicacy

 import (
-    "os"
+    "encoding/hex"
+    "encoding/json"
     "fmt"
-    "time"
-    "path"
-    "strings"
-    "strconv"
     "io/ioutil"
-    "encoding/json"
-    "encoding/hex"
+    "os"
+    "path"
+    "strconv"
+    "strings"
+    "time"
 )
 // Snapshot represents a backup of the repository.
 type Snapshot struct {
     ID            string // the snapshot id; must be different for different repositories
     Revision      int    // the revision number
     Options       string // options used to create this snapshot (some not included)
     Tag           string // user-assigned tag
     StartTime     int64  // at what time the snapshot was created
     EndTime       int64  // at what time the snapshot was done
     FileSize      int64  // total file size
     NumberOfFiles int64  // number of files

     // A sequence of chunks whose aggregated content is the json representation of 'Files'.
     FileSequence []string

     // A sequence of chunks whose aggregated content is the json representation of 'ChunkHashes'.
     ChunkSequence []string

     // A sequence of chunks whose aggregated content is the json representation of 'ChunkLengths'.
     LengthSequence []string

     Files []*Entry // list of files and subdirectories

     ChunkHashes  []string // a sequence of chunks representing the file content
     ChunkLengths []int    // the length of each chunk

     Flag bool // used to mark certain snapshots for deletion or copy

     discardAttributes bool
 }

 // CreateEmptySnapshot creates an empty snapshot.
-func CreateEmptySnapshot (id string) (snapshto *Snapshot) {
+func CreateEmptySnapshot(id string) (snapshto *Snapshot) {
     return &Snapshot{
-        ID : id,
-        Revision : 0,
+        ID:        id,
+        Revision:  0,
         StartTime: time.Now().Unix(),
     }
 }
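As the struct's comments note, Files, ChunkHashes and ChunkLengths are not stored inline in the snapshot file: each is JSON-encoded, the encoding is cut into chunks, and only the chunk references (FileSequence, ChunkSequence, LengthSequence) are kept, so large file lists deduplicate like any other data. A toy sketch of that indirection, with a fixed chunk size standing in for duplicacy's content-defined chunking (names are ours):

package main

import (
    "encoding/json"
    "fmt"
)

// splitIntoChunks cuts a JSON blob into fixed-size pieces; the real backup
// engine uses variable-size, content-defined chunks instead.
func splitIntoChunks(data []byte, size int) [][]byte {
    var chunks [][]byte
    for len(data) > 0 {
        n := size
        if n > len(data) {
            n = len(data)
        }
        chunks = append(chunks, data[:n])
        data = data[n:]
    }
    return chunks
}

func main() {
    chunkLengths := []int{4194304, 1048576, 2097152}
    encoded, _ := json.Marshal(chunkLengths)

    // The snapshot would store references to these chunks in LengthSequence,
    // not the encoded bytes themselves.
    for i, c := range splitIntoChunks(encoded, 8) {
        fmt.Printf("chunk %d: %q\n", i, c)
    }
}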
 // CreateSnapshotFromDirectory creates a snapshot from the local directory 'top'. Only 'Files'
 // will be constructed, while 'ChunkHashes' and 'ChunkLengths' can only be populated after uploading.
 func CreateSnapshotFromDirectory(id string, top string) (snapshot *Snapshot, skippedDirectories []string,
     skippedFiles []string, err error) {

-    snapshot = &Snapshot {
-        ID : id,
+    snapshot = &Snapshot{
+        ID:        id,
         Revision:  0,
         StartTime: time.Now().Unix(),
     }

     var patterns []string

     patternFile, err := ioutil.ReadFile(path.Join(GetDuplicacyPreferencePath(), "filters"))
     if err == nil {
         for _, pattern := range strings.Split(string(patternFile), "\n") {
             pattern = strings.TrimSpace(pattern)
             if len(pattern) == 0 {
                 continue
             }

             if pattern[0] == '#' {
                 continue
             }

             if IsUnspecifiedFilter(pattern) {
                 pattern = "+" + pattern
             }

             if IsEmptyFilter(pattern) {
                 continue
             }

             if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
                 valid, err := IsValidRegex(pattern[2:])
                 if !valid || err != nil {
                     LOG_ERROR("SNAPSHOT_FILTER", "Invalid regular expression encountered for filter: \"%s\", error: %v", pattern, err)
                 }
             }

             patterns = append(patterns, pattern)
         }

         LOG_DEBUG("REGEX_DEBUG", "There are %d compiled regular expressions stored", len(RegexMap))

         LOG_INFO("SNAPSHOT_FILTER", "Loaded %d include/exclude pattern(s)", len(patterns))

         if IsTracing() {
             for _, pattern := range patterns {
                 LOG_TRACE("SNAPSHOT_PATTERN", "Pattern: %s", pattern)
             }
         }

     }
directories := make([]*Entry, 0, 256)
|
directories := make([]*Entry, 0, 256)
|
||||||
directories = append(directories, CreateEntry("", 0, 0, 0))
|
directories = append(directories, CreateEntry("", 0, 0, 0))
|
||||||
|
|
||||||
snapshot.Files = make([]*Entry, 0, 256)
|
snapshot.Files = make([]*Entry, 0, 256)
|
||||||
|
|
||||||
attributeThreshold := 1024 * 1024
|
attributeThreshold := 1024 * 1024
|
||||||
if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
|
if attributeThresholdValue, found := os.LookupEnv("DUPLICACY_ATTRIBUTE_THRESHOLD"); found && attributeThresholdValue != "" {
|
||||||
attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
|
attributeThreshold, _ = strconv.Atoi(attributeThresholdValue)
|
||||||
}
|
}
|
||||||
|
|
||||||
for len(directories) > 0 {
|
for len(directories) > 0 {
|
||||||
|
|
||||||
directory := directories[len(directories) - 1]
|
directory := directories[len(directories)-1]
|
||||||
directories = directories[:len(directories) - 1]
|
directories = directories[:len(directories)-1]
|
||||||
snapshot.Files = append(snapshot.Files, directory)
|
snapshot.Files = append(snapshot.Files, directory)
|
||||||
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, snapshot.discardAttributes)
|
subdirectories, skipped, err := ListEntries(top, directory.Path, &snapshot.Files, patterns, snapshot.discardAttributes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
|
LOG_WARN("LIST_FAILURE", "Failed to list subdirectory: %v", err)
|
||||||
skippedDirectories = append(skippedDirectories, directory.Path)
|
skippedDirectories = append(skippedDirectories, directory.Path)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
directories = append(directories, subdirectories...)
|
directories = append(directories, subdirectories...)
|
||||||
skippedFiles = append(skippedFiles, skipped...)
|
skippedFiles = append(skippedFiles, skipped...)
|
||||||
|
|
||||||
if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
|
if !snapshot.discardAttributes && len(snapshot.Files) > attributeThreshold {
|
||||||
LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
|
LOG_INFO("LIST_ATTRIBUTES", "Discarding file attributes")
|
||||||
snapshot.discardAttributes = true
|
snapshot.discardAttributes = true
|
||||||
for _, file := range snapshot.Files {
|
for _, file := range snapshot.Files {
|
||||||
file.Attributes = nil
|
file.Attributes = nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove the root entry
|
// Remove the root entry
|
||||||
snapshot.Files = snapshot.Files[1:]
|
snapshot.Files = snapshot.Files[1:]
|
||||||
|
|
||||||
return snapshot, skippedDirectories, skippedFiles, nil
|
return snapshot, skippedDirectories, skippedFiles, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
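// Illustrative example (added for clarity, not in the original source): a
// hypothetical filters file under the preference directory, as consumed by
// the loop above. Blank lines and '#' comments are skipped, patterns without
// an explicit sign become includes ("+" is prepended), and "i:"/"e:"
// patterns must be valid regular expressions:
//
//     # back up sources, skip build output and temporary files
//     +src/
//     -build/
//     e:\.(tmp|log)$
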
// This is the struct used to save/load incomplete snapshots
type IncompleteSnapshot struct {
    Files        []*Entry
    ChunkHashes  []string
    ChunkLengths []int
}

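// Illustrative example (added for clarity, not in the original source):
// since the struct carries no json tags, the saved "incomplete" file keys
// off the exported field names, with chunk hashes hex-encoded, e.g.:
//
//     {
//       "Files": [...],
//       "ChunkHashes": ["9f86d081884c7d65..."],
//       "ChunkLengths": [1048576]
//     }
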
// LoadIncompleteSnapshot loads the incomplete snapshot if it exists
func LoadIncompleteSnapshot() (snapshot *Snapshot) {
    snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
    description, err := ioutil.ReadFile(snapshotFile)
    if err != nil {
        LOG_DEBUG("INCOMPLETE_LOCATE", "Failed to locate incomplete snapshot: %v", err)
        return nil
    }

    var incompleteSnapshot IncompleteSnapshot

    err = json.Unmarshal(description, &incompleteSnapshot)
    if err != nil {
        LOG_DEBUG("INCOMPLETE_PARSE", "Failed to parse incomplete snapshot: %v", err)
        return nil
    }

    var chunkHashes []string
    for _, chunkHash := range incompleteSnapshot.ChunkHashes {
        hash, err := hex.DecodeString(chunkHash)
        if err != nil {
            LOG_DEBUG("INCOMPLETE_DECODE", "Failed to decode incomplete snapshot: %v", err)
            return nil
        }
        chunkHashes = append(chunkHashes, string(hash))
    }

    snapshot = &Snapshot{
        Files:        incompleteSnapshot.Files,
        ChunkHashes:  chunkHashes,
        ChunkLengths: incompleteSnapshot.ChunkLengths,
    }
    LOG_INFO("INCOMPLETE_LOAD", "Incomplete snapshot loaded from %s", snapshotFile)
    return snapshot
}

// SaveIncompleteSnapshot saves the incomplete snapshot under the preference directory
func SaveIncompleteSnapshot(snapshot *Snapshot) {
    var files []*Entry
    for _, file := range snapshot.Files {
        // All unprocessed files will have a size of -1
        if file.Size >= 0 {
            file.Attributes = nil
            files = append(files, file)
        } else {
            break
        }
    }
    var chunkHashes []string
    for _, chunkHash := range snapshot.ChunkHashes {
        chunkHashes = append(chunkHashes, hex.EncodeToString([]byte(chunkHash)))
    }

    incompleteSnapshot := IncompleteSnapshot{
        Files:        files,
        ChunkHashes:  chunkHashes,
        ChunkLengths: snapshot.ChunkLengths,
    }

    description, err := json.MarshalIndent(incompleteSnapshot, "", " ")
    if err != nil {
        LOG_WARN("INCOMPLETE_ENCODE", "Failed to encode the incomplete snapshot: %v", err)
        return
    }

    snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
    err = ioutil.WriteFile(snapshotFile, description, 0644)
    if err != nil {
        LOG_WARN("INCOMPLETE_WRITE", "Failed to save the incomplete snapshot: %v", err)
        return
    }

    LOG_INFO("INCOMPLETE_SAVE", "Incomplete snapshot saved to %s", snapshotFile)
}

// RemoveIncompleteSnapshot removes the incomplete snapshot under the preference directory
func RemoveIncompleteSnapshot() {
    snapshotFile := path.Join(GetDuplicacyPreferencePath(), "incomplete")
    if stat, err := os.Stat(snapshotFile); err == nil && !stat.IsDir() {
        err = os.Remove(snapshotFile)
        if err != nil {
            LOG_INFO("INCOMPLETE_SAVE", "Failed to remove incomplete snapshot: %v", err)
        } else {
            LOG_INFO("INCOMPLETE_SAVE", "Removed incomplete snapshot %s", snapshotFile)
        }
    }
}

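// Illustrative sketch (added for clarity, not in the original source): one
// way a backup routine might tie the three functions above together. The
// function name and the resume logic are hypothetical.
func exampleResumeBackup(snapshot *Snapshot) {
    // Start from the previously saved state, if any.
    if incomplete := LoadIncompleteSnapshot(); incomplete != nil {
        snapshot.Files = incomplete.Files
        snapshot.ChunkHashes = incomplete.ChunkHashes
        snapshot.ChunkLengths = incomplete.ChunkLengths
    }

    // ... upload chunks; on interruption, call SaveIncompleteSnapshot(snapshot) ...

    // Once the snapshot has been fully uploaded, the saved state is stale.
    RemoveIncompleteSnapshot()
}
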
// CreateSnapshotFromDescription creates a snapshot from a json description.
func CreateSnapshotFromDescription(description []byte) (snapshot *Snapshot, err error) {

    var root map[string]interface{}

    err = json.Unmarshal(description, &root)
    if err != nil {
        return nil, err
    }

    snapshot = &Snapshot{}

    if value, ok := root["id"]; !ok {
        return nil, fmt.Errorf("No id is specified in the snapshot")
    } else if snapshot.ID, ok = value.(string); !ok {
        return nil, fmt.Errorf("Invalid id is specified in the snapshot")
    }

    if value, ok := root["revision"]; !ok {
        return nil, fmt.Errorf("No revision is specified in the snapshot")
    } else if _, ok = value.(float64); !ok {
        return nil, fmt.Errorf("Invalid revision is specified in the snapshot")
    } else {
        snapshot.Revision = int(value.(float64))
    }

    if value, ok := root["tag"]; !ok {
    } else if snapshot.Tag, ok = value.(string); !ok {
        return nil, fmt.Errorf("Invalid tag is specified in the snapshot")
    }

    if value, ok := root["options"]; !ok {
    } else if snapshot.Options, ok = value.(string); !ok {
        return nil, fmt.Errorf("Invalid options are specified in the snapshot")
    }

    if value, ok := root["start_time"]; !ok {
        return nil, fmt.Errorf("No creation time is specified in the snapshot")
    } else if _, ok = value.(float64); !ok {
        return nil, fmt.Errorf("Invalid creation time is specified in the snapshot")
    } else {
        snapshot.StartTime = int64(value.(float64))
    }

    if value, ok := root["end_time"]; !ok {
        return nil, fmt.Errorf("No end time is specified in the snapshot")
    } else if _, ok = value.(float64); !ok {
        return nil, fmt.Errorf("Invalid end time is specified in the snapshot")
    } else {
        snapshot.EndTime = int64(value.(float64))
    }

    if value, ok := root["file_size"]; ok {
        if _, ok = value.(float64); ok {
            snapshot.FileSize = int64(value.(float64))
        }
    }

    if value, ok := root["number_of_files"]; ok {
        if _, ok = value.(float64); ok {
            snapshot.NumberOfFiles = int64(value.(float64))
        }
    }

    for _, sequenceType := range []string{"files", "chunks", "lengths"} {
        if value, ok := root[sequenceType]; !ok {
            return nil, fmt.Errorf("No %s are specified in the snapshot", sequenceType)
        } else if _, ok = value.([]interface{}); !ok {
            return nil, fmt.Errorf("Invalid %s are specified in the snapshot", sequenceType)
        } else {
            array := value.([]interface{})
            sequence := make([]string, len(array))
            for i := 0; i < len(array); i++ {
                if hashInHex, ok := array[i].(string); !ok {
                    return nil, fmt.Errorf("Invalid %s sequence is specified in the snapshot", sequenceType)
                } else if hash, err := hex.DecodeString(hashInHex); err != nil {
                    return nil, fmt.Errorf("Hash %s is not a valid hex string in the snapshot", hashInHex)
                } else {
                    sequence[i] = string(hash)
                }
            }

            snapshot.SetSequence(sequenceType, sequence)
        }
    }

    return snapshot, nil
}

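// Illustrative example (added for clarity, not in the original source): a
// minimal description accepted by the parser above. The hash values are made
// up; "files", "chunks", and "lengths" each list hex-encoded hashes:
//
//     {
//       "id": "repository1",
//       "revision": 1,
//       "start_time": 1500000000,
//       "end_time": 1500000060,
//       "files": ["58e1"],
//       "chunks": ["9f86"],
//       "lengths": ["a665"]
//     }
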
// LoadChunks constructs 'ChunkHashes' from the json description.
func (snapshot *Snapshot) LoadChunks(description []byte) (err error) {

    var root []interface{}
    err = json.Unmarshal(description, &root)
    if err != nil {
        return err
    }

    snapshot.ChunkHashes = make([]string, len(root))

    for i, object := range root {
        if hashInHex, ok := object.(string); !ok {
            return fmt.Errorf("Invalid chunk hash is specified in the snapshot")
        } else if hash, err := hex.DecodeString(hashInHex); err != nil {
            return fmt.Errorf("The chunk hash %s is not a valid hex string", hashInHex)
        } else {
            snapshot.ChunkHashes[i] = string(hash)
        }
    }

    return err
}

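// For example (added for clarity, not in the original source), LoadChunks
// accepts a plain JSON array of hex-encoded hashes such as ["9f86", "a665"].
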
// LoadLengths constructs 'ChunkLengths' from the json description.
func (snapshot *Snapshot) LoadLengths(description []byte) (err error) {
    return json.Unmarshal(description, &snapshot.ChunkLengths)
}

// MarshalJSON creates a json representation of the snapshot.
func (snapshot *Snapshot) MarshalJSON() ([]byte, error) {

    object := make(map[string]interface{})

    object["id"] = snapshot.ID
    object["revision"] = snapshot.Revision
    object["options"] = snapshot.Options
    object["tag"] = snapshot.Tag
    object["start_time"] = snapshot.StartTime
    object["end_time"] = snapshot.EndTime

    if snapshot.FileSize != 0 && snapshot.NumberOfFiles != 0 {
        object["file_size"] = snapshot.FileSize
        object["number_of_files"] = snapshot.NumberOfFiles
    }
    object["files"] = encodeSequence(snapshot.FileSequence)
    object["chunks"] = encodeSequence(snapshot.ChunkSequence)
    object["lengths"] = encodeSequence(snapshot.LengthSequence)

    return json.Marshal(object)
}

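// Illustrative sketch (added for clarity, not in the original source):
// MarshalJSON and CreateSnapshotFromDescription round-trip the sequence
// fields, since both rely on the same hex encoding.
func exampleRoundTrip(snapshot *Snapshot) (*Snapshot, error) {
    description, err := snapshot.MarshalJSON()
    if err != nil {
        return nil, err
    }
    return CreateSnapshotFromDescription(description)
}
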
// MarshalSequence creates a json representation for the specified chunk sequence.
func (snapshot *Snapshot) MarshalSequence(sequenceType string) ([]byte, error) {

    if sequenceType == "files" {
        return json.Marshal(snapshot.Files)
    } else if sequenceType == "chunks" {
        return json.Marshal(encodeSequence(snapshot.ChunkHashes))
    } else {
        return json.Marshal(snapshot.ChunkLengths)
    }
}

// SetSequence assigns a chunk sequence to the specified field.
func (snapshot *Snapshot) SetSequence(sequenceType string, sequence []string) {
    if sequenceType == "files" {
        snapshot.FileSequence = sequence
    } else if sequenceType == "chunks" {
        snapshot.ChunkSequence = sequence
    } else {
        snapshot.LengthSequence = sequence
    }
}

// encodeSequence turns a sequence of binary hashes into a sequence of hex hashes.
func encodeSequence(sequence []string) []string {

    sequenceInHex := make([]string, len(sequence))

    for i, hash := range sequence {
        sequenceInHex[i] = hex.EncodeToString([]byte(hash))
    }

    return sequenceInHex
}
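
// For example (added for clarity, not in the original source),
// encodeSequence([]string{"\x9f\x86"}) yields []string{"9f86"}; loading
// reverses this with hex.DecodeString.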

@@ -5,463 +5,463 @@

package duplicacy

import (
    "crypto/rand"
    "encoding/hex"
    "encoding/json"
    "fmt"
    "os"
    "path"
    "strings"
    "testing"
    "time"
)

func createDummySnapshot(snapshotID string, revision int, endTime int64) *Snapshot {
    return &Snapshot{
        ID:       snapshotID,
        Revision: revision,
        EndTime:  endTime,
    }
}

func TestIsDeletable(t *testing.T) {

    //SetLoggingLevel(DEBUG)

    now := time.Now().Unix()
    day := int64(3600 * 24)

    allSnapshots := make(map[string][]*Snapshot)
    allSnapshots["host1"] = append([]*Snapshot{}, createDummySnapshot("host1", 1, now-2*day))
    allSnapshots["host2"] = append([]*Snapshot{}, createDummySnapshot("host2", 1, now-2*day))
    allSnapshots["host1"] = append(allSnapshots["host1"], createDummySnapshot("host1", 2, now-1*day))
    allSnapshots["host2"] = append(allSnapshots["host2"], createDummySnapshot("host2", 2, now-1*day))

    collection := &FossilCollection{
        EndTime:       now - day - 3600,
        LastRevisions: make(map[string]int),
    }

    collection.LastRevisions["host1"] = 1
    collection.LastRevisions["host2"] = 1

    isDeletable, newSnapshots := collection.IsDeletable(true, nil, allSnapshots)
    if !isDeletable || len(newSnapshots) != 2 {
        t.Errorf("Scenario 1: should be deletable, 2 new snapshots")
    }

    collection.LastRevisions["host3"] = 1
    allSnapshots["host3"] = append([]*Snapshot{}, createDummySnapshot("host3", 1, now-2*day))

    isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
    if isDeletable {
        t.Errorf("Scenario 2: should not be deletable")
    }

    allSnapshots["host3"] = append(allSnapshots["host3"], createDummySnapshot("host3", 2, now-day))
    isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
    if !isDeletable || len(newSnapshots) != 3 {
        t.Errorf("Scenario 3: should be deletable, 3 new snapshots")
    }

    collection.LastRevisions["host4"] = 1
    allSnapshots["host4"] = append([]*Snapshot{}, createDummySnapshot("host4", 1, now-8*day))

    isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
    if !isDeletable || len(newSnapshots) != 3 {
        t.Errorf("Scenario 4: should be deletable, 3 new snapshots")
    }

    collection.LastRevisions["repository1@host5"] = 1
    allSnapshots["repository1@host5"] = append([]*Snapshot{}, createDummySnapshot("repository1@host5", 1, now-3*day))

    collection.LastRevisions["repository2@host5"] = 1
    allSnapshots["repository2@host5"] = append([]*Snapshot{}, createDummySnapshot("repository2@host5", 1, now-2*day))

    isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
    if isDeletable {
        t.Errorf("Scenario 5: should not be deletable")
    }

    allSnapshots["repository1@host5"] = append(allSnapshots["repository1@host5"], createDummySnapshot("repository1@host5", 2, now-day))
    isDeletable, newSnapshots = collection.IsDeletable(true, nil, allSnapshots)
    if !isDeletable || len(newSnapshots) != 4 {
        t.Errorf("Scenario 6: should be deletable, 4 new snapshots")
    }
}

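// Note (added for clarity, not in the original source): the scenarios above
// exercise the two-step fossil deletion rule -- a fossil collection becomes
// deletable only once every snapshot ID it knows about has produced a
// snapshot newer than the collection's end time, while a host whose only
// snapshot is sufficiently old (scenario 4, 8 days) is treated as inactive
// and no longer blocks deletion.
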
func createTestSnapshotManager(testDir string) *SnapshotManager {

    os.RemoveAll(testDir)
    os.MkdirAll(testDir, 0700)

    storage, _ := CreateFileStorage(testDir, 2, false, 1)
    storage.CreateDirectory(0, "chunks")
    storage.CreateDirectory(0, "snapshots")
    config := CreateConfig()
    snapshotManager := CreateSnapshotManager(config, storage)

    cacheDir := path.Join(testDir, "cache")
    snapshotCache, _ := CreateFileStorage(cacheDir, 2, false, 1)
    snapshotCache.CreateDirectory(0, "chunks")
    snapshotCache.CreateDirectory(0, "snapshots")

    snapshotManager.snapshotCache = snapshotCache
    return snapshotManager
}

func uploadTestChunk(manager *SnapshotManager, content []byte) string {

    completionFunc := func(chunk *Chunk, chunkIndex int, skipped bool, chunkSize int, uploadSize int) {
        LOG_INFO("UPLOAD_CHUNK", "Chunk %s size %d uploaded", chunk.GetID(), chunkSize)
    }

    chunkUploader := CreateChunkUploader(manager.config, manager.storage, nil, testThreads, nil)
    chunkUploader.completionFunc = completionFunc
    chunkUploader.Start()

    chunk := CreateChunk(manager.config, true)
    chunk.Reset(true)
    chunk.Write(content)
    chunkUploader.StartChunk(chunk, 0)
    chunkUploader.Stop()

    return chunk.GetHash()
}

func uploadRandomChunk(manager *SnapshotManager, chunkSize int) string {
    content := make([]byte, chunkSize)
    _, err := rand.Read(content)
    if err != nil {
        LOG_ERROR("UPLOAD_RANDOM", "Error generating random content: %v", err)
        return ""
    }

    return uploadTestChunk(manager, content)
}

func createTestSnapshot(manager *SnapshotManager, snapshotID string, revision int, startTime int64, endTime int64, chunkHashes []string) {

    snapshot := &Snapshot{
        ID:          snapshotID,
        Revision:    revision,
        StartTime:   startTime,
        EndTime:     endTime,
        ChunkHashes: chunkHashes,
    }

    var chunkHashesInHex []string
    for _, chunkHash := range chunkHashes {
        chunkHashesInHex = append(chunkHashesInHex, hex.EncodeToString([]byte(chunkHash)))
    }

    sequence, _ := json.Marshal(chunkHashesInHex)
    snapshot.ChunkSequence = []string{uploadTestChunk(manager, sequence)}

    description, _ := snapshot.MarshalJSON()
    path := fmt.Sprintf("snapshots/%s/%d", snapshotID, snapshot.Revision)
    manager.storage.CreateDirectory(0, "snapshots/"+snapshotID)
    manager.UploadFile(path, path, description)
}

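// Note (added for clarity, not in the original source): this helper mirrors
// the real backup layout -- the hex-encoded chunk list is itself uploaded as
// a chunk, and the snapshot description is stored under
// "snapshots/<snapshot id>/<revision>".
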
func checkTestSnapshots(manager *SnapshotManager, expectedSnapshots int, expectedFossils int) {

    var snapshotIDs []string
    var err error

    chunks := make(map[string]bool)
    files, _ := manager.ListAllFiles(manager.storage, "chunks/")
    for _, file := range files {
        if file[len(file)-1] == '/' {
            continue
        }
        chunk := strings.Replace(file, "/", "", -1)
        chunks[chunk] = false
    }

    snapshotIDs, err = manager.ListSnapshotIDs()
    if err != nil {
        LOG_ERROR("SNAPSHOT_LIST", "Failed to list all snapshots: %v", err)
        return
    }

    numberOfSnapshots := 0

    for _, snapshotID := range snapshotIDs {

        revisions, err := manager.ListSnapshotRevisions(snapshotID)
        if err != nil {
            LOG_ERROR("SNAPSHOT_LIST", "Failed to list all revisions for snapshot %s: %v", snapshotID, err)
            return
        }

        for _, revision := range revisions {
            snapshot := manager.DownloadSnapshot(snapshotID, revision)
            numberOfSnapshots++

            for _, chunk := range manager.GetSnapshotChunks(snapshot) {
                chunks[chunk] = true
            }
        }
    }

    numberOfFossils := 0
    for chunk, referenced := range chunks {
        if !referenced {
            LOG_INFO("UNREFERENCED_CHUNK", "Unreferenced chunk %s", chunk)
            numberOfFossils++
        }
    }

    if numberOfSnapshots != expectedSnapshots {
        LOG_ERROR("SNAPSHOT_COUNT", "Expecting %d snapshots, got %d instead", expectedSnapshots, numberOfSnapshots)
    }

    if numberOfFossils != expectedFossils {
        LOG_ERROR("FOSSIL_COUNT", "Expecting %d unreferenced chunks, got %d instead", expectedFossils, numberOfFossils)
    }
}

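// Note (added for clarity, not in the original source): a chunk counts as a
// fossil here when no snapshot in the storage references it, so the expected
// fossil counts in the tests below track chunks orphaned by pruning.
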
func TestSingleRepositoryPrune(t *testing.T) {

    setTestingT(t)

    testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

    snapshotManager := createTestSnapshotManager(testDir)

    chunkSize := 1024
    chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

    now := time.Now().Unix()
    day := int64(24 * 3600)
    t.Logf("Creating 1 snapshot")
    createTestSnapshot(snapshotManager, "repository1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
    checkTestSnapshots(snapshotManager, 1, 2)

    t.Logf("Creating 2 snapshots")
    createTestSnapshot(snapshotManager, "repository1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
    createTestSnapshot(snapshotManager, "repository1", 3, now-1*day-3600, now-1*day-60, []string{chunkHash3, chunkHash4})
    checkTestSnapshots(snapshotManager, 3, 0)

    t.Logf("Removing snapshot repository1 revision 1 with --exclusive")
    snapshotManager.PruneSnapshots("repository1", "repository1", []int{1}, []string{}, []string{}, false, true, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 0)

    t.Logf("Removing snapshot repository1 revision 2 without --exclusive")
    snapshotManager.PruneSnapshots("repository1", "repository1", []int{2}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 1, 2)

    t.Logf("Creating 1 snapshot")
    chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
    createTestSnapshot(snapshotManager, "repository1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Prune without removing any snapshots -- fossils will be deleted")
    snapshotManager.PruneSnapshots("repository1", "repository1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 0)
}

func TestSingleHostPrune(t *testing.T) {

    setTestingT(t)

    testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

    snapshotManager := createTestSnapshotManager(testDir)

    chunkSize := 1024
    chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

    now := time.Now().Unix()
    day := int64(24 * 3600)
    t.Logf("Creating 3 snapshots")
    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
    createTestSnapshot(snapshotManager, "vm2@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
    checkTestSnapshots(snapshotManager, 3, 0)

    t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Creating 1 snapshot")
    chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
    createTestSnapshot(snapshotManager, "vm2@host1", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
    checkTestSnapshots(snapshotManager, 3, 2)

    t.Logf("Prune without removing any snapshots -- fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 3, 0)
}

func TestMultipleHostPrune(t *testing.T) {

    setTestingT(t)

    testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

    snapshotManager := createTestSnapshotManager(testDir)

    chunkSize := 1024
    chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

    now := time.Now().Unix()
    day := int64(24 * 3600)
    t.Logf("Creating 3 snapshots")
    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
    createTestSnapshot(snapshotManager, "vm2@host2", 1, now-3*day-3600, now-3*day-60, []string{chunkHash3, chunkHash4})
    checkTestSnapshots(snapshotManager, 3, 0)

    t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Creating 1 snapshot")
    chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
    createTestSnapshot(snapshotManager, "vm2@host2", 2, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
    checkTestSnapshots(snapshotManager, 3, 2)

    t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 3, 2)

    t.Logf("Creating 1 snapshot")
    chunkHash6 := uploadRandomChunk(snapshotManager, chunkSize)
    createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash5, chunkHash6})
    checkTestSnapshots(snapshotManager, 4, 2)

    t.Logf("Prune without removing any snapshots -- fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 4, 0)
}

func TestPruneAndResurrect(t *testing.T) {

    setTestingT(t)

    testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

    snapshotManager := createTestSnapshotManager(testDir)

    chunkSize := 1024
    chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)

    now := time.Now().Unix()
    day := int64(24 * 3600)
    t.Logf("Creating 2 snapshots")
    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
    checkTestSnapshots(snapshotManager, 2, 0)

    t.Logf("Removing snapshot vm1@host1 revision 1 without --exclusive")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 1, 2)

    t.Logf("Creating 1 snapshot")
    chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)
    createTestSnapshot(snapshotManager, "vm1@host1", 4, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash1})
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Prune without removing any snapshots -- one fossil will be resurrected")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 0)
}

func TestInactiveHostPrune(t *testing.T) {

    setTestingT(t)

    testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")

    snapshotManager := createTestSnapshotManager(testDir)

    chunkSize := 1024
    chunkHash1 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash2 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash3 := uploadRandomChunk(snapshotManager, chunkSize)
    chunkHash4 := uploadRandomChunk(snapshotManager, chunkSize)

    now := time.Now().Unix()
    day := int64(24 * 3600)
    t.Logf("Creating 3 snapshots")
    createTestSnapshot(snapshotManager, "vm1@host1", 1, now-3*day-3600, now-3*day-60, []string{chunkHash1, chunkHash2})
    createTestSnapshot(snapshotManager, "vm1@host1", 2, now-2*day-3600, now-2*day-60, []string{chunkHash2, chunkHash3})
    // Host2 is inactive
    createTestSnapshot(snapshotManager, "vm2@host2", 1, now-7*day-3600, now-7*day-60, []string{chunkHash3, chunkHash4})
    checkTestSnapshots(snapshotManager, 3, 0)

    t.Logf("Removing snapshot vm1@host1 revision 1")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{1}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Prune without removing any snapshots -- no fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 2, 2)

    t.Logf("Creating 1 snapshot")
    chunkHash5 := uploadRandomChunk(snapshotManager, chunkSize)
    createTestSnapshot(snapshotManager, "vm1@host1", 3, now+1*day-3600, now+1*day, []string{chunkHash4, chunkHash5})
    checkTestSnapshots(snapshotManager, 3, 2)

    t.Logf("Prune without removing any snapshots -- fossils will be deleted")
    snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{}, false, false, []string{}, false, false, false)
    checkTestSnapshots(snapshotManager, 3, 0)
}

func TestRetentionPolicy(t *testing.T) {
|
func TestRetentionPolicy(t *testing.T) {
|
||||||
|
|
||||||
setTestingT(t)
|
setTestingT(t)
|
||||||
|
|
||||||
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
|
testDir := path.Join(os.TempDir(), "duplicacy_test", "snapshot_test")
|
||||||
|
|
||||||
snapshotManager := createTestSnapshotManager(testDir)
|
snapshotManager := createTestSnapshotManager(testDir)
|
||||||
|
|
||||||
chunkSize := 1024
|
chunkSize := 1024
|
||||||
var chunkHashes [] string
|
var chunkHashes []string
|
||||||
for i := 0; i < 30; i++ {
|
for i := 0; i < 30; i++ {
|
||||||
chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
|
chunkHashes = append(chunkHashes, uploadRandomChunk(snapshotManager, chunkSize))
|
||||||
}
|
}
|
||||||
|
|
||||||
now := time.Now().Unix()
|
now := time.Now().Unix()
|
||||||
day := int64(24 * 3600)
|
day := int64(24 * 3600)
|
||||||
t.Logf("Creating 30 snapshots")
|
t.Logf("Creating 30 snapshots")
|
||||||
for i := 0; i < 30; i++ {
|
for i := 0; i < 30; i++ {
|
||||||
createTestSnapshot(snapshotManager, "vm1@host1", i + 1, now - int64(30 - i) * day - 3600, now - int64(30 - i) * day - 60, []string { chunkHashes[i] })
|
createTestSnapshot(snapshotManager, "vm1@host1", i+1, now-int64(30-i)*day-3600, now-int64(30-i)*day-60, []string{chunkHashes[i]})
|
||||||
}
|
}
|
||||||
|
|
||||||
checkTestSnapshots(snapshotManager, 30, 0)
|
checkTestSnapshots(snapshotManager, 30, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
|
t.Logf("Removing snapshot vm1@host1 0:20 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 19, 0)
|
checkTestSnapshots(snapshotManager, 19, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
|
t.Logf("Removing snapshot vm1@host1 -k 0:20 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"0:20"}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 19, 0)
|
checkTestSnapshots(snapshotManager, 19, 0)
|
||||||
|
|
||||||
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
|
t.Logf("Removing snapshot vm1@host1 -k 3:14 -k 2:7 with --exclusive")
|
||||||
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
|
snapshotManager.PruneSnapshots("vm1@host1", "vm1@host1", []int{}, []string{}, []string{"3:14", "2:7"}, false, true, []string{}, false, false, false)
|
||||||
checkTestSnapshots(snapshotManager, 12, 0)
|
checkTestSnapshots(snapshotManager, 12, 0)
|
||||||
}
|
}
|
||||||
|
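// Note on the "n:m" retention strings passed above (an inferred reading, not
// stated in this diff): "n:m" appears to mean "for snapshots older than m days,
// keep at most one every n days", with n = 0 deleting everything older than m
// days. That matches the expected counts: against 30 daily snapshots, "0:20"
// removes the 11 snapshots older than 20 days and leaves the 19 newest.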

@@ -5,474 +5,473 @@
 package duplicacy

 import (
 	"fmt"
-	"regexp"
-	"strings"
-	"strconv"
-	"os"
-	"net"
-	"path"
-	"io/ioutil"
-	"runtime"
+	"io/ioutil"
+	"net"
+	"os"
+	"path"
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"

 	"golang.org/x/crypto/ssh"
 	"golang.org/x/crypto/ssh/agent"
 )

 type Storage interface {
 	// ListFiles return the list of files and subdirectories under 'dir' (non-recursively)
 	ListFiles(threadIndex int, dir string) (files []string, size []int64, err error)

 	// DeleteFile deletes the file or directory at 'filePath'.
 	DeleteFile(threadIndex int, filePath string) (err error)

 	// MoveFile renames the file.
 	MoveFile(threadIndex int, from string, to string) (err error)

 	// CreateDirectory creates a new directory.
 	CreateDirectory(threadIndex int, dir string) (err error)

 	// GetFileInfo returns the information about the file or directory at 'filePath'.
 	GetFileInfo(threadIndex int, filePath string) (exist bool, isDir bool, size int64, err error)

 	// FindChunk finds the chunk with the specified id. If 'isFossil' is true, it will search for chunk files with
 	// the suffix '.fsl'.
 	FindChunk(threadIndex int, chunkID string, isFossil bool) (filePath string, exist bool, size int64, err error)

 	// DownloadFile reads the file at 'filePath' into the chunk.
 	DownloadFile(threadIndex int, filePath string, chunk *Chunk) (err error)

 	// UploadFile writes 'content' to the file at 'filePath'.
 	UploadFile(threadIndex int, filePath string, content []byte) (err error)

 	// If a local snapshot cache is needed for the storage to avoid downloading/uploading chunks too often when
 	// managing snapshots.
-	IsCacheNeeded() (bool)
+	IsCacheNeeded() bool

 	// If the 'MoveFile' method is implemented.
-	IsMoveFileImplemented() (bool)
+	IsMoveFileImplemented() bool

 	// If the storage can guarantee strong consistency.
-	IsStrongConsistent() (bool)
+	IsStrongConsistent() bool

 	// If the storage supports fast listing of files names.
-	IsFastListing() (bool)
+	IsFastListing() bool

 	// Enable the test mode.
 	EnableTestMode()

 	// Set the maximum transfer speeds.
 	SetRateLimits(downloadRateLimit int, uploadRateLimit int)
 }
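// A minimal sketch (added for illustration, not part of this commit) of how a
// caller might drive the Storage interface above: upload a blob, stat it, then
// rename the chunk to its ".fsl" fossil form the way pruning does. The chunk ID
// "0123abcd" and the flat "chunks/<id>" path layout are assumptions here; real
// backends may nest chunk paths differently.
func exampleStorageRoundTrip(storage Storage) error {
	if err := storage.UploadFile(0, "chunks/0123abcd", []byte("chunk data")); err != nil {
		return err
	}
	exist, isDir, size, err := storage.GetFileInfo(0, "chunks/0123abcd")
	if err != nil || !exist || isDir {
		return fmt.Errorf("uploaded file missing (exist=%v, size=%d): %v", exist, size, err)
	}
	// FindChunk maps a chunk ID back to its path; isFossil=false finds the live chunk.
	filePath, exist, _, err := storage.FindChunk(0, "0123abcd", false)
	if err != nil || !exist {
		return fmt.Errorf("chunk not found: %v", err)
	}
	if storage.IsMoveFileImplemented() {
		// Renaming to the ".fsl" suffix turns the chunk into a fossil.
		return storage.MoveFile(0, filePath, filePath+".fsl")
	}
	return nil
}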

 type RateLimitedStorage struct {
 	DownloadRateLimit int
 	UploadRateLimit   int
 }

 func (storage *RateLimitedStorage) SetRateLimits(downloadRateLimit int, uploadRateLimit int) {
 	storage.DownloadRateLimit = downloadRateLimit
 	storage.UploadRateLimit = uploadRateLimit
 }

 func checkHostKey(hostname string, remote net.Addr, key ssh.PublicKey) error {

 	preferencePath := GetDuplicacyPreferencePath()
 	hostFile := path.Join(preferencePath, "known_hosts")
-	file, err := os.OpenFile(hostFile, os.O_RDWR | os.O_CREATE, 0600)
+	file, err := os.OpenFile(hostFile, os.O_RDWR|os.O_CREATE, 0600)
 	if err != nil {
 		return err
 	}

 	defer file.Close()
 	content, err := ioutil.ReadAll(file)
 	if err != nil {
 		return err
 	}

 	lineRegex := regexp.MustCompile(`^([^\s]+)\s+(.+)`)

 	keyString := string(ssh.MarshalAuthorizedKey(key))
 	keyString = strings.Replace(keyString, "\n", "", -1)
 	remoteAddress := remote.String()
 	if strings.HasSuffix(remoteAddress, ":22") {
-		remoteAddress = remoteAddress[:len(remoteAddress) - len(":22")]
+		remoteAddress = remoteAddress[:len(remoteAddress)-len(":22")]
 	}

 	for i, line := range strings.Split(string(content), "\n") {
 		matched := lineRegex.FindStringSubmatch(line)
 		if matched == nil {
 			continue
 		}

 		if matched[1] == remote.String() {
 			if keyString != matched[2] {
 				LOG_WARN("HOSTKEY_OLD", "The existing key for '%s' is %s (file %s, line %d)",
 					remote.String(), matched[2], hostFile, i)
 				LOG_WARN("HOSTKEY_NEW", "The new key is '%s'", keyString)
 				return fmt.Errorf("The host key for '%s' has changed", remote.String())
 			} else {
 				return nil
 			}
 		}
 	}

 	file.Write([]byte(remote.String() + " " + keyString + "\n"))
 	return nil
 }
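// The entry appended by checkHostKey above has the form
// "<remote address> <marshaled authorized key>", one per line, e.g. a
// hypothetical
//
//	192.0.2.10:22 ssh-rsa AAAAB3NzaC1yc2E...
//
// (the address and key material shown are placeholders, not taken from the code).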

 // CreateStorage creates a storage object based on the provide storage URL.
 func CreateStorage(preference Preference, resetPassword bool, threads int) (storage Storage) {

 	storageURL := preference.StorageURL

 	isFileStorage := false
 	isCacheNeeded := false

 	if strings.HasPrefix(storageURL, "/") {
 		isFileStorage = true
 	} else if runtime.GOOS == "windows" {
 		if len(storageURL) >= 3 && storageURL[1] == ':' && (storageURL[2] == '/' || storageURL[2] == '\\') {
 			volume := strings.ToLower(storageURL[:1])
 			if volume[0] >= 'a' && volume[0] <= 'z' {
 				isFileStorage = true
 			}
 		}

 		if !isFileStorage && strings.HasPrefix(storageURL, `\\`) {
 			isFileStorage = true
 			isCacheNeeded = true
 		}
 	}

 	if isFileStorage {
 		fileStorage, err := CreateFileStorage(storageURL, 2, isCacheNeeded, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
 			return nil
 		}
 		return fileStorage
 	}

 	if strings.HasPrefix(storageURL, "flat://") {
 		fileStorage, err := CreateFileStorage(storageURL[7:], 0, false, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
 			return nil
 		}
 		return fileStorage
 	}

 	if strings.HasPrefix(storageURL, "samba://") {
 		fileStorage, err := CreateFileStorage(storageURL[8:], 2, true, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the file storage at %s: %v", storageURL, err)
 			return nil
 		}
 		return fileStorage
 	}

 	urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)

 	matched := urlRegex.FindStringSubmatch(storageURL)

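// For a hypothetical "sftp://user@example.com:2222/backups", the capture groups
// of urlRegex come out as matched[1]="sftp", matched[2]="user@",
// matched[3]="example.com:2222", and matched[5]="backups"; the branches below
// rely on exactly these indices (the example URL is illustrative only).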
 	if matched == nil {
 		LOG_ERROR("STORAGE_CREATE", "Unrecognizable storage URL: %s", storageURL)
 		return nil
 	} else if matched[1] == "sftp" {
 		server := matched[3]
 		username := matched[2]
 		storageDir := matched[5]
 		port := 22

 		if strings.Contains(server, ":") {
 			index := strings.Index(server, ":")
-			port, _ = strconv.Atoi(server[index + 1:])
+			port, _ = strconv.Atoi(server[index+1:])
 			server = server[:index]
 		}

 		if storageDir == "" {
 			LOG_ERROR("STORAGE_CREATE", "The SFTP storage directory can't be empty")
 			return nil
 		}

 		if username != "" {
-			username = username[:len(username) - 1]
+			username = username[:len(username)-1]
 		}

 		// If ssh_key_file is set, skip password-based login
 		keyFile := GetPasswordFromPreference(preference, "ssh_key_file")

 		password := ""
 		passwordCallback := func() (string, error) {
 			LOG_DEBUG("SSH_PASSWORD", "Attempting password login")
 			password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
 			return password, nil
 		}

-		keyboardInteractive := func (user, instruction string, questions []string, echos []bool) (answers []string,
+		keyboardInteractive := func(user, instruction string, questions []string, echos []bool) (answers []string,
 			err error) {
 			if len(questions) == 1 {
 				LOG_DEBUG("SSH_INTERACTIVE", "Attempting keyboard interactive login")
 				password = GetPassword(preference, "ssh_password", "Enter SSH password:", false, resetPassword)
-				answers = []string { password }
+				answers = []string{password}
 				return answers, nil
 			} else {
 				return nil, nil
 			}
 		}

 		publicKeysCallback := func() ([]ssh.Signer, error) {
 			LOG_DEBUG("SSH_PUBLICKEY", "Attempting public key authentication")

-			signers := []ssh.Signer {}
+			signers := []ssh.Signer{}

 			agentSock := os.Getenv("SSH_AUTH_SOCK")
 			if agentSock != "" {
 				connection, err := net.Dial("unix", agentSock)
 				// TODO: looks like we need to close the connection
 				if err == nil {
 					LOG_DEBUG("SSH_AGENT", "Attempting public key authentication via agent")
 					sshAgent := agent.NewClient(connection)
 					signers, err = sshAgent.Signers()
 					if err != nil {
 						LOG_DEBUG("SSH_AGENT", "Can't log in using public key authentication via agent: %v", err)
 					}
 				}
 			}

 			keyFile = GetPassword(preference, "ssh_key_file", "Enter the path of the private key file:",
 				true, resetPassword)

 			var key ssh.Signer
 			var err error

 			if keyFile == "" {
 				LOG_INFO("SSH_PUBLICKEY", "No private key file is provided")
 			} else {
 				var content []byte
 				content, err = ioutil.ReadFile(keyFile)
 				if err != nil {
 					LOG_INFO("SSH_PUBLICKEY", "Failed to read the private key file: %v", err)
 				} else {
 					key, err = ssh.ParsePrivateKey(content)
 					if err != nil {
 						LOG_INFO("SSH_PUBLICKEY", "Failed to parse the private key file %s: %v", keyFile, err)
 					}
 				}
 			}

 			if key != nil {
 				signers = append(signers, key)
 			}

 			if len(signers) > 0 {
 				return signers, nil
 			} else {
 				return nil, err
 			}

 		}

-		authMethods := [] ssh.AuthMethod {
-		}
-		passwordAuthMethods := [] ssh.AuthMethod {
+		authMethods := []ssh.AuthMethod{}
+		passwordAuthMethods := []ssh.AuthMethod{
 			ssh.PasswordCallback(passwordCallback),
 			ssh.KeyboardInteractive(keyboardInteractive),
 		}
-		keyFileAuthMethods := [] ssh.AuthMethod {
+		keyFileAuthMethods := []ssh.AuthMethod{
 			ssh.PublicKeysCallback(publicKeysCallback),
 		}
 		if keyFile != "" {
 			authMethods = append(keyFileAuthMethods, passwordAuthMethods...)
 		} else {
 			authMethods = append(passwordAuthMethods, keyFileAuthMethods...)
 		}

 		if RunInBackground {

 			passwordKey := "ssh_password"
 			keyFileKey := "ssh_key_file"
 			if preference.Name != "default" {
 				passwordKey = preference.Name + "_" + passwordKey
 				keyFileKey = preference.Name + "_" + keyFileKey
 			}

-			authMethods = [] ssh.AuthMethod {}
+			authMethods = []ssh.AuthMethod{}
 			if keyringGet(passwordKey) != "" {
 				authMethods = append(authMethods, ssh.PasswordCallback(passwordCallback))
 				authMethods = append(authMethods, ssh.KeyboardInteractive(keyboardInteractive))
 			}
 			if keyringGet(keyFileKey) != "" || os.Getenv("SSH_AUTH_SOCK") != "" {
 				authMethods = append(authMethods, ssh.PublicKeysCallback(publicKeysCallback))
 			}
 		}

 		hostKeyChecker := func(hostname string, remote net.Addr, key ssh.PublicKey) error {
 			return checkHostKey(hostname, remote, key)
 		}

 		sftpStorage, err := CreateSFTPStorage(server, port, username, storageDir, authMethods, hostKeyChecker, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the SFTP storage at %s: %v", storageURL, err)
 			return nil
 		}

 		if keyFile != "" {
 			SavePassword(preference, "ssh_key_file", keyFile)
 		} else if password != "" {
 			SavePassword(preference, "ssh_password", password)
 		}
 		return sftpStorage
 	} else if matched[1] == "s3" || matched[1] == "s3c" || matched[1] == "minio" || matched[1] == "minios" {

 		// urlRegex := regexp.MustCompile(`^(\w+)://([\w\-]+@)?([^/]+)(/(.+))?`)

 		region := matched[2]
 		endpoint := matched[3]
 		bucket := matched[5]

 		if region != "" {
-			region = region[:len(region) - 1]
+			region = region[:len(region)-1]
 		}

 		if strings.EqualFold(endpoint, "amazon") || strings.EqualFold(endpoint, "amazon.com") {
 			endpoint = ""
 		}

 		storageDir := ""
 		if strings.Contains(bucket, "/") {
 			firstSlash := strings.Index(bucket, "/")
-			storageDir = bucket[firstSlash + 1:]
+			storageDir = bucket[firstSlash+1:]
 			bucket = bucket[:firstSlash]
 		}

 		accessKey := GetPassword(preference, "s3_id", "Enter S3 Access Key ID:", true, resetPassword)
 		secretKey := GetPassword(preference, "s3_secret", "Enter S3 Secret Access Key:", true, resetPassword)

 		var err error

 		if matched[1] == "s3c" {
 			storage, err = CreateS3CStorage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads)
 			if err != nil {
 				LOG_ERROR("STORAGE_CREATE", "Failed to load the S3C storage at %s: %v", storageURL, err)
 				return nil
 			}
 		} else {
 			isMinioCompatible := (matched[1] == "minio" || matched[1] == "minios")
 			isSSLSupported := (matched[1] == "s3" || matched[1] == "minios")
 			storage, err = CreateS3Storage(region, endpoint, bucket, storageDir, accessKey, secretKey, threads, isSSLSupported, isMinioCompatible)
 			if err != nil {
 				LOG_ERROR("STORAGE_CREATE", "Failed to load the S3 storage at %s: %v", storageURL, err)
 				return nil
 			}
 		}
 		SavePassword(preference, "s3_id", accessKey)
 		SavePassword(preference, "s3_secret", secretKey)

 		return storage
 	} else if matched[1] == "dropbox" {
 		storageDir := matched[3] + matched[5]
 		token := GetPassword(preference, "dropbox_token", "Enter Dropbox access token:", true, resetPassword)
 		dropboxStorage, err := CreateDropboxStorage(token, storageDir, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the dropbox storage: %v", err)
 			return nil
 		}
 		SavePassword(preference, "dropbox_token", token)
 		return dropboxStorage
 	} else if matched[1] == "b2" {
 		bucket := matched[3]

 		accountID := GetPassword(preference, "b2_id", "Enter Backblaze Account ID:", true, resetPassword)
 		applicationKey := GetPassword(preference, "b2_key", "Enter Backblaze Application Key:", true, resetPassword)

 		b2Storage, err := CreateB2Storage(accountID, applicationKey, bucket, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Backblaze B2 storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "b2_id", accountID)
 		SavePassword(preference, "b2_key", applicationKey)
 		return b2Storage
 	} else if matched[1] == "azure" {
 		account := matched[3]
 		container := matched[5]

 		if container == "" {
 			LOG_ERROR("STORAGE_CREATE", "The container name for the Azure storage can't be empty")
 			return nil
 		}

 		prompt := fmt.Sprintf("Enter the Access Key for the Azure storage account %s:", account)
 		accessKey := GetPassword(preference, "azure_key", prompt, true, resetPassword)

 		azureStorage, err := CreateAzureStorage(account, accessKey, container, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Azure storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "azure_key", accessKey)
 		return azureStorage
 	} else if matched[1] == "acd" {
 		storagePath := matched[3] + matched[4]
 		prompt := fmt.Sprintf("Enter the path of the Amazon Cloud Drive token file (downloadable from https://duplicacy.com/acd_start):")
 		tokenFile := GetPassword(preference, "acd_token", prompt, true, resetPassword)
 		acdStorage, err := CreateACDStorage(tokenFile, storagePath, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Amazon Cloud Drive storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "acd_token", tokenFile)
 		return acdStorage
 	} else if matched[1] == "gcs" {
 		bucket := matched[3]
 		storageDir := matched[5]
 		prompt := fmt.Sprintf("Enter the path of the Google Cloud Storage token file (downloadable from https://duplicacy.com/gcs_start) or the service account credential file:")
 		tokenFile := GetPassword(preference, "gcs_token", prompt, true, resetPassword)
 		gcsStorage, err := CreateGCSStorage(tokenFile, bucket, storageDir, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Cloud Storage backend at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "gcs_token", tokenFile)
 		return gcsStorage
 	} else if matched[1] == "gcd" {
 		storagePath := matched[3] + matched[4]
 		prompt := fmt.Sprintf("Enter the path of the Google Drive token file (downloadable from https://duplicacy.com/gcd_start):")
 		tokenFile := GetPassword(preference, "gcd_token", prompt, true, resetPassword)
 		gcdStorage, err := CreateGCDStorage(tokenFile, storagePath, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Google Drive storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "gcd_token", tokenFile)
 		return gcdStorage
 	} else if matched[1] == "one" {
 		storagePath := matched[3] + matched[4]
 		prompt := fmt.Sprintf("Enter the path of the OneDrive token file (downloadable from https://duplicacy.com/one_start):")
 		tokenFile := GetPassword(preference, "one_token", prompt, true, resetPassword)
 		oneDriveStorage, err := CreateOneDriveStorage(tokenFile, storagePath, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the OneDrive storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "one_token", tokenFile)
 		return oneDriveStorage
 	} else if matched[1] == "hubic" {
 		storagePath := matched[3] + matched[4]
 		prompt := fmt.Sprintf("Enter the path of the Hubic token file (downloadable from https://duplicacy.com/hubic_start):")
 		tokenFile := GetPassword(preference, "hubic_token", prompt, true, resetPassword)
 		hubicStorage, err := CreateHubicStorage(tokenFile, storagePath, threads)
 		if err != nil {
 			LOG_ERROR("STORAGE_CREATE", "Failed to load the Hubic storage at %s: %v", storageURL, err)
 			return nil
 		}
 		SavePassword(preference, "hubic_token", tokenFile)
 		return hubicStorage
 	} else {
 		LOG_ERROR("STORAGE_CREATE", "The storage type '%s' is not supported", matched[1])
 		return nil
 	}

 }

@@ -5,22 +5,22 @@
 package duplicacy

 import (
-	"os"
-	"fmt"
-	"time"
-	"flag"
-	"path"
-	"testing"
-	"strings"
-	"strconv"
-	"io/ioutil"
-	"crypto/sha256"
-	"encoding/hex"
-	"encoding/json"
-	"runtime/debug"
+	"crypto/sha256"
+	"encoding/hex"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"runtime/debug"
+	"strconv"
+	"strings"
+	"testing"
+	"time"

 	crypto_rand "crypto/rand"
 	"math/rand"
 )

 var testStorageName string

@@ -30,492 +30,491 @@ var testThreads int
 var testFixedChunkSize bool

 func init() {
 	flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
 	flag.IntVar(&testRateLimit, "limit-rate", 0, "maximum transfer speed in kbytes/sec")
 	flag.BoolVar(&testQuickMode, "quick", false, "quick test")
 	flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
 	flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
 	flag.Parse()
 }

 func loadStorage(localStoragePath string, threads int) (Storage, error) {

 	if testStorageName == "" || testStorageName == "file" {
 		return CreateFileStorage(localStoragePath, 2, false, threads)
 	}

 	config, err := ioutil.ReadFile("test_storage.conf")
 	if err != nil {
 		return nil, err
 	}

 	storages := make(map[string]map[string]string)

 	err = json.Unmarshal(config, &storages)
 	if err != nil {
 		return nil, err
 	}

 	storage, found := storages[testStorageName]
 	if !found {
 		return nil, fmt.Errorf("No storage named '%s' found", testStorageName)
 	}

 	if testStorageName == "flat" {
 		return CreateFileStorage(localStoragePath, 0, false, threads)
 	} else if testStorageName == "samba" {
 		return CreateFileStorage(localStoragePath, 2, true, threads)
 	} else if testStorageName == "sftp" {
 		port, _ := strconv.Atoi(storage["port"])
 		return CreateSFTPStorageWithPassword(storage["server"], port, storage["username"], storage["directory"], storage["password"], threads)
 	} else if testStorageName == "s3" || testStorageName == "wasabi" {
 		return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
 	} else if testStorageName == "s3c" {
 		return CreateS3CStorage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads)
 	} else if testStorageName == "minio" {
 		return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, false, true)
 	} else if testStorageName == "minios" {
 		return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, true)
 	} else if testStorageName == "dropbox" {
 		return CreateDropboxStorage(storage["token"], storage["directory"], threads)
 	} else if testStorageName == "b2" {
 		return CreateB2Storage(storage["account"], storage["key"], storage["bucket"], threads)
 	} else if testStorageName == "gcs-s3" {
 		return CreateS3Storage(storage["region"], storage["endpoint"], storage["bucket"], storage["directory"], storage["access_key"], storage["secret_key"], threads, true, false)
 	} else if testStorageName == "gcs" {
 		return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
 	} else if testStorageName == "gcs-sa" {
 		return CreateGCSStorage(storage["token_file"], storage["bucket"], storage["directory"], threads)
 	} else if testStorageName == "azure" {
 		return CreateAzureStorage(storage["account"], storage["key"], storage["container"], threads)
 	} else if testStorageName == "acd" {
 		return CreateACDStorage(storage["token_file"], storage["storage_path"], threads)
 	} else if testStorageName == "gcd" {
 		return CreateGCDStorage(storage["token_file"], storage["storage_path"], threads)
 	} else if testStorageName == "one" {
 		return CreateOneDriveStorage(storage["token_file"], storage["storage_path"], threads)
 	} else if testStorageName == "hubic" {
 		return CreateHubicStorage(storage["token_file"], storage["storage_path"], threads)
 	} else {
 		return nil, fmt.Errorf("Invalid storage named: %s", testStorageName)
 	}
 }
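// A sketch of the test_storage.conf shape that loadStorage above expects: a
// JSON object keyed by storage name, each value a flat string map whose keys
// match the lookups in the corresponding branch. All values below are
// placeholders, not real endpoints or credentials:
//
//	{
//	    "sftp": {"server": "sftp.example.com", "port": "22", "username": "tester",
//	             "directory": "/tmp/duplicacy", "password": "PLACEHOLDER"},
//	    "s3": {"region": "us-east-1", "endpoint": "", "bucket": "test-bucket",
//	           "directory": "duplicacy", "access_key": "PLACEHOLDER", "secret_key": "PLACEHOLDER"}
//	}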

 func cleanStorage(storage Storage) {

 	directories := make([]string, 0, 1024)
 	snapshots := make([]string, 0, 1024)

 	directories = append(directories, "snapshots/")

 	LOG_INFO("STORAGE_LIST", "Listing snapshots in the storage")
 	for len(directories) > 0 {

-		dir := directories[len(directories) - 1]
-		directories = directories[:len(directories) - 1]
+		dir := directories[len(directories)-1]
+		directories = directories[:len(directories)-1]

 		files, _, err := storage.ListFiles(0, dir)
 		if err != nil {
 			LOG_ERROR("STORAGE_LIST", "Failed to list the directory %s: %v", dir, err)
 			return
 		}

 		for _, file := range files {
-			if len(file) > 0 && file[len(file) - 1] == '/' {
-				directories = append(directories, dir + file)
+			if len(file) > 0 && file[len(file)-1] == '/' {
+				directories = append(directories, dir+file)
 			} else {
-				snapshots = append(snapshots, dir + file)
+				snapshots = append(snapshots, dir+file)
 			}
 		}
 	}

 	LOG_INFO("STORAGE_DELETE", "Deleting %d snapshots in the storage", len(snapshots))
 	for _, snapshot := range snapshots {
 		storage.DeleteFile(0, snapshot)
 	}

 	for _, chunk := range listChunks(storage) {
-		storage.DeleteFile(0, "chunks/" + chunk)
+		storage.DeleteFile(0, "chunks/"+chunk)
 	}

 	storage.DeleteFile(0, "config")

 	return
 }

 func listChunks(storage Storage) (chunks []string) {

 	directories := make([]string, 0, 1024)

 	directories = append(directories, "chunks/")

 	for len(directories) > 0 {

-		dir := directories[len(directories) - 1]
-		directories = directories[:len(directories) - 1]
+		dir := directories[len(directories)-1]
+		directories = directories[:len(directories)-1]

 		files, _, err := storage.ListFiles(0, dir)
 		if err != nil {
 			LOG_ERROR("CHUNK_LIST", "Failed to list the directory %s: %v", dir, err)
 			return nil
 		}

 		for _, file := range files {
-			if len(file) > 0 && file[len(file) - 1] == '/' {
-				directories = append(directories, dir + file)
+			if len(file) > 0 && file[len(file)-1] == '/' {
+				directories = append(directories, dir+file)
 			} else {
 				chunk := dir + file
 				chunk = chunk[len("chunks/"):]
 				chunks = append(chunks, chunk)
 			}
 		}
 	}

 	return
 }

 func moveChunk(t *testing.T, storage Storage, chunkID string, isFossil bool, delay int) {

 	filePath, exist, _, err := storage.FindChunk(0, chunkID, isFossil)

 	if err != nil {
 		t.Errorf("Error find chunk %s: %v", chunkID, err)
 		return
 	}

 	to := filePath + ".fsl"
 	if isFossil {
-		to = filePath[:len(filePath) - len(".fsl")]
+		to = filePath[:len(filePath)-len(".fsl")]
 	}

 	err = storage.MoveFile(0, filePath, to)
 	if err != nil {
 		t.Errorf("Error renaming file %s to %s: %v", filePath, to, err)
 	}

 	time.Sleep(time.Duration(delay) * time.Second)

 	_, exist, _, err = storage.FindChunk(0, chunkID, isFossil)
 	if err != nil {
 		t.Errorf("Error get file info for chunk %s: %v", chunkID, err)
 	}

 	if exist {
 		t.Errorf("File %s still exists after renaming", filePath)
 	}

 	_, exist, _, err = storage.FindChunk(0, chunkID, !isFossil)
 	if err != nil {
 		t.Errorf("Error get file info for %s: %v", to, err)
 	}

 	if !exist {
 		t.Errorf("File %s doesn't exist", to)
 	}

 }
|
|
||||||
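// The fossil naming convention exercised by moveChunk above: a chunk becomes a
// fossil by gaining a ".fsl" suffix, and a fossil becomes a chunk again by
// losing it. A hypothetical helper (not in the original source) that computes
// the rename target:
func exampleFossilTarget(filePath string, isFossil bool) string {
	if isFossil {
		return filePath[:len(filePath)-len(".fsl")] // fossil -> chunk
	}
	return filePath + ".fsl" // chunk -> fossil
}
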
func TestStorage(t *testing.T) {

	rand.Seed(time.Now().UnixNano())
	setTestingT(t)
	SetLoggingLevel(INFO)

	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()

	testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

	storage, err := loadStorage(testDir, 1)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}
	storage.EnableTestMode()
	storage.SetRateLimits(testRateLimit, testRateLimit)

	delay := 0
	if _, ok := storage.(*ACDStorage); ok {
		delay = 5
	}
	if _, ok := storage.(*HubicStorage); ok {
		delay = 2
	}

	for _, dir := range []string{"chunks", "snapshots"} {
		err = storage.CreateDirectory(0, dir)
		if err != nil {
			t.Errorf("Failed to create directory %s: %v", dir, err)
			return
		}
	}

	storage.CreateDirectory(0, "snapshots/repository1")
	storage.CreateDirectory(0, "snapshots/repository2")
	time.Sleep(time.Duration(delay) * time.Second)
	{
		// Upload fake snapshot files so that for storages having no concept of directories,
		// ListFiles("snapshots") still returns correct snapshot IDs.

		// Create a random file, not a text file, to make ACD Storage happy.
		content := make([]byte, 100)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		err = storage.UploadFile(0, "snapshots/repository1/1", content)
		if err != nil {
			t.Errorf("Error uploading snapshots/repository1/1: %v", err)
		}

		err = storage.UploadFile(0, "snapshots/repository2/1", content)
		if err != nil {
			t.Errorf("Error uploading snapshots/repository2/1: %v", err)
		}
	}

	time.Sleep(time.Duration(delay) * time.Second)

	snapshotDirs, _, err := storage.ListFiles(0, "snapshots/")
	if err != nil {
		t.Errorf("Failed to list snapshot ids: %v", err)
		return
	}

	snapshotIDs := []string{}
	for _, snapshotDir := range snapshotDirs {
		if len(snapshotDir) > 0 && snapshotDir[len(snapshotDir)-1] == '/' {
			snapshotIDs = append(snapshotIDs, snapshotDir[:len(snapshotDir)-1])
		}
	}

	if len(snapshotIDs) < 2 {
		t.Errorf("Snapshot directories not created")
		return
	}

	for _, snapshotID := range snapshotIDs {
		snapshots, _, err := storage.ListFiles(0, "snapshots/"+snapshotID)
		if err != nil {
			t.Errorf("Failed to list snapshots for %s: %v", snapshotID, err)
			return
		}
		for _, snapshot := range snapshots {
			storage.DeleteFile(0, "snapshots/"+snapshotID+"/"+snapshot)
		}
	}

	time.Sleep(time.Duration(delay) * time.Second)

	storage.DeleteFile(0, "config")

	for _, file := range []string{"snapshots/repository1/1", "snapshots/repository2/1"} {
		exist, _, _, err := storage.GetFileInfo(0, file)
		if err != nil {
			t.Errorf("Failed to get file info for %s: %v", file, err)
			return
		}
		if exist {
			t.Errorf("File %s still exists after deletion", file)
			return
		}
	}

	numberOfFiles := 20
	maxFileSize := 64 * 1024

	if testQuickMode {
		numberOfFiles = 2
	}

	chunks := []string{}

	for i := 0; i < numberOfFiles; i++ {
		content := make([]byte, rand.Int()%maxFileSize+1)
		_, err = crypto_rand.Read(content)
		if err != nil {
			t.Errorf("Error generating random content: %v", err)
			return
		}

		hasher := sha256.New()
		hasher.Write(content)
		chunkID := hex.EncodeToString(hasher.Sum(nil))
		chunks = append(chunks, chunkID)

		filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
		if err != nil {
			t.Errorf("Failed to list the chunk %s: %v", chunkID, err)
			return
		}
		if exist {
			t.Errorf("Chunk %s already exists", chunkID)
		}

		err = storage.UploadFile(0, filePath, content)
		if err != nil {
			t.Errorf("Failed to upload the file %s: %v", filePath, err)
			return
		}
		LOG_INFO("STORAGE_CHUNK", "Uploaded chunk: %s, size: %d", chunkID, len(content))
	}

	allChunks := []string{}
	for _, file := range listChunks(storage) {
		file = strings.Replace(file, "/", "", -1)
		if len(file) == 64 {
			allChunks = append(allChunks, file)
		}
	}

	LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[0])
	moveChunk(t, storage, chunks[0], false, delay)
	LOG_INFO("STORAGE_FOSSIL", "Making %s a chunk", chunks[0])
	moveChunk(t, storage, chunks[0], true, delay)

	config := CreateConfig()
	config.MinimumChunkSize = 100
	config.chunkPool = make(chan *Chunk, numberOfFiles*2)

	chunk := CreateChunk(config, true)

	for _, chunkID := range chunks {
		chunk.Reset(false)

		filePath, exist, _, err := storage.FindChunk(0, chunkID, false)
		if err != nil {
			t.Errorf("Error getting file info for chunk %s: %v", chunkID, err)
			continue
		} else if !exist {
			t.Errorf("Chunk %s does not exist", chunkID)
			continue
		} else {
			err = storage.DownloadFile(0, filePath, chunk)
			if err != nil {
				t.Errorf("Error downloading file %s: %v", filePath, err)
				continue
			}
			LOG_INFO("STORAGE_CHUNK", "Downloaded chunk: %s, size: %d", chunkID, chunk.GetLength())
		}

		hasher := sha256.New()
		hasher.Write(chunk.GetBytes())
		hash := hex.EncodeToString(hasher.Sum(nil))

		if hash != chunkID {
			t.Errorf("File %s, hash %s, size %d", chunkID, hash, chunk.GetLength())
		}
	}

	LOG_INFO("STORAGE_FOSSIL", "Making %s a fossil", chunks[1])
	moveChunk(t, storage, chunks[1], false, delay)

	filePath, exist, _, err := storage.FindChunk(0, chunks[1], true)
	if err != nil {
		t.Errorf("Error getting file info for fossil %s: %v", chunks[1], err)
	} else if !exist {
		t.Errorf("Fossil %s does not exist", chunks[1])
	} else {
		err = storage.DeleteFile(0, filePath)
		if err != nil {
			t.Errorf("Failed to delete file %s: %v", filePath, err)
		} else {
			time.Sleep(time.Duration(delay) * time.Second)
			filePath, exist, _, err = storage.FindChunk(0, chunks[1], true)
			if err != nil {
				t.Errorf("Error getting file info for deleted fossil %s: %v", chunks[1], err)
			} else if exist {
				t.Errorf("Fossil %s still exists after deletion", chunks[1])
			}
		}
	}

	for _, file := range allChunks {
		err = storage.DeleteFile(0, "chunks/"+file)
		if err != nil {
			t.Errorf("Failed to delete the file %s: %v", file, err)
			return
		}
	}

}

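// Chunk IDs in TestStorage above are the hex-encoded SHA-256 digest of the
// chunk content (hence the len(file) == 64 check on listed names); a
// standalone sketch of that derivation:
func exampleChunkID(content []byte) string {
	hasher := sha256.New()
	hasher.Write(content)
	return hex.EncodeToString(hasher.Sum(nil))
}
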
func TestCleanStorage(t *testing.T) {

	setTestingT(t)
	SetLoggingLevel(INFO)

	defer func() {
		if r := recover(); r != nil {
			switch e := r.(type) {
			case Exception:
				t.Errorf("%s %s", e.LogID, e.Message)
				debug.PrintStack()
			default:
				t.Errorf("%v", e)
				debug.PrintStack()
			}
		}
	}()

	testDir := path.Join(os.TempDir(), "duplicacy_test", "storage_test")
	os.RemoveAll(testDir)
	os.MkdirAll(testDir, 0700)

	LOG_INFO("STORAGE_TEST", "storage: %s", testStorageName)

	storage, err := loadStorage(testDir, 1)
	if err != nil {
		t.Errorf("Failed to create storage: %v", err)
		return
	}

	directories := make([]string, 0, 1024)
	directories = append(directories, "snapshots/")
	directories = append(directories, "chunks/")

	for len(directories) > 0 {

		dir := directories[len(directories)-1]
		directories = directories[:len(directories)-1]

		LOG_INFO("LIST_FILES", "Listing %s", dir)

		files, _, err := storage.ListFiles(0, dir)
		if err != nil {
			LOG_ERROR("LIST_FILES", "Failed to list the directory %s: %v", dir, err)
			return
		}

		for _, file := range files {
			if len(file) > 0 && file[len(file)-1] == '/' {
				directories = append(directories, dir+file)
			} else {
				storage.DeleteFile(0, dir+file)
				LOG_INFO("DELETE_FILE", "Deleted file %s", file)
			}
		}
	}

	storage.DeleteFile(0, "config")
	LOG_INFO("DELETE_FILE", "Deleted config")

}

@@ -5,262 +5,262 @@
package duplicacy

import (
	"bufio"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/gilbertchen/gopass"
	"golang.org/x/crypto/pbkdf2"
)

var RunInBackground bool = false

type RateLimitedReader struct {
	Content   []byte
	Rate      float64
	Next      int
	StartTime time.Time
}

var RegexMap map[string]*regexp.Regexp

func init() {

	if RegexMap == nil {
		RegexMap = make(map[string]*regexp.Regexp)
	}

}

func CreateRateLimitedReader(content []byte, rate int) *RateLimitedReader {
	return &RateLimitedReader{
		Content: content,
		Rate:    float64(rate * 1024),
		Next:    0,
	}
}

func IsEmptyFilter(pattern string) bool {
	if pattern == "+" || pattern == "-" || pattern == "i:" || pattern == "e:" {
		return true
	} else {
		return false
	}
}

func IsUnspecifiedFilter(pattern string) bool {
	if pattern[0] != '+' && pattern[0] != '-' && pattern[0] != 'i' && pattern[0] != 'e' {
		return true
	} else {
		return false
	}
}

func IsValidRegex(pattern string) (valid bool, err error) {

	var re *regexp.Regexp = nil

	if re, valid = RegexMap[pattern]; valid && re != nil {
		return true, nil
	}

	re, err = regexp.Compile(pattern)

	if err != nil {
		return false, err
	} else {
		RegexMap[pattern] = re
		LOG_DEBUG("REGEX_STORED", "Saved compiled regex for pattern \"%s\", regex=%#v", pattern, re)
		return true, err
	}
}

func (reader *RateLimitedReader) Length() int64 {
	return int64(len(reader.Content))
}

func (reader *RateLimitedReader) Reset() {
	reader.Next = 0
}

func (reader *RateLimitedReader) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekStart {
		reader.Next = int(offset)
	} else if whence == io.SeekCurrent {
		reader.Next += int(offset)
	} else {
		reader.Next = len(reader.Content) - int(offset)
	}
	return int64(reader.Next), nil
}

func (reader *RateLimitedReader) Read(p []byte) (n int, err error) {

	if reader.Next >= len(reader.Content) {
		return 0, io.EOF
	}

	if reader.Rate <= 0 {
		n := copy(p, reader.Content[reader.Next:])
		reader.Next += n
		if reader.Next >= len(reader.Content) {
			return n, io.EOF
		}
		return n, nil
	}

	if reader.StartTime.IsZero() {
		reader.StartTime = time.Now()
	}

	elapsed := time.Since(reader.StartTime).Seconds()
	delay := float64(reader.Next)/reader.Rate - elapsed
	end := reader.Next + int(reader.Rate/5)
	if delay > 0 {
		time.Sleep(time.Duration(delay * float64(time.Second)))
	} else {
		end += -int(delay * reader.Rate)
	}

	if end > len(reader.Content) {
		end = len(reader.Content)
	}

	n = copy(p, reader.Content[reader.Next:end])
	reader.Next += n
	return n, nil
}

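// A worked example (with assumed numbers) of the pacing rule in Read above:
// at Rate = 10 KB/s (10240 bytes/s), after serving Next = 20480 bytes the
// reader is "owed" 2.0 seconds of wall time; if only 1.5 seconds have elapsed
// it sleeps for 0.5 seconds, otherwise it widens the next window beyond the
// nominal Rate/5 to catch up. This hypothetical helper mirrors that arithmetic:
func examplePacingWindow(next int, rate float64, elapsed float64) (sleepSeconds float64, window int) {
	delay := float64(next)/rate - elapsed
	window = int(rate / 5) // nominal window: one fifth of a second of data
	if delay > 0 {
		return delay, window
	}
	return 0, window - int(delay*rate) // delay is negative here, so the window grows
}
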
func RateLimitedCopy(writer io.Writer, reader io.Reader, rate int) (written int64, err error) {
	if rate <= 0 {
		return io.Copy(writer, reader)
	}
	for range time.Tick(time.Second / 5) {
		n, err := io.CopyN(writer, reader, int64(rate*1024/5))
		written += n
		if err != nil {
			if err == io.EOF {
				return written, nil
			} else {
				return written, err
			}
		}
	}
	return written, nil
}

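// A usage sketch for the two rate-limiting paths above: the same 10 kB/s limit
// applied once on the read side and once on the copy itself. sink and source
// are any io.Writer/io.Reader; with 64 KB of content the first copy should
// take roughly 6.4 seconds.
func exampleRateLimitedTransfers(sink io.Writer, source io.Reader, content []byte) error {
	reader := CreateRateLimitedReader(content, 10) // limit while reading
	if _, err := io.Copy(sink, reader); err != nil {
		return err
	}
	_, err := RateLimitedCopy(sink, source, 10) // limit while copying
	return err
}
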
// GenerateKeyFromPassword generates a key from the password.
func GenerateKeyFromPassword(password string) []byte {
	return pbkdf2.Key([]byte(password), DEFAULT_KEY, 16384, 32, sha256.New)
}

// Get the password from the preference or the environment, but don't start any keyring request
func GetPasswordFromPreference(preference Preference, passwordType string) string {
	passwordID := passwordType
	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
	}

	{
		name := strings.ToUpper("duplicacy_" + passwordID)
		LOG_DEBUG("PASSWORD_ENV_VAR", "Reading the environment variable %s", name)
		if password, found := os.LookupEnv(name); found && password != "" {
			return password
		}
	}

	// If the password is stored in the preference, there is no need to include the storage name
	// (i.e., preference.Name) in the key, so the key name should really be passwordType rather
	// than passwordID; we're using passwordID here only for backward compatibility
	if len(preference.Keys) > 0 && len(preference.Keys[passwordID]) > 0 {
		LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordID)
		return preference.Keys[passwordID]
	}

	if len(preference.Keys) > 0 && len(preference.Keys[passwordType]) > 0 {
		LOG_DEBUG("PASSWORD_KEYCHAIN", "Reading %s from preferences", passwordType)
		return preference.Keys[passwordType]
	}

	return ""
}

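// The environment variable checked first by GetPasswordFromPreference is
// derived from the preference name and the password type: for example,
// preference "remote" and type "password" yield DUPLICACY_REMOTE_PASSWORD,
// while the "default" preference yields DUPLICACY_PASSWORD. A sketch of that
// naming rule, factored out for illustration only:
func examplePasswordEnvName(preferenceName string, passwordType string) string {
	passwordID := passwordType
	if preferenceName != "default" {
		passwordID = preferenceName + "_" + passwordID
	}
	return strings.ToUpper("duplicacy_" + passwordID)
}
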
// GetPassword attempts to get the password from KeyChain/KeyRing, environment variables, or keyboard input.
func GetPassword(preference Preference, passwordType string, prompt string,
	showPassword bool, resetPassword bool) string {
	passwordID := passwordType
	password := GetPasswordFromPreference(preference, passwordType)
	if password != "" {
		return password
	}

	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
	}

	if resetPassword && !RunInBackground {
		keyringSet(passwordID, "")
	} else {
		password := keyringGet(passwordID)
		if password != "" {
			return password
		}

		if RunInBackground {
			LOG_INFO("PASSWORD_MISSING", "%s is not found in Keychain/Keyring", passwordID)
			return ""
		}

	}

	password = ""
	fmt.Printf("%s", prompt)
	if showPassword {
		scanner := bufio.NewScanner(os.Stdin)
		scanner.Scan()
		password = scanner.Text()
	} else {
		passwordInBytes, err := gopass.GetPasswdMasked()
		if err != nil {
			LOG_ERROR("PASSWORD_READ", "Failed to read the password: %v", err)
			return ""
		}
		password = string(passwordInBytes)
	}

	return password
}

// SavePassword saves the specified password in the keyring/keychain.
func SavePassword(preference Preference, passwordType string, password string) {

	if password == "" || RunInBackground {
		return
	}

	if preference.DoNotSavePassword {
		return
	}

	// If the password is retrieved from env or preference, don't save it to keyring
	if GetPasswordFromPreference(preference, passwordType) == password {
		return
	}

	passwordID := passwordType
	if preference.Name != "default" {
		passwordID = preference.Name + "_" + passwordID
	}
	keyringSet(passwordID, password)
}

// The following code was modified from the online article 'Matching Wildcards: An Algorithm', by Kirk J. Krauss,

@@ -269,61 +269,61 @@ func SavePassword(preference Preference, passwordType string, password string) {
//
func matchPattern(text string, pattern string) bool {

	textLength := len(text)
	patternLength := len(pattern)
	afterLastWildcard := 0
	afterLastMatched := 0

	t := 0
	p := 0

	for {
		if t >= textLength {
			if p >= patternLength {
				return true // "x" matches "x"
			} else if pattern[p] == '*' {
				p++
				continue // "x*" matches "x" or "xy"
			}
			return false // "x" doesn't match "xy"
		}

		w := byte(0)
		if p < patternLength {
			w = pattern[p]
		}

		if text[t] != w {
			if w == '?' {
				t++
				p++
				continue
			} else if w == '*' {
				p++
				afterLastWildcard = p
				if p >= patternLength {
					return true
				}
			} else if afterLastWildcard > 0 {
				p = afterLastWildcard
				t = afterLastMatched
				t++
			} else {
				return false
			}

			for t < textLength && text[t] != pattern[p] && pattern[p] != '?' {
				t++
			}

			if t >= textLength {
				return false
			}
			afterLastMatched = t
		}
		t++
		p++
	}

}

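// A few concrete cases for matchPattern, taken from the test table later in
// this diff: '?' matches exactly one byte, '*' matches any run of bytes, and a
// mismatch backtracks to the position after the last '*'.
func exampleMatchPattern() {
	fmt.Println(matchPattern("abcccd", "*ccd"))         // true
	fmt.Println(matchPattern("mississippi", "mi*sip*")) // true
	fmt.Println(matchPattern("a12b12", "*12*23"))       // false
	fmt.Println(matchPattern("bLaaa", "bLa?"))          // false: '?' is exactly one byte
}
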
@@ -331,129 +331,129 @@ func matchPattern(text string, pattern string) bool {
// either '+' or '-', where '-' indicates exclusion and '+' indicates inclusion. Wildcards like '*' and '?' may
// appear in the patterns. In case no matching pattern is found, the file will be excluded if all patterns are
// include patterns, and included otherwise.
func MatchPath(filePath string, patterns []string) (included bool) {

	var re *regexp.Regexp = nil
	var found bool
	var matched bool

	allIncludes := true

	for _, pattern := range patterns {
		if pattern[0] == '+' {
			if matchPattern(filePath, pattern[1:]) {
				return true
			}
		} else if pattern[0] == '-' {
			allIncludes = false
			if matchPattern(filePath, pattern[1:]) {
				return false
			}
		} else if strings.HasPrefix(pattern, "i:") || strings.HasPrefix(pattern, "e:") {
			if re, found = RegexMap[pattern[2:]]; found {
				matched = re.MatchString(filePath)
			} else {
				// Compile and cache the regex proper (the pattern without the "i:"/"e:"
				// prefix), so the cache key matches the lookup above.
				re, err := regexp.Compile(pattern[2:])
				if err != nil {
					LOG_ERROR("REGEX_ERROR", "Invalid regex encountered for pattern \"%s\" - %v", pattern[2:], err)
				}
				RegexMap[pattern[2:]] = re
				matched = re.MatchString(filePath)
			}
			if matched {
				return strings.HasPrefix(pattern, "i:")
			} else {
				if strings.HasPrefix(pattern, "e:") {
					allIncludes = false
				}
			}
		}
	}

	return !allIncludes
}

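// A sketch of how MatchPath reads a filter list: '+'/'-' patterns go through
// the wildcard matcher, "i:"/"e:" patterns go through cached regular
// expressions, and a path matching nothing is included only when at least one
// exclude pattern exists. The pattern list here is illustrative:
func exampleMatchPath() {
	patterns := []string{"-*.tmp", "+src/*", "e:\\.o$"}
	fmt.Println(MatchPath("src/main.go", patterns)) // true: matches +src/*
	fmt.Println(MatchPath("cache.tmp", patterns))   // false: matches -*.tmp
	fmt.Println(MatchPath("notes.txt", patterns))   // true: unmatched, and excludes exist
}
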
func joinPath(components ...string) string {

	combinedPath := path.Join(components...)
	if len(combinedPath) > 257 && runtime.GOOS == "windows" {
		combinedPath = `\\?\` + filepath.Join(components...)
		// If the path is on a samba drive we must use the UNC format
		if strings.HasPrefix(combinedPath, `\\?\\\`) {
			combinedPath = `\\?\UNC\` + combinedPath[6:]
		}
	}
	return combinedPath
}

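// The UNC rewrite used by joinPath on Windows, factored out as a hypothetical
// helper for illustration: a samba path such as \\server\share\... becomes
// \\?\UNC\server\share\... once the long-path prefix is applied.
func exampleUNCPrefix(longPath string) string {
	combined := `\\?\` + longPath
	if strings.HasPrefix(combined, `\\?\\\`) { // the joined path was itself a \\server\share path
		combined = `\\?\UNC\` + combined[6:]
	}
	return combined
}
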
func PrettyNumber(number int64) string {

	G := int64(1024 * 1024 * 1024)
	M := int64(1024 * 1024)
	K := int64(1024)

	if number > 1000*G {
		return fmt.Sprintf("%dG", number/G)
	} else if number > G {
		return fmt.Sprintf("%d,%03dM", number/(1000*M), (number/M)%1000)
	} else if number > M {
		return fmt.Sprintf("%d,%03dK", number/(1000*K), (number/K)%1000)
	} else if number > K {
		return fmt.Sprintf("%dK", number/K)
	} else {
		return fmt.Sprintf("%d", number)
	}
}

func PrettySize(size int64) string {
	if size > 1024*1024 {
		return fmt.Sprintf("%.2fM", float64(size)/(1024.0*1024.0))
	} else if size > 1024 {
		return fmt.Sprintf("%.0fK", float64(size)/1024.0)
	} else {
		return fmt.Sprintf("%d", size)
	}
}

func PrettyTime(seconds int64) string {

	day := int64(3600 * 24)

	if seconds > day*2 {
		return fmt.Sprintf("%d days %02d:%02d:%02d",
			seconds/day, (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > day {
		return fmt.Sprintf("1 day %02d:%02d:%02d", (seconds%day)/3600, (seconds%3600)/60, seconds%60)
	} else if seconds > 0 {
		return fmt.Sprintf("%02d:%02d:%02d", seconds/3600, (seconds%3600)/60, seconds%60)
	} else {
		return "n/a"
	}
}

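// Sample outputs for the three formatters above (values assumed; results
// follow from the arithmetic in each function):
func examplePrettyFormatting() {
	fmt.Println(PrettyNumber(2500))          // "2K"
	fmt.Println(PrettyNumber(5 << 30))       // "5,120M": thousands of MiB with a comma group
	fmt.Println(PrettySize(3 * 1024 * 1024)) // "3.00M"
	fmt.Println(PrettyTime(90061))           // "1 day 01:01:01"
}
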
func AtoSize(sizeString string) int {
	sizeString = strings.ToLower(sizeString)

	sizeRegex := regexp.MustCompile(`^([0-9]+)([mk])?$`)
	matched := sizeRegex.FindStringSubmatch(sizeString)
	if matched == nil {
		return 0
	}

	size, _ := strconv.Atoi(matched[1])

	if matched[2] == "m" {
		size *= 1024 * 1024
	} else if matched[2] == "k" {
		size *= 1024
	}

	return size
}

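// AtoSize accepts a decimal count with an optional "k" or "m" suffix
// (case-insensitive, since the input is lower-cased first) and returns a byte
// count; anything else yields 0:
func exampleAtoSize() {
	fmt.Println(AtoSize("64k"))  // 65536
	fmt.Println(AtoSize("1M"))   // 1048576
	fmt.Println(AtoSize("16mb")) // 0: the regex allows only a bare "m" or "k" suffix
}
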
func MinInt(x, y int) int {
	if x < y {
		return x
	}
	return y
}

@@ -7,79 +7,79 @@
package duplicacy

import (
	"bytes"
	"os"
	"path/filepath"
	"syscall"

	"github.com/gilbertchen/xattr"
)

func Readlink(path string) (isRegular bool, s string, err error) {
	s, err = os.Readlink(path)
	return false, s, err
}

func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
	stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
	if ok && stat != nil {
		entry.UID = int(stat.Uid)
		entry.GID = int(stat.Gid)
	} else {
		entry.UID = -1
		entry.GID = -1
	}
}

func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
	stat, ok := (*fileInfo).Sys().(*syscall.Stat_t)
	if ok && stat != nil && (int(stat.Uid) != entry.UID || int(stat.Gid) != entry.GID) {
		if entry.UID != -1 && entry.GID != -1 {
			err := os.Chown(fullPath, entry.UID, entry.GID)
			if err != nil {
				LOG_ERROR("RESTORE_CHOWN", "Failed to change uid or gid: %v", err)
				return false
			}
		}
	}

	return true
}

func (entry *Entry) ReadAttributes(top string) {

	fullPath := filepath.Join(top, entry.Path)
	attributes, _ := xattr.Listxattr(fullPath)
	if len(attributes) > 0 {
		entry.Attributes = make(map[string][]byte)
		for _, name := range attributes {
			attribute, err := xattr.Getxattr(fullPath, name)
			if err == nil {
				entry.Attributes[name] = attribute
			}
		}
	}
}

func (entry *Entry) SetAttributesToFile(fullPath string) {
	names, _ := xattr.Listxattr(fullPath)

	for _, name := range names {

		newAttribute, found := entry.Attributes[name]
		if found {
			oldAttribute, _ := xattr.Getxattr(fullPath, name)
			// Rewrite the attribute only when the stored value differs from the current one
			if !bytes.Equal(oldAttribute, newAttribute) {
				xattr.Setxattr(fullPath, name, newAttribute)
			}
			delete(entry.Attributes, name)
		} else {
			xattr.Removexattr(fullPath, name)
		}
	}

	for name, attribute := range entry.Attributes {
		xattr.Setxattr(fullPath, name, attribute)
	}

}

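// The intended round trip for the two xattr helpers above, as a sketch
// (assumes a POSIX system where the xattr calls are available): capture the
// extended attributes of a scanned entry, then replay them onto the restored
// file, updating changed values and removing stale ones.
func exampleAttributeRoundTrip(entry *Entry, top string, restoredPath string) {
	entry.ReadAttributes(top)               // record name -> value pairs from the source file
	entry.SetAttributesToFile(restoredPath) // sync them onto the target
}
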
@@ -5,135 +5,134 @@
package duplicacy

import (
	"bytes"
	"io"
	"io/ioutil"
	"time"

	crypto_rand "crypto/rand"

	"testing"
)

func TestMatchPattern(t *testing.T) {

	// Test cases were copied from Matching Wildcards: An Empirical Way to Tame an Algorithm
	// by Kirk J. Krauss, October 07, 2014

	DATA := []struct {
		text    string
		pattern string
		matched bool
	}{
		// Cases with repeating character sequences.
		{"abcccd", "*ccd", true},
		{"mississipissippi", "*issip*ss*", true},
		{"xxxx*zzzzzzzzy*f", "xxxx*zzy*fffff", false},
		{"xxxx*zzzzzzzzy*f", "xxx*zzy*f", true},
		{"xxxxzzzzzzzzyf", "xxxx*zzy*fffff", false},
		{"xxxxzzzzzzzzyf", "xxxx*zzy*f", true},
		{"xyxyxyzyxyz", "xy*z*xyz", true},
		{"mississippi", "*sip*", true},
		{"xyxyxyxyz", "xy*xyz", true},
		{"mississippi", "mi*sip*", true},
		{"ababac", "*abac*", true},
		{"ababac", "*abac*", true},
		{"aaazz", "a*zz*", true},
		{"a12b12", "*12*23", false},
		{"a12b12", "a12b", false},
		{"a12b12", "*12*12*", true},

		// More double wildcard scenarios.
		{"XYXYXYZYXYz", "XY*Z*XYz", true},
		{"missisSIPpi", "*SIP*", true},
		{"mississipPI", "*issip*PI", true},
		{"xyxyxyxyz", "xy*xyz", true},
		{"miSsissippi", "mi*sip*", true},
		{"miSsissippi", "mi*Sip*", false},
		{"abAbac", "*Abac*", true},
		{"abAbac", "*Abac*", true},
		{"aAazz", "a*zz*", true},
		{"A12b12", "*12*23", false},
		{"a12B12", "*12*12*", true},
		{"oWn", "*oWn*", true},

		// Completely tame (no wildcards) cases.
		{"bLah", "bLah", true},
		{"bLah", "bLaH", false},

		// Simple mixed wildcard tests suggested by IBMer Marlin Deckert.
		{"a", "*?", true},
		{"ab", "*?", true},
		{"abc", "*?", true},

		// More mixed wildcard tests including coverage for false positives.
		{"a", "??", false},
		{"ab", "?*?", true},
		{"ab", "*?*?*", true},
		{"abc", "?*?*?", true},
		{"abc", "?*?*&?", false},
		{"abcd", "?b*??", true},
		{"abcd", "?a*??", false},
		{"abcd", "?*?c?", true},
		{"abcd", "?*?d?", false},
		{"abcde", "?*b*?*d*?", true},

		// Single-character-match cases.
		{"bLah", "bL?h", true},
		{"bLaaa", "bLa?", false},
		{"bLah", "bLa?", true},
		{"bLaH", "?Lah", false},
		{"bLaH", "?LaH", true},
	}

	for _, data := range DATA {
		if matchPattern(data.text, data.pattern) != data.matched {
			t.Errorf("text: %s, pattern %s, expected: %t", data.text, data.pattern, data.matched)
		}
	}

}

func TestRateLimit(t *testing.T) {
	content := make([]byte, 100*1024)
	_, err := crypto_rand.Read(content)
	if err != nil {
		t.Errorf("Error generating random content: %v", err)
		return
	}

	expectedRate := 10
	rateLimiter := CreateRateLimitedReader(content, expectedRate)

	startTime := time.Now()
	n, err := io.Copy(ioutil.Discard, rateLimiter)
	if err != nil {
		t.Errorf("Error reading from the rate limited reader: %v", err)
		return
	}
	if int(n) != len(content) {
		t.Errorf("Wrote %d bytes instead of %d", n, len(content))
		return
	}

	elapsed := time.Since(startTime)
	actualRate := float64(len(content)) / elapsed.Seconds() / 1024
	t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate)

	startTime = time.Now()
	n, err = RateLimitedCopy(ioutil.Discard, bytes.NewBuffer(content), expectedRate)
	if err != nil {
		t.Errorf("Error writing with rate limit: %v", err)
		return
	}
	if int(n) != len(content) {
		t.Errorf("Copied %d bytes instead of %d", n, len(content))
		return
	}

	elapsed = time.Since(startTime)
	actualRate = float64(len(content)) / elapsed.Seconds() / 1024
	t.Logf("Elapsed time: %s, actual rate: %.3f kB/s, expected rate: %d kB/s", elapsed, actualRate, expectedRate)

}

@@ -5,46 +5,47 @@
|
|||||||
package duplicacy
|
package duplicacy
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"os"
|
"fmt"
|
||||||
"fmt"
|
"os"
|
||||||
"syscall"
|
"syscall"
|
||||||
"unsafe"
|
"unsafe"
|
||||||
)
|
)
|

 type symbolicLinkReparseBuffer struct {
 	SubstituteNameOffset uint16
 	SubstituteNameLength uint16
 	PrintNameOffset      uint16
 	PrintNameLength      uint16
 	Flags                uint32
 	PathBuffer           [1]uint16
 }

 type mountPointReparseBuffer struct {
 	SubstituteNameOffset uint16
 	SubstituteNameLength uint16
 	PrintNameOffset      uint16
 	PrintNameLength      uint16
 	PathBuffer           [1]uint16
 }

 type reparseDataBuffer struct {
 	ReparseTag        uint32
 	ReparseDataLength uint16
 	Reserved          uint16

 	// GenericReparseBuffer
 	reparseBuffer byte
 }
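The Readlink implementation further down reinterprets the raw byte buffer filled by DeviceIoControl as a reparseDataBuffer through an unsafe.Pointer cast. Below is a self-contained illustration of that cast pattern, using a hypothetical header type and assuming a little-endian machine (as on Windows):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// header is a hypothetical type mirroring the leading fields of reparseDataBuffer.
type header struct {
	ReparseTag        uint32
	ReparseDataLength uint16
}

func main() {
	// Simulate a raw buffer whose leading bytes hold the header, the way
	// DeviceIoControl fills rdbbuf in Readlink.
	raw := make([]byte, 16)
	binary.LittleEndian.PutUint32(raw[0:], 0xA000000C) // IO_REPARSE_TAG_SYMLINK
	binary.LittleEndian.PutUint16(raw[4:], 12)         // reparse data length
	h := (*header)(unsafe.Pointer(&raw[0]))
	fmt.Printf("tag=%x length=%d\n", h.ReparseTag, h.ReparseDataLength) // tag=a000000c length=12
}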

-const (
-	FSCTL_GET_REPARSE_POINT = 0x900A8
-	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
-	IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003
-	IO_REPARSE_TAG_SYMLINK = 0xA000000C
-	IO_REPARSE_TAG_DEDUP = 0x80000013
-	SYMBOLIC_LINK_FLAG_DIRECTORY = 0x1
-
-	FILE_READ_ATTRIBUTES = 0x0080
+const (
+	FSCTL_GET_REPARSE_POINT          = 0x900A8
+	MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024
+	IO_REPARSE_TAG_MOUNT_POINT       = 0xA0000003
+	IO_REPARSE_TAG_SYMLINK           = 0xA000000C
+	IO_REPARSE_TAG_DEDUP             = 0x80000013
+	SYMBOLIC_LINK_FLAG_DIRECTORY     = 0x1
+
+	FILE_READ_ATTRIBUTES = 0x0080
 )

 // We copied golang source code for Readlink but made a simple modification here: use FILE_READ_ATTRIBUTES instead of
@@ -53,58 +54,58 @@ const (

 // Readlink returns the destination of the named symbolic link.
 func Readlink(path string) (isRegular bool, s string, err error) {
 	fd, err := syscall.CreateFile(syscall.StringToUTF16Ptr(path), FILE_READ_ATTRIBUTES,
 		syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING,
 		syscall.FILE_FLAG_OPEN_REPARSE_POINT|syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
 	if err != nil {
 		return false, "", err
 	}
 	defer syscall.CloseHandle(fd)

 	rdbbuf := make([]byte, syscall.MAXIMUM_REPARSE_DATA_BUFFER_SIZE)
 	var bytesReturned uint32
 	err = syscall.DeviceIoControl(fd, syscall.FSCTL_GET_REPARSE_POINT, nil, 0, &rdbbuf[0],
 		uint32(len(rdbbuf)), &bytesReturned, nil)
 	if err != nil {
 		return false, "", err
 	}

 	rdb := (*reparseDataBuffer)(unsafe.Pointer(&rdbbuf[0]))
 	switch rdb.ReparseTag {
 	case IO_REPARSE_TAG_SYMLINK:
 		data := (*symbolicLinkReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
 		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
 		if data.PrintNameLength > 0 {
-			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength + data.PrintNameOffset)/2])
+			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
 		} else {
-			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength + data.SubstituteNameOffset)/2])
+			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
 		}
 	case IO_REPARSE_TAG_MOUNT_POINT:
 		data := (*mountPointReparseBuffer)(unsafe.Pointer(&rdb.reparseBuffer))
 		p := (*[0xffff]uint16)(unsafe.Pointer(&data.PathBuffer[0]))
 		if data.PrintNameLength > 0 {
-			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength + data.PrintNameOffset)/2])
+			s = syscall.UTF16ToString(p[data.PrintNameOffset/2 : (data.PrintNameLength+data.PrintNameOffset)/2])
 		} else {
-			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength + data.SubstituteNameOffset)/2])
+			s = syscall.UTF16ToString(p[data.SubstituteNameOffset/2 : (data.SubstituteNameLength+data.SubstituteNameOffset)/2])
 		}
 	case IO_REPARSE_TAG_DEDUP:
 		return true, "", nil
 	default:
 		// the path is not a symlink or junction but another type of reparse
 		// point
 		return false, "", fmt.Errorf("Unhandled reparse point type %x", rdb.ReparseTag)
 	}

 	return false, s, nil
 }
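One detail worth noting in Readlink above: PrintNameOffset and PrintNameLength (and their Substitute counterparts) are byte counts into a UTF-16 buffer, which is why each is halved before slicing the []uint16 array. A small standalone illustration of the same arithmetic, with hypothetical values:

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// A UTF-16 buffer holding a link target; the reparse data expresses
	// offsets and lengths in bytes, so both are halved to index []uint16.
	buf := utf16.Encode([]rune(`C:\target`))
	printNameOffset := 0            // byte offset of the name in the buffer
	printNameLength := len(buf) * 2 // byte length of the name
	s := string(utf16.Decode(buf[printNameOffset/2 : (printNameLength+printNameOffset)/2]))
	fmt.Println(s) // C:\target
}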

 func GetOwner(entry *Entry, fileInfo *os.FileInfo) {
 	entry.UID = -1
 	entry.GID = -1
 }

-func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) (bool) {
+func SetOwner(fullPath string, entry *Entry, fileInfo *os.FileInfo) bool {
 	return true
 }

 func (entry *Entry) ReadAttributes(top string) {