Implement Erasure Coding
@@ -458,8 +458,26 @@ func configRepository(context *cli.Context, init bool) {
    if iterations == 0 {
        iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
    }

    dataShards := 0
    parityShards := 0
    shards := context.String("erasure-coding")
    if shards != "" {
        shardsRegex := regexp.MustCompile(`^([0-9]+):([0-9]+)$`)
        matched := shardsRegex.FindStringSubmatch(shards)
        if matched == nil {
            duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
        } else {
            dataShards, _ = strconv.Atoi(matched[1])
            parityShards, _ = strconv.Atoi(matched[2])
            if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
                duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
            }
        }
    }

    duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
        minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"))
        minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
    }

    duplicacy.Preferences = append(duplicacy.Preferences, preference)
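To see the validation rule above in isolation: the option accepts a "<data shards>:<parity shards>" pair where both counts are positive, the data shard count is at most 256, and the parity shard count does not exceed the data shard count. Below is a minimal, standalone sketch of that parsing; the function name parseShards and the plain error returns are illustrative only (the command itself reports failures through duplicacy.LOG_ERROR, as shown above).

    package main

    import (
        "fmt"
        "regexp"
        "strconv"
    )

    // parseShards parses a "<data shards>:<parity shards>" spec, mirroring the checks
    // above: both counts must be positive, data shards may not exceed 256, and parity
    // shards may not exceed data shards.
    func parseShards(spec string) (dataShards, parityShards int, err error) {
        matched := regexp.MustCompile(`^([0-9]+):([0-9]+)$`).FindStringSubmatch(spec)
        if matched == nil {
            return 0, 0, fmt.Errorf("invalid erasure coding parameters: %s", spec)
        }
        dataShards, _ = strconv.Atoi(matched[1])
        parityShards, _ = strconv.Atoi(matched[2])
        if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
            return 0, 0, fmt.Errorf("invalid erasure coding parameters: %s", spec)
        }
        return dataShards, parityShards, nil
    }

    func main() {
        for _, spec := range []string{"5:2", "257:2", "5:6", "5:0"} {
            d, p, err := parseShards(spec)
            fmt.Println(spec, d, p, err) // only "5:2" passes the validation
        }
    }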
@@ -1403,6 +1421,11 @@ func main() {
        Usage: "the RSA public key to encrypt file chunks",
        Argument: "<public key>",
    },
    cli.StringFlag{
        Name: "erasure-coding",
        Usage: "enable erasure coding to protect against storage corruption",
        Argument: "<data shards>:<parity shards>",
    },
},
Usage: "Initialize the storage if necessary and the current directory as the repository",
ArgsUsage: "<snapshot id> <storage url>",
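With this flag registered on the init command, enabling the feature at initialization time would look something like "duplicacy init -erasure-coding 5:2 <snapshot id> <storage url>", with 5:2 being the data/parity split also used by the tests below; the same option is added to the add command in the next hunk so additional storages can be configured the same way.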
@@ -1882,6 +1905,11 @@ func main() {
        Usage: "the RSA public key to encrypt file chunks",
        Argument: "<public key>",
    },
    cli.StringFlag{
        Name: "erasure-coding",
        Usage: "enable erasure coding to protect against storage corruption",
        Argument: "<data shards>:<parity shards>",
    },
},
Usage: "Add an additional storage to be used for the existing repository",
ArgsUsage: "<storage name> <snapshot id> <storage url>",
@@ -226,12 +226,20 @@ func TestBackupManager(t *testing.T) {
    cleanStorage(storage)

    time.Sleep(time.Duration(delay) * time.Second)

    dataShards := 0
    parityShards := 0
    if testErasureCoding {
        dataShards = 5
        parityShards = 2
    }

    if testFixedChunkSize {
        if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
        if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
            t.Errorf("Failed to initialize the storage")
        }
    } else {
        if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
        if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "", dataShards, parityShards) {
            t.Errorf("Failed to initialize the storage")
        }
    }
@@ -22,6 +22,8 @@ import (
    "runtime"

    "github.com/bkaradzic/go-lz4"
    "github.com/minio/highwayhash"
    "github.com/klauspost/reedsolomon"
)

// A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
@@ -68,11 +70,13 @@ type Chunk struct {
}

// Magic word to identify a duplicacy format encrypted file, plus a version number.
var ENCRYPTION_HEADER = "duplicacy\000"
var ENCRYPTION_BANNER = "duplicacy\000"

// RSA encrypted chunks start with "duplicacy\002"
var ENCRYPTION_VERSION_RSA byte = 2

var ERASURE_CODING_BANNER = "duplicacy\003"

// CreateChunk creates a new chunk.
func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
@@ -224,7 +228,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
    // Start with the magic number and the version number.
    if usingRSA {
        // RSA encryption starts "duplicacy\002"
        encryptedBuffer.Write([]byte(ENCRYPTION_HEADER)[:len(ENCRYPTION_HEADER) - 1])
        encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
        encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})

        // Then the encrypted key
@@ -235,7 +239,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
        binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
        encryptedBuffer.Write(encryptedKey)
    } else {
        encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
        encryptedBuffer.Write([]byte(ENCRYPTION_BANNER))
    }

    // Followed by the nonce
@@ -248,7 +252,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
        offset = encryptedBuffer.Len()
    }

    // offset is either 0 or the length of header + nonce
    // offset is either 0 or the length of banner + nonce

    if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
        deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
@@ -273,26 +277,79 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
    return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
}

if len(encryptionKey) == 0 {
    chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
    return nil
if len(encryptionKey) > 0 {

    // PKCS7 is used. The sizes of compressed chunks leak information about the original chunks so we want the padding sizes
    // to be the maximum allowed by PKCS7
    dataLength := encryptedBuffer.Len() - offset
    paddingLength := 256 - dataLength%256

    encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
    encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))

    // The encrypted data will be appended to the duplicacy banner and the nonce.
    encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
        encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)

    encryptedBuffer.Truncate(len(encryptedBytes))
}

// PKCS7 is used. Compressed chunk sizes leaks information about the original chunks so we want the padding sizes
// to be the maximum allowed by PKCS7
dataLength := encryptedBuffer.Len() - offset
paddingLength := 256 - dataLength%256
if chunk.config.DataShards == 0 || chunk.config.ParityShards == 0 {
    chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
    return
}

encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
// Start erasure coding
encoder, err := reedsolomon.New(chunk.config.DataShards, chunk.config.ParityShards)
if err != nil {
    return err
}
chunkSize := len(encryptedBuffer.Bytes())
shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
// Append zeros to make the last shard the same size as the others
encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
// Grow the buffer for parity shards
encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
// Now create one slice for each shard, reusing the data in the buffer
data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
    data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
}
// This populates the parity shards
encoder.Encode(data)

// The encrypted data will be appended to the duplicacy header and the once.
encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
    encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
// Prepare the chunk to be uploaded
chunk.buffer.Reset()
// First the banner
chunk.buffer.Write([]byte(ERASURE_CODING_BANNER))
// Then the header which includes the chunk size, data/parity shard counts and a 2-byte checksum
header := make([]byte, 14)
binary.LittleEndian.PutUint64(header[0:], uint64(chunkSize))
binary.LittleEndian.PutUint16(header[8:], uint16(chunk.config.DataShards))
binary.LittleEndian.PutUint16(header[10:], uint16(chunk.config.ParityShards))
header[12] = header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10]
header[13] = header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11]
chunk.buffer.Write(header)
// Calculate the highway hash for each shard
hashKey := make([]byte, 32)
for _, part := range data {
    hasher, err := highwayhash.New(hashKey)
    if err != nil {
        return err
    }
    _, err = hasher.Write(part)
    if err != nil {
        return err
    }
    chunk.buffer.Write(hasher.Sum(nil))
}

encryptedBuffer.Truncate(len(encryptedBytes))

chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
// Copy the data
for _, part := range data {
    chunk.buffer.Write(part)
}
// Append the header again for redundancy
chunk.buffer.Write(header)

return nil
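Putting the pieces together, an erasure-coded chunk file is laid out as: a 10-byte banner, a 14-byte header (8-byte chunk size, 2-byte data shard count, 2-byte parity shard count, 2-byte XOR checksum), one 32-byte HighwayHash per shard, the shards themselves, and a trailing copy of the header. A small sketch of the resulting file size follows, assuming exactly those field widths; encodedSize is a hypothetical helper, not part of the commit.

    package main

    import "fmt"

    // encodedSize returns the size of an erasure-coded chunk file for a payload of
    // payloadLen bytes, following the layout written above:
    // banner (10) + header (14) + (d+p) hashes (32 each) + (d+p) shards + header copy (14).
    func encodedSize(payloadLen, dataShards, parityShards int) int {
        shardSize := (payloadLen + dataShards - 1) / dataShards // last data shard is zero-padded
        shards := dataShards + parityShards
        return 10 + 14 + shards*32 + shards*shardSize + 14
    }

    func main() {
        // Example: a 1000-byte encrypted payload with 5 data and 2 parity shards
        // uses 200-byte shards and produces a 1662-byte chunk file.
        fmt.Println(encodedSize(1000, 5, 2))
    }

This is the same arithmetic the decoder uses below as expectedLength when it decides whether a downloaded chunk is complete, truncated, or unrecoverable.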
@@ -322,7 +379,122 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
    }()

    chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
    headerLength := len(ENCRYPTION_HEADER)
    bannerLength := len(ENCRYPTION_BANNER)

    if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {

        // The chunk was encoded with erasure coding
        if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
            return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
        }
        // Check the header checksum
        header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
        if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
            header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
            return fmt.Errorf("Erasure coding header corrupted (%x)", header)
        }

        // Read the parameters
        chunkSize := int(binary.LittleEndian.Uint64(header[0:8]))
        dataShards := int(binary.LittleEndian.Uint16(header[8:10]))
        parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
        shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
        // This is the length the chunk file should have
        expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
        // The minimum length that can be recovered from
        minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
        LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
        if len(encryptedBuffer.Bytes()) > expectedLength {
            LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
        } else if len(encryptedBuffer.Bytes()) == expectedLength {
            // Correct size; fall through
        } else if len(encryptedBuffer.Bytes()) > minimumLength {
            LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
        } else {
            return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
        }

        // Where the hashes start
        hashOffset := bannerLength + len(header)
        // Where the data starts
        dataOffset := hashOffset + (dataShards + parityShards) * 32

        data := make([][]byte, dataShards + parityShards)
        recoveryNeeded := false
        hashKey := make([]byte, 32)
        availableShards := 0
        for i := 0; i < dataShards + parityShards; i++ {
            start := dataOffset + i * shardSize
            if start + shardSize > len(encryptedBuffer.Bytes()) {
                // the current shard is incomplete
                break
            }
            // Now verify the hash
            hasher, err := highwayhash.New(hashKey)
            if err != nil {
                return err
            }
            _, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
            if err != nil {
                return err
            }
            if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
                if i < dataShards {
                    recoveryNeeded = true
                }
            } else {
                // The shard is good
                data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
                availableShards++
                if availableShards >= dataShards {
                    // We have enough shards to recover; skip the remaining shards
                    break
                }
            }
        }

        if !recoveryNeeded {
            // Remove the padding zeros from the last shard
            encryptedBuffer.Truncate(dataOffset + chunkSize)
            // Skip the header and hashes
            encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
        } else {
            if availableShards < dataShards {
                return fmt.Errorf("Not enough chunk data for recovery; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
            }

            // Show the validity of shards using a string of * and -
            slots := ""
            for _, part := range data {
                if len(part) != 0 {
                    slots += "*"
                } else {
                    slots += "-"
                }
            }

            LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
            encoder, err := reedsolomon.New(dataShards, parityShards)
            if err != nil {
                return err
            }
            err = encoder.Reconstruct(data)
            if err != nil {
                return err
            }
            LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
            buffer := AllocateChunkBuffer()
            buffer.Reset()
            for i := 0; i < dataShards; i++ {
                buffer.Write(data[i])
            }
            buffer.Truncate(chunkSize)

            ReleaseChunkBuffer(encryptedBuffer)
            encryptedBuffer = buffer
        }

    }

    if len(encryptionKey) > 0 {
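The recovery path above rests on the reedsolomon package's ability to rebuild any missing shards as long as at least dataShards of them survive; the decoder marks a damaged shard by leaving its slice nil, which is exactly what Reconstruct expects. Below is a minimal, standalone sketch of that round trip, independent of the chunk format; the sample data and the 5:2 split are illustrative only.

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/klauspost/reedsolomon"
    )

    func main() {
        original := bytes.Repeat([]byte("duplicacy "), 100) // 1000 bytes of sample data

        enc, err := reedsolomon.New(5, 2) // 5 data shards, 2 parity shards
        if err != nil {
            log.Fatal(err)
        }
        shards, err := enc.Split(original) // 7 equally sized shards (data is padded)
        if err != nil {
            log.Fatal(err)
        }
        if err := enc.Encode(shards); err != nil { // fill in the 2 parity shards
            log.Fatal(err)
        }

        // Simulate losing two shards, the most this 5:2 configuration tolerates.
        shards[1], shards[6] = nil, nil

        if err := enc.Reconstruct(shards); err != nil { // rebuild the missing shards
            log.Fatal(err)
        }

        var joined bytes.Buffer
        if err := enc.Join(&joined, shards, len(original)); err != nil {
            log.Fatal(err)
        }
        fmt.Println(bytes.Equal(joined.Bytes(), original)) // true
    }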
@@ -340,15 +512,15 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
        key = hasher.Sum(nil)
    }

    if len(encryptedBuffer.Bytes()) < headerLength + 12 {
    if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
        return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
    }

    if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
    if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
        return fmt.Errorf("The storage doesn't seem to be encrypted")
    }

    encryptionVersion := encryptedBuffer.Bytes()[headerLength-1]
    encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
    if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
        return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
    }
@@ -359,14 +531,14 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
        return fmt.Errorf("An RSA private key is required to decrypt the chunk")
    }

    encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[headerLength:headerLength+2])
    encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])

    if len(encryptedBuffer.Bytes()) < headerLength + 14 + int(encryptedKeyLength) {
    if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
        return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
    }

    encryptedKey := encryptedBuffer.Bytes()[headerLength + 2:headerLength + 2 + int(encryptedKeyLength)]
    headerLength += 2 + int(encryptedKeyLength)
    encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
    bannerLength += 2 + int(encryptedKeyLength)

    decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
    if err != nil {
@@ -385,8 +557,8 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
        return err
    }

    offset = headerLength + gcm.NonceSize()
    nonce := encryptedBuffer.Bytes()[headerLength:offset]
    offset = bannerLength + gcm.NonceSize()
    nonce := encryptedBuffer.Bytes()[bannerLength:offset]

    decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
        encryptedBuffer.Bytes()[offset:], nil)
@@ -12,7 +12,46 @@ import (
    "testing"
)

func TestChunk(t *testing.T) {
func TestErasureCoding(t *testing.T) {
    key := []byte("duplicacydefault")

    config := CreateConfig()
    config.HashKey = key
    config.IDKey = key
    config.MinimumChunkSize = 100
    config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
    config.DataShards = 5
    config.ParityShards = 2

    chunk := CreateChunk(config, true)
    chunk.Reset(true)
    data := make([]byte, 100)
    for i := 0; i < len(data); i++ {
        data[i] = byte(i)
    }
    chunk.Write(data)
    err := chunk.Encrypt([]byte(""), "", false)
    if err != nil {
        t.Errorf("Failed to encrypt the test data: %v", err)
        return
    }

    encryptedData := make([]byte, chunk.GetLength())
    copy(encryptedData, chunk.GetBytes())

    crypto_rand.Read(encryptedData[280:300])

    chunk.Reset(false)
    chunk.Write(encryptedData)
    err = chunk.Decrypt([]byte(""), "")
    if err != nil {
        t.Errorf("Failed to decrypt the data: %v", err)
        return
    }
    return
}
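For orientation on why this test is expected to pass: in a 5:2 configuration the shard data begins at byte offset 248 (10-byte banner, 14-byte header, seven 32-byte shard hashes), so the 20 bytes randomized at offsets 280-300 land inside the shard area; assuming each shard of the compressed 100-byte payload is at least 20 bytes long, at most two adjacent shards are damaged, which the two parity shards let Decrypt rebuild.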
func TestChunkBasic(t *testing.T) {

    key := []byte("duplicacydefault")
@@ -32,7 +71,10 @@ func TestChunk(t *testing.T) {
        config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
    }

    remainderLength := -1
    if testErasureCoding {
        config.DataShards = 5
        config.ParityShards = 2
    }

    for i := 0; i < 500; i++ {

@@ -56,10 +98,14 @@ func TestChunk(t *testing.T) {
        encryptedData := make([]byte, chunk.GetLength())
        copy(encryptedData, chunk.GetBytes())

        if remainderLength == -1 {
            remainderLength = len(encryptedData) % 256
        } else if len(encryptedData)%256 != remainderLength {
            t.Errorf("Incorrect padding size")
        if testErasureCoding {
            offset := 24 + 32 * 7
            start := rand.Int() % (len(encryptedData) - offset) + offset
            length := (len(encryptedData) - offset) / 7
            if start + length > len(encryptedData) {
                length = len(encryptedData) - start
            }
            crypto_rand.Read(encryptedData[start: start+length])
        }

        chunk.Reset(false)
@@ -34,8 +34,8 @@ var DEFAULT_KEY = []byte("duplicacy")
// standard zlib levels of -1 to 9.
var DEFAULT_COMPRESSION_LEVEL = 100

// The new header of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_HEADER = "duplicacy\001"
// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
var CONFIG_BANNER = "duplicacy\001"

// The length of the salt used in the new format
var CONFIG_SALT_LENGTH = 32
@@ -70,6 +70,10 @@ type Config struct {
    // for encrypting a non-chunk file
    FileKey []byte `json:"-"`

    // for erasure coding
    DataShards int `json:'data-shards'`
    ParityShards int `json:'parity-shards'`

    // for RSA encryption
    rsaPrivateKey *rsa.PrivateKey
    rsaPublicKey *rsa.PublicKey
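Since the shard counts are now fields of Config, they are persisted in the storage's config file alongside the existing parameters, so every client that downloads the config encodes and decodes chunks with the same geometry; with a 5:2 setup the saved configuration would be expected to carry the shard counts under the data-shards and parity-shards names given in the struct tags above (assuming those tags are honored by the serializer).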
@@ -180,6 +184,10 @@ func (config *Config) Print() {
        LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
    }

    if config.DataShards != 0 && config.ParityShards != 0 {
        LOG_TRACE("CONFIG_INFO", "Data shards: %d, parity shards: %d", config.DataShards, config.ParityShards)
    }

    if config.rsaPublicKey != nil {
        pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
@@ -386,11 +394,11 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
        return nil, false, err
    }

    if len(configFile.GetBytes()) < len(ENCRYPTION_HEADER) {
    if len(configFile.GetBytes()) < len(ENCRYPTION_BANNER) {
        return nil, false, fmt.Errorf("The storage has an invalid config file")
    }

    if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)-1]) == ENCRYPTION_HEADER[:len(ENCRYPTION_HEADER)-1] && len(password) == 0 {
    if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)-1]) == ENCRYPTION_BANNER[:len(ENCRYPTION_BANNER)-1] && len(password) == 0 {
        return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
    }
@@ -398,23 +406,23 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt

    if len(password) > 0 {

        if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)]) == ENCRYPTION_HEADER {
        if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)]) == ENCRYPTION_BANNER {
            // This is the old config format with a static salt and a fixed number of iterations
            masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
            LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
        } else if string(configFile.GetBytes()[:len(CONFIG_HEADER)]) == CONFIG_HEADER {
        } else if string(configFile.GetBytes()[:len(CONFIG_BANNER)]) == CONFIG_BANNER {
            // This is the new config format with a random salt and a configurable number of iterations
            encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4

            // Extract the salt and the number of iterations
            saltStart := configFile.GetBytes()[len(CONFIG_HEADER):]
            saltStart := configFile.GetBytes()[len(CONFIG_BANNER):]
            iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
            LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
            masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))

            // Copy to a temporary buffer to replace the header and remove the salt and the number of riterations
            // Copy to a temporary buffer to replace the banner and remove the salt and the number of iterations
            var encrypted bytes.Buffer
            encrypted.Write([]byte(ENCRYPTION_HEADER))
            encrypted.Write([]byte(ENCRYPTION_BANNER))
            encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])

            configFile.Reset(false)
@@ -423,7 +431,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
            LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
        }
    } else {
        return nil, true, fmt.Errorf("The config file has an invalid header")
        return nil, true, fmt.Errorf("The config file has an invalid banner")
    }

    // Decrypt the config file. masterKey == nil means no encryption.
@@ -487,15 +495,15 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
        return false
    }

    // The new encrypted format for config is CONFIG_HEADER + salt + #iterations + encrypted content
    // The new encrypted format for config is CONFIG_BANNER + salt + #iterations + encrypted content
    encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4

    // Copy to a temporary buffer to replace the header and add the salt and the number of iterations
    // Copy to a temporary buffer to replace the banner and add the salt and the number of iterations
    var encrypted bytes.Buffer
    encrypted.Write([]byte(CONFIG_HEADER))
    encrypted.Write([]byte(CONFIG_BANNER))
    encrypted.Write(salt)
    binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
    encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_HEADER):])
    encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_BANNER):])

    chunk.Reset(false)
    chunk.Write(encrypted.Bytes())
@@ -528,7 +536,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
// it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
// is enabled.
func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
    minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string) bool {
    minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string, dataShards int, parityShards int) bool {

    exist, _, _, err := storage.GetFileInfo(0, "config")
    if err != nil {
@@ -550,6 +558,10 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
    if keyFile != "" {
        config.loadRSAPublicKey(keyFile)
    }

    config.DataShards = dataShards
    config.ParityShards = parityShards

    return UploadConfig(storage, config, password, iterations)
}
@@ -28,6 +28,7 @@ var testQuickMode bool
var testThreads int
var testFixedChunkSize bool
var testRSAEncryption bool
var testErasureCoding bool

func init() {
    flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -36,6 +37,7 @@ func init() {
    flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
    flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
    flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
    flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding")
    flag.Parse()
}
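With the new flag registered here, the erasure-coding paths in these tests can presumably be exercised with something like "go test -run 'TestBackupManager|TestChunk' -erasure-coding" (plus whatever -storage and other flags a given setup needs), mirroring how the existing -rsa and -fixed-chunk-size switches are used.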