Mirror of https://github.com/gilbertchen/duplicacy

Implement Erasure Coding
@@ -458,8 +458,26 @@ func configRepository(context *cli.Context, init bool) {
     if iterations == 0 {
         iterations = duplicacy.CONFIG_DEFAULT_ITERATIONS
     }
 
+    dataShards := 0
+    parityShards := 0
+    shards := context.String("erasure-coding")
+    if shards != "" {
+        shardsRegex := regexp.MustCompile(`^([0-9]+):([0-9]+)$`)
+        matched := shardsRegex.FindStringSubmatch(shards)
+        if matched == nil {
+            duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
+        } else {
+            dataShards, _ = strconv.Atoi(matched[1])
+            parityShards, _ = strconv.Atoi(matched[2])
+            if dataShards == 0 || dataShards > 256 || parityShards == 0 || parityShards > dataShards {
+                duplicacy.LOG_ERROR("STORAGE_ERASURECODE", "Invalid erasure coding parameters: %s", shards)
+            }
+        }
+    }
+
     duplicacy.ConfigStorage(storage, iterations, compressionLevel, averageChunkSize, maximumChunkSize,
-        minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"))
+        minimumChunkSize, storagePassword, otherConfig, bitCopy, context.String("key"), dataShards, parityShards)
     }
 
     duplicacy.Preferences = append(duplicacy.Preferences, preference)
@@ -1403,6 +1421,11 @@ func main() {
                 Usage:    "the RSA public key to encrypt file chunks",
                 Argument: "<public key>",
             },
+            cli.StringFlag{
+                Name:     "erasure-coding",
+                Usage:    "enable erasure coding to protect against storage corruption",
+                Argument: "<data shards>:<parity shards>",
+            },
         },
         Usage:     "Initialize the storage if necessary and the current directory as the repository",
         ArgsUsage: "<snapshot id> <storage url>",
@@ -1882,6 +1905,11 @@ func main() {
                 Usage:    "the RSA public key to encrypt file chunks",
                 Argument: "<public key>",
             },
+            cli.StringFlag{
+                Name:     "erasure-coding",
+                Usage:    "enable erasure coding to protect against storage corruption",
+                Argument: "<data shards>:<parity shards>",
+            },
         },
         Usage:     "Add an additional storage to be used for the existing repository",
         ArgsUsage: "<storage name> <snapshot id> <storage url>",
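The two hunks above register the same -erasure-coding flag for the init and add commands; the Argument string defines the accepted format <data shards>:<parity shards>, and the parsing hunk earlier caps data shards at 256 and requires parity shards not to exceed data shards. A hedged usage sketch (the 5:2 split is illustrative; the placeholder arguments are the tool's own ArgsUsage forms):

    duplicacy init -erasure-coding 5:2 <snapshot id> <storage url>
    duplicacy add -erasure-coding 5:2 <storage name> <snapshot id> <storage url>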
@@ -226,12 +226,20 @@ func TestBackupManager(t *testing.T) {
     cleanStorage(storage)
 
     time.Sleep(time.Duration(delay) * time.Second)
+
+    dataShards := 0
+    parityShards := 0
+    if testErasureCoding {
+        dataShards = 5
+        parityShards = 2
+    }
+
     if testFixedChunkSize {
-        if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "") {
+        if !ConfigStorage(storage, 16384, 100, 64*1024, 64*1024, 64*1024, password, nil, false, "", dataShards, parityShards) {
             t.Errorf("Failed to initialize the storage")
         }
     } else {
-        if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "") {
+        if !ConfigStorage(storage, 16384, 100, 64*1024, 256*1024, 16*1024, password, nil, false, "", dataShards, parityShards) {
             t.Errorf("Failed to initialize the storage")
         }
     }
@@ -22,6 +22,8 @@ import (
     "runtime"
 
     "github.com/bkaradzic/go-lz4"
+    "github.com/minio/highwayhash"
+    "github.com/klauspost/reedsolomon"
 )
 
 // A chunk needs to acquire a new buffer and return the old one for every encrypt/decrypt operation, therefore
@@ -68,11 +70,13 @@ type Chunk struct {
 }
 
 // Magic word to identify a duplicacy format encrypted file, plus a version number.
-var ENCRYPTION_HEADER = "duplicacy\000"
+var ENCRYPTION_BANNER = "duplicacy\000"
 
 // RSA encrypted chunks start with "duplicacy\002"
 var ENCRYPTION_VERSION_RSA byte = 2
 
+var ERASURE_CODING_BANNER = "duplicacy\003"
+
 // CreateChunk creates a new chunk.
 func CreateChunk(config *Config, bufferNeeded bool) *Chunk {
 
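With ENCRYPTION_HEADER renamed to ENCRYPTION_BANNER and the new ERASURE_CODING_BANNER, a chunk file can now begin with one of three 10-byte magic prefixes. The helper below is an illustrative sketch, not code from the commit; it only restates how the final banner byte distinguishes the formats used elsewhere in this diff:

    // classifyChunk inspects the 10-byte banner to tell the chunk formats apart.
    // Chunks written without encryption or erasure coding carry no banner at all.
    func classifyChunk(data []byte) string {
        if len(data) < 10 || string(data[:9]) != "duplicacy" {
            return "no banner (plain chunk) or unknown"
        }
        switch data[9] {
        case 3: // ERASURE_CODING_BANNER: erasure-coded wrapper around the payload
            return "erasure coded"
        case 2: // ENCRYPTION_VERSION_RSA: RSA-encrypted chunk
            return "RSA encrypted"
        case 0: // ENCRYPTION_BANNER: regular encrypted chunk
            return "encrypted"
        default:
            return "unsupported version"
        }
    }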
@@ -224,7 +228,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
     // Start with the magic number and the version number.
     if usingRSA {
         // RSA encryption starts "duplicacy\002"
-        encryptedBuffer.Write([]byte(ENCRYPTION_HEADER)[:len(ENCRYPTION_HEADER) - 1])
+        encryptedBuffer.Write([]byte(ENCRYPTION_BANNER)[:len(ENCRYPTION_BANNER) - 1])
         encryptedBuffer.Write([]byte{ENCRYPTION_VERSION_RSA})
 
         // Then the encrypted key
@@ -235,7 +239,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
         binary.Write(encryptedBuffer, binary.LittleEndian, uint16(len(encryptedKey)))
         encryptedBuffer.Write(encryptedKey)
     } else {
-        encryptedBuffer.Write([]byte(ENCRYPTION_HEADER))
+        encryptedBuffer.Write([]byte(ENCRYPTION_BANNER))
     }
 
     // Followed by the nonce
@@ -248,7 +252,7 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
         offset = encryptedBuffer.Len()
     }
 
-    // offset is either 0 or the length of header + nonce
+    // offset is either 0 or the length of banner + nonce
 
     if chunk.config.CompressionLevel >= -1 && chunk.config.CompressionLevel <= 9 {
         deflater, _ := zlib.NewWriterLevel(encryptedBuffer, chunk.config.CompressionLevel)
@@ -273,12 +277,9 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
         return fmt.Errorf("Invalid compression level: %d", chunk.config.CompressionLevel)
     }
 
-    if len(encryptionKey) == 0 {
-        chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
-        return nil
-    }
+    if len(encryptionKey) > 0 {
 
-    // PKCS7 is used. Compressed chunk sizes leaks information about the original chunks so we want the padding sizes
+        // PKCS7 is used. The sizes of compressed chunks leak information about the original chunks so we want the padding sizes
         // to be the maximum allowed by PKCS7
         dataLength := encryptedBuffer.Len() - offset
         paddingLength := 256 - dataLength%256
@@ -286,13 +287,69 @@ func (chunk *Chunk) Encrypt(encryptionKey []byte, derivationKey string, isSnapsh
         encryptedBuffer.Write(bytes.Repeat([]byte{byte(paddingLength)}, paddingLength))
         encryptedBuffer.Write(bytes.Repeat([]byte{0}, gcm.Overhead()))
 
-        // The encrypted data will be appended to the duplicacy header and the once.
+        // The encrypted data will be appended to the duplicacy banner and the once.
         encryptedBytes := gcm.Seal(encryptedBuffer.Bytes()[:offset], nonce,
             encryptedBuffer.Bytes()[offset:offset+dataLength+paddingLength], nil)
 
         encryptedBuffer.Truncate(len(encryptedBytes))
+    }
 
+    if chunk.config.DataShards == 0 || chunk.config.ParityShards == 0 {
         chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
+        return
+    }
+
+    // Start erasure coding
+    encoder, err := reedsolomon.New(chunk.config.DataShards, chunk.config.ParityShards)
+    if err != nil {
+        return err
+    }
+    chunkSize := len(encryptedBuffer.Bytes())
+    shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
+    // Append zeros to make the last shard to have the same size as other
+    encryptedBuffer.Write(make([]byte, shardSize * chunk.config.DataShards - chunkSize))
+    // Grow the buffer for parity shards
+    encryptedBuffer.Grow(shardSize * chunk.config.ParityShards)
+    // Now create one slice for each shard, reusing the data in the buffer
+    data := make([][]byte, chunk.config.DataShards + chunk.config.ParityShards)
+    for i := 0; i < chunk.config.DataShards + chunk.config.ParityShards; i++ {
+        data[i] = encryptedBuffer.Bytes()[i * shardSize: (i + 1) * shardSize]
+    }
+    // This populates the parity shard
+    encoder.Encode(data)
+
+    // Prepare the chunk to be uploaded
+    chunk.buffer.Reset()
+    // First the banner
+    chunk.buffer.Write([]byte(ERASURE_CODING_BANNER))
+    // Then the header which includes the chunk size, data/parity and a 2-byte checksum
+    header := make([]byte, 14)
+    binary.LittleEndian.PutUint64(header[0:], uint64(chunkSize))
+    binary.LittleEndian.PutUint16(header[8:], uint16(chunk.config.DataShards))
+    binary.LittleEndian.PutUint16(header[10:], uint16(chunk.config.ParityShards))
+    header[12] = header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10]
+    header[13] = header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11]
+    chunk.buffer.Write(header)
+    // Calculate the highway hash for each shard
+    hashKey := make([]byte, 32)
+    for _, part := range data {
+        hasher, err := highwayhash.New(hashKey)
+        if err != nil {
+            return err
+        }
+        _, err = hasher.Write(part)
+        if err != nil {
+            return err
+        }
+        chunk.buffer.Write(hasher.Sum(nil))
+    }
+
+    // Copy the data
+    for _, part := range data {
+        chunk.buffer.Write(part)
+    }
+    // Append the header again for redundancy
+    chunk.buffer.Write(header)
 
     return nil
 
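Read together, the Encrypt hunk above defines the erasure-coded chunk layout: the 10-byte ERASURE_CODING_BANNER, a 14-byte header (little-endian 8-byte payload size, 2-byte data-shard count, 2-byte parity-shard count, then two XOR checksum bytes over the preceding twelve), one 32-byte HighwayHash per shard, the shards themselves (payload zero-padded to a multiple of the shard size, followed by the parity shards), and a redundant trailing copy of the header. The sketch below (an assumption-labelled illustration, not code from the commit) computes the resulting file size; it agrees with the expectedLength formula in the Decrypt hunk that follows:

    // encodedSize estimates the on-disk size of an erasure-coded chunk under the
    // layout described above. chunkSize is the length of the compressed (and
    // possibly encrypted) payload before erasure coding.
    func encodedSize(chunkSize, dataShards, parityShards int) int {
        shardSize := (chunkSize + dataShards - 1) / dataShards // last data shard is zero-padded
        totalShards := dataShards + parityShards
        return 10 + 14 + // banner + header
            totalShards*32 + // one HighwayHash-256 digest per shard
            totalShards*shardSize + // padded data shards plus parity shards
            14 // trailing copy of the header, kept for redundancy
    }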
@@ -322,7 +379,122 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
     }()
 
     chunk.buffer, encryptedBuffer = encryptedBuffer, chunk.buffer
-    headerLength := len(ENCRYPTION_HEADER)
+    bannerLength := len(ENCRYPTION_BANNER)
 
+    if len(encryptedBuffer.Bytes()) > bannerLength && string(encryptedBuffer.Bytes()[:bannerLength]) == ERASURE_CODING_BANNER {
+
+        // The chunk was encoded with erasure coding
+        if len(encryptedBuffer.Bytes()) < bannerLength + 14 {
+            return fmt.Errorf("Erasure coding header truncated (%d bytes)", len(encryptedBuffer.Bytes()))
+        }
+        // Check the header checksum
+        header := encryptedBuffer.Bytes()[bannerLength: bannerLength + 14]
+        if header[12] != header[0] ^ header[2] ^ header[4] ^ header[6] ^ header[8] ^ header[10] ||
+            header[13] != header[1] ^ header[3] ^ header[5] ^ header[7] ^ header[9] ^ header[11] {
+            return fmt.Errorf("Erasure coding header corrupted (%x)", header)
+        }
+
+        // Read the parameters
+        chunkSize := int(binary.LittleEndian.Uint64(header[0:8]))
+        dataShards := int(binary.LittleEndian.Uint16(header[8:10]))
+        parityShards := int(binary.LittleEndian.Uint16(header[10:12]))
+        shardSize := (chunkSize + chunk.config.DataShards - 1) / chunk.config.DataShards
+        // This is the length the chunk file should have
+        expectedLength := bannerLength + 2 * len(header) + (dataShards + parityShards) * (shardSize + 32)
+        // The minimum length that can be recovered from
+        minimumLength := bannerLength + len(header) + (dataShards + parityShards) * 32 + dataShards * shardSize
+        LOG_DEBUG("CHUNK_ERASURECODE", "Chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
+        if len(encryptedBuffer.Bytes()) > expectedLength {
+            LOG_WARN("CHUNK_ERASURECODE", "Chunk has %d bytes (instead of %d)", len(encryptedBuffer.Bytes()), expectedLength)
+        } else if len(encryptedBuffer.Bytes()) == expectedLength {
+            // Correct size; fall through
+        } else if len(encryptedBuffer.Bytes()) > minimumLength {
+            LOG_WARN("CHUNK_ERASURECODE", "Chunk is truncated (%d out of %d bytes)", len(encryptedBuffer.Bytes()), expectedLength)
+        } else {
+            return fmt.Errorf("Not enough chunk data for recovery; chunk size: %d bytes, data size: %d, parity: %d/%d", chunkSize, len(encryptedBuffer.Bytes()), dataShards, parityShards)
+        }
+
+        // Where the hashes start
+        hashOffset := bannerLength + len(header)
+        // Where the data start
+        dataOffset := hashOffset + (dataShards + parityShards) * 32
+
+        data := make([][]byte, dataShards + parityShards)
+        recoveryNeeded := false
+        hashKey := make([]byte, 32)
+        availableShards := 0
+        for i := 0; i < dataShards + parityShards; i++ {
+            start := dataOffset + i * shardSize
+            if start + shardSize > len(encryptedBuffer.Bytes()) {
+                // the current shard is incomplete
+                break
+            }
+            // Now verify the hash
+            hasher, err := highwayhash.New(hashKey)
+            if err != nil {
+                return err
+            }
+            _, err = hasher.Write(encryptedBuffer.Bytes()[start: start + shardSize])
+            if err != nil {
+                return err
+            }
+            if bytes.Compare(hasher.Sum(nil), encryptedBuffer.Bytes()[hashOffset + i * 32: hashOffset + (i + 1) * 32]) != 0 {
+                if i < dataShards {
+                    recoveryNeeded = true
+                }
+            } else {
+                // The shard is good
+                data[i] = encryptedBuffer.Bytes()[start: start + shardSize]
+                availableShards++
+                if availableShards >= dataShards {
+                    // We have enough shards to recover; skip the remaining shards
+                    break
+                }
+            }
+        }
+
+        if !recoveryNeeded {
+            // Remove the padding zeros from the last shard
+            encryptedBuffer.Truncate(dataOffset + chunkSize)
+            // Skip the header and hashes
+            encryptedBuffer.Read(encryptedBuffer.Bytes()[:dataOffset])
+        } else {
+            if availableShards < dataShards {
+                return fmt.Errorf("Not enough chunk data for recover; only %d out of %d shards are complete", availableShards, dataShards + parityShards)
+            }
+
+            // Show the validity of shards using a string of * and -
+            slots := ""
+            for _, part := range data {
+                if len(part) != 0 {
+                    slots += "*"
+                } else {
+                    slots += "-"
+                }
+            }
+
+            LOG_WARN("CHUNK_ERASURECODE", "Recovering a %d byte chunk from %d byte shards: %s", chunkSize, shardSize, slots)
+            encoder, err := reedsolomon.New(dataShards, parityShards)
+            if err != nil {
+                return err
+            }
+            err = encoder.Reconstruct(data)
+            if err != nil {
+                return err
+            }
+            LOG_DEBUG("CHUNK_ERASURECODE", "Chunk data successfully recovered")
+            buffer := AllocateChunkBuffer()
+            buffer.Reset()
+            for i := 0; i < dataShards; i++ {
+                buffer.Write(data[i])
+            }
+            buffer.Truncate(chunkSize)
+
+            ReleaseChunkBuffer(encryptedBuffer)
+            encryptedBuffer = buffer
+        }
+
+    }
+
     if len(encryptionKey) > 0 {
 
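The recovery path above leaves any shard whose HighwayHash does not verify as a nil entry in data and hands the slice to github.com/klauspost/reedsolomon, which can rebuild up to parityShards missing shards. A minimal, self-contained sketch of that library usage (illustrative only; the payload and the choice of dropped shards are arbitrary):

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "github.com/klauspost/reedsolomon"
    )

    func main() {
        const dataShards, parityShards = 5, 2

        enc, err := reedsolomon.New(dataShards, parityShards)
        if err != nil {
            log.Fatal(err)
        }

        // Split a payload into 5 data shards and compute 2 parity shards.
        payload := bytes.Repeat([]byte("duplicacy "), 100)
        shards, err := enc.Split(payload)
        if err != nil {
            log.Fatal(err)
        }
        if err := enc.Encode(shards); err != nil {
            log.Fatal(err)
        }

        // Drop any two shards, the most a 5:2 configuration can tolerate.
        shards[1], shards[6] = nil, nil

        // Reconstruct restores the missing shards in place.
        if err := enc.Reconstruct(shards); err != nil {
            log.Fatal(err)
        }
        if ok, _ := enc.Verify(shards); ok {
            fmt.Println("all shards verified after reconstruction")
        }
    }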
@@ -340,15 +512,15 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
             key = hasher.Sum(nil)
         }
 
-        if len(encryptedBuffer.Bytes()) < headerLength + 12 {
+        if len(encryptedBuffer.Bytes()) < bannerLength + 12 {
             return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
         }
 
-        if string(encryptedBuffer.Bytes()[:headerLength-1]) != ENCRYPTION_HEADER[:headerLength-1] {
+        if string(encryptedBuffer.Bytes()[:bannerLength-1]) != ENCRYPTION_BANNER[:bannerLength-1] {
             return fmt.Errorf("The storage doesn't seem to be encrypted")
         }
 
-        encryptionVersion := encryptedBuffer.Bytes()[headerLength-1]
+        encryptionVersion := encryptedBuffer.Bytes()[bannerLength-1]
         if encryptionVersion != 0 && encryptionVersion != ENCRYPTION_VERSION_RSA {
             return fmt.Errorf("Unsupported encryption version %d", encryptionVersion)
         }
@@ -359,14 +531,14 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
             return fmt.Errorf("An RSA private key is required to decrypt the chunk")
         }
 
-        encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[headerLength:headerLength+2])
+        encryptedKeyLength := binary.LittleEndian.Uint16(encryptedBuffer.Bytes()[bannerLength:bannerLength+2])
 
-        if len(encryptedBuffer.Bytes()) < headerLength + 14 + int(encryptedKeyLength) {
+        if len(encryptedBuffer.Bytes()) < bannerLength + 14 + int(encryptedKeyLength) {
             return fmt.Errorf("No enough encrypted data (%d bytes) provided", len(encryptedBuffer.Bytes()))
         }
 
-        encryptedKey := encryptedBuffer.Bytes()[headerLength + 2:headerLength + 2 + int(encryptedKeyLength)]
-        headerLength += 2 + int(encryptedKeyLength)
+        encryptedKey := encryptedBuffer.Bytes()[bannerLength + 2:bannerLength + 2 + int(encryptedKeyLength)]
+        bannerLength += 2 + int(encryptedKeyLength)
 
         decryptedKey, err := rsa.DecryptOAEP(sha256.New(), rand.Reader, chunk.config.rsaPrivateKey, encryptedKey, nil)
         if err != nil {
@@ -385,8 +557,8 @@ func (chunk *Chunk) Decrypt(encryptionKey []byte, derivationKey string) (err err
             return err
         }
 
-        offset = headerLength + gcm.NonceSize()
-        nonce := encryptedBuffer.Bytes()[headerLength:offset]
+        offset = bannerLength + gcm.NonceSize()
+        nonce := encryptedBuffer.Bytes()[bannerLength:offset]
 
         decryptedBytes, err := gcm.Open(encryptedBuffer.Bytes()[:offset], nonce,
             encryptedBuffer.Bytes()[offset:], nil)
@@ -12,7 +12,46 @@ import (
     "testing"
 )
 
-func TestChunk(t *testing.T) {
+func TestErasureCoding(t *testing.T) {
+    key := []byte("duplicacydefault")
 
+    config := CreateConfig()
+    config.HashKey = key
+    config.IDKey = key
+    config.MinimumChunkSize = 100
+    config.CompressionLevel = DEFAULT_COMPRESSION_LEVEL
+    config.DataShards = 5
+    config.ParityShards = 2
+
+    chunk := CreateChunk(config, true)
+    chunk.Reset(true)
+    data := make([]byte, 100)
+    for i := 0; i < len(data); i++ {
+        data[i] = byte(i)
+    }
+    chunk.Write(data)
+    err := chunk.Encrypt([]byte(""), "", false)
+    if err != nil {
+        t.Errorf("Failed to encrypt the test data: %v", err)
+        return
+    }
+
+    encryptedData := make([]byte, chunk.GetLength())
+    copy(encryptedData, chunk.GetBytes())
+
+    crypto_rand.Read(encryptedData[280:300])
+
+    chunk.Reset(false)
+    chunk.Write(encryptedData)
+    err = chunk.Decrypt([]byte(""), "")
+    if err != nil {
+        t.Errorf("Failed to decrypt the data: %v", err)
+        return
+    }
+    return
+}
+
+func TestChunkBasic(t *testing.T) {
+
     key := []byte("duplicacydefault")
 
@@ -32,7 +71,10 @@ func TestChunk(t *testing.T) {
         config.rsaPublicKey = privateKey.Public().(*rsa.PublicKey)
     }
 
-    remainderLength := -1
+    if testErasureCoding {
+        config.DataShards = 5
+        config.ParityShards = 2
+    }
 
     for i := 0; i < 500; i++ {
 
@@ -56,10 +98,14 @@ func TestChunk(t *testing.T) {
         encryptedData := make([]byte, chunk.GetLength())
         copy(encryptedData, chunk.GetBytes())
 
-        if remainderLength == -1 {
-            remainderLength = len(encryptedData) % 256
-        } else if len(encryptedData)%256 != remainderLength {
-            t.Errorf("Incorrect padding size")
+        if testErasureCoding {
+            offset := 24 + 32 * 7
+            start := rand.Int() % (len(encryptedData) - offset) + offset
+            length := (len(encryptedData) - offset) / 7
+            if start + length > len(encryptedData) {
+                length = len(encryptedData) - start
+            }
+            crypto_rand.Read(encryptedData[start: start+length])
         }
 
         chunk.Reset(false)
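The constants in the updated corruption loop follow from the erasure-coded layout: 24 is the 10-byte banner plus the 14-byte header, and 32 * 7 covers the per-shard hashes for the 5 data and 2 parity shards configured earlier, so offset is the first byte of shard data. A corruption run of roughly one seventh of the remaining bytes lands on at most two adjacent shards, which two parity shards can absorb. A sketch of that arithmetic (assumes the layout described above; not part of the commit):

    // Offsets implied by the erasure-coded chunk layout for the 5:2 test setup.
    const (
        bannerLen   = 10                                // len(ERASURE_CODING_BANNER)
        headerLen   = 14                                // payload size + shard counts + 2-byte checksum
        totalShards = 5 + 2                             // data + parity shards used by the test
        hashBlock   = 32 * totalShards                  // one HighwayHash-256 digest per shard
        dataOffset  = bannerLen + headerLen + hashBlock // == 24 + 32*7
    )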
@@ -34,8 +34,8 @@ var DEFAULT_KEY = []byte("duplicacy")
 // standard zlib levels of -1 to 9.
 var DEFAULT_COMPRESSION_LEVEL = 100
 
-// The new header of the config file (to differentiate from the old format where the salt and iterations are fixed)
-var CONFIG_HEADER = "duplicacy\001"
+// The new banner of the config file (to differentiate from the old format where the salt and iterations are fixed)
+var CONFIG_BANNER = "duplicacy\001"
 
 // The length of the salt used in the new format
 var CONFIG_SALT_LENGTH = 32
@@ -70,6 +70,10 @@ type Config struct {
     // for encrypting a non-chunk file
     FileKey []byte `json:"-"`
 
+    // for erasure coding
+    DataShards int `json:'data-shards'`
+    ParityShards int `json:'parity-shards'`
+
     // for RSA encryption
     rsaPrivateKey *rsa.PrivateKey
     rsaPublicKey *rsa.PublicKey
@@ -180,6 +184,10 @@ func (config *Config) Print() {
         LOG_TRACE("CONFIG_INFO", "Metadata chunks are encrypted")
     }
 
+    if config.DataShards != 0 && config.ParityShards != 0 {
+        LOG_TRACE("CONFIG_INFO", "Data shards: %d, parity shards: %d", config.DataShards, config.ParityShards)
+    }
+
     if config.rsaPublicKey != nil {
         pkisPublicKey, _ := x509.MarshalPKIXPublicKey(config.rsaPublicKey)
 
@@ -386,11 +394,11 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
         return nil, false, err
     }
 
-    if len(configFile.GetBytes()) < len(ENCRYPTION_HEADER) {
+    if len(configFile.GetBytes()) < len(ENCRYPTION_BANNER) {
         return nil, false, fmt.Errorf("The storage has an invalid config file")
     }
 
-    if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)-1]) == ENCRYPTION_HEADER[:len(ENCRYPTION_HEADER)-1] && len(password) == 0 {
+    if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)-1]) == ENCRYPTION_BANNER[:len(ENCRYPTION_BANNER)-1] && len(password) == 0 {
         return nil, true, fmt.Errorf("The storage is likely to have been initialized with a password before")
     }
 
@@ -398,23 +406,23 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
 
     if len(password) > 0 {
 
-        if string(configFile.GetBytes()[:len(ENCRYPTION_HEADER)]) == ENCRYPTION_HEADER {
+        if string(configFile.GetBytes()[:len(ENCRYPTION_BANNER)]) == ENCRYPTION_BANNER {
             // This is the old config format with a static salt and a fixed number of iterations
             masterKey = GenerateKeyFromPassword(password, DEFAULT_KEY, CONFIG_DEFAULT_ITERATIONS)
             LOG_TRACE("CONFIG_FORMAT", "Using a static salt and %d iterations for key derivation", CONFIG_DEFAULT_ITERATIONS)
-        } else if string(configFile.GetBytes()[:len(CONFIG_HEADER)]) == CONFIG_HEADER {
+        } else if string(configFile.GetBytes()[:len(CONFIG_BANNER)]) == CONFIG_BANNER {
             // This is the new config format with a random salt and a configurable number of iterations
             encryptedLength := len(configFile.GetBytes()) - CONFIG_SALT_LENGTH - 4
 
             // Extract the salt and the number of iterations
-            saltStart := configFile.GetBytes()[len(CONFIG_HEADER):]
+            saltStart := configFile.GetBytes()[len(CONFIG_BANNER):]
             iterations := binary.LittleEndian.Uint32(saltStart[CONFIG_SALT_LENGTH : CONFIG_SALT_LENGTH+4])
             LOG_TRACE("CONFIG_ITERATIONS", "Using %d iterations for key derivation", iterations)
             masterKey = GenerateKeyFromPassword(password, saltStart[:CONFIG_SALT_LENGTH], int(iterations))
 
-            // Copy to a temporary buffer to replace the header and remove the salt and the number of riterations
+            // Copy to a temporary buffer to replace the banner and remove the salt and the number of riterations
             var encrypted bytes.Buffer
-            encrypted.Write([]byte(ENCRYPTION_HEADER))
+            encrypted.Write([]byte(ENCRYPTION_BANNER))
             encrypted.Write(saltStart[CONFIG_SALT_LENGTH+4:])
 
             configFile.Reset(false)
@@ -423,7 +431,7 @@ func DownloadConfig(storage Storage, password string) (config *Config, isEncrypt
                 LOG_ERROR("CONFIG_DOWNLOAD", "Encrypted config has %d bytes instead of expected %d bytes", len(configFile.GetBytes()), encryptedLength)
             }
         } else {
-            return nil, true, fmt.Errorf("The config file has an invalid header")
+            return nil, true, fmt.Errorf("The config file has an invalid banner")
         }
     }
 
     // Decrypt the config file. masterKey == nil means no encryption.
@@ -487,15 +495,15 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
         return false
     }
 
-    // The new encrypted format for config is CONFIG_HEADER + salt + #iterations + encrypted content
+    // The new encrypted format for config is CONFIG_BANNER + salt + #iterations + encrypted content
     encryptedLength := len(chunk.GetBytes()) + CONFIG_SALT_LENGTH + 4
 
-    // Copy to a temporary buffer to replace the header and add the salt and the number of iterations
+    // Copy to a temporary buffer to replace the banner and add the salt and the number of iterations
     var encrypted bytes.Buffer
-    encrypted.Write([]byte(CONFIG_HEADER))
+    encrypted.Write([]byte(CONFIG_BANNER))
     encrypted.Write(salt)
     binary.Write(&encrypted, binary.LittleEndian, uint32(iterations))
-    encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_HEADER):])
+    encrypted.Write(chunk.GetBytes()[len(ENCRYPTION_BANNER):])
 
     chunk.Reset(false)
     chunk.Write(encrypted.Bytes())
@@ -528,7 +536,7 @@ func UploadConfig(storage Storage, config *Config, password string, iterations i
 // it simply creates a file named 'config' that stores various parameters as well as a set of keys if encryption
 // is enabled.
 func ConfigStorage(storage Storage, iterations int, compressionLevel int, averageChunkSize int, maximumChunkSize int,
-    minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string) bool {
+    minimumChunkSize int, password string, copyFrom *Config, bitCopy bool, keyFile string, dataShards int, parityShards int) bool {
 
     exist, _, _, err := storage.GetFileInfo(0, "config")
     if err != nil {
@@ -550,6 +558,10 @@ func ConfigStorage(storage Storage, iterations int, compressionLevel int, averag
     if keyFile != "" {
         config.loadRSAPublicKey(keyFile)
     }
+
+    config.DataShards = dataShards
+    config.ParityShards = parityShards
+
     return UploadConfig(storage, config, password, iterations)
 }
 
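With the two hunks above, ConfigStorage accepts the shard counts and stores them in the config that UploadConfig writes out, which is how the Encrypt path earlier learns them for every later backup to that storage. A hypothetical call site (only the parameter order comes from the commit; the concrete values are illustrative):

    // Initialize a storage with 5 data shards and 2 parity shards. The earlier
    // arguments are the existing ones: iterations, compression level,
    // average/maximum/minimum chunk sizes, password, copyFrom, bitCopy, key file.
    ok := ConfigStorage(storage, 16384, 100, 4*1024*1024, 16*1024*1024, 1*1024*1024,
        password, nil, false, "", 5, 2)
    if !ok {
        // handle initialization failure
    }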
@@ -28,6 +28,7 @@ var testQuickMode bool
 var testThreads int
 var testFixedChunkSize bool
 var testRSAEncryption bool
+var testErasureCoding bool
 
 func init() {
     flag.StringVar(&testStorageName, "storage", "", "the test storage to use")
@@ -36,6 +37,7 @@ func init() {
     flag.IntVar(&testThreads, "threads", 1, "number of downloading/uploading threads")
     flag.BoolVar(&testFixedChunkSize, "fixed-chunk-size", false, "fixed chunk size")
    flag.BoolVar(&testRSAEncryption, "rsa", false, "enable RSA encryption")
+    flag.BoolVar(&testErasureCoding, "erasure-coding", false, "enable Erasure Coding")
     flag.Parse()
 }
 