Mirror of https://github.com/rclone/rclone.git (synced 2026-01-22 04:13:14 +00:00)

Compare commits: resume...fix-b2-acl (1 commit)
Commit 2e99232a04

backend/cache/cache.go (vendored, 2 changes)
@@ -489,7 +489,7 @@ func NewFs(ctx context.Context, name, rootPath string, m configmap.Mapper) (fs.F
 	f.opt.TempWritePath = filepath.ToSlash(f.opt.TempWritePath)
 	f.tempFs, err = cache.Get(ctx, f.opt.TempWritePath)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create temp fs: %w", err)
+		return nil, fmt.Errorf("failed to create temp fs: %v: %w", err, err)
 	}
 	fs.Infof(name, "Upload Temp Rest Time: %v", f.opt.TempWaitTime)
 	fs.Infof(name, "Upload Temp FS: %v", f.opt.TempWritePath)
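A side note on the error-wrapping idiom in the hunk above: with fmt.Errorf, the %w verb (Go 1.13+) wraps the underlying error so callers can still match it with errors.Is/errors.As, while %v only interpolates its text. A minimal sketch, independent of rclone, using a stdlib sentinel error:

    package main

    import (
        "errors"
        "fmt"
        "io/fs"
    )

    func main() {
        base := fs.ErrNotExist
        wrapped := fmt.Errorf("failed to create temp fs: %w", base)
        fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: %w preserves the error chain
    }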
backend/cache/cache_test.go (vendored, 2 changes)

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
 	fstests.Run(t, &fstests.Opt{
 		RemoteName:               "TestCache:",
 		NilObject:                (*cache.Object)(nil),
-		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt", "Resume"},
+		UnimplementableFsMethods: []string{"PublicLink", "OpenWriterAt"},
 		UnimplementableObjectMethods: []string{"MimeType", "ID", "GetTier", "SetTier"},
 		SkipInvalidUTF8: true, // invalid UTF-8 confuses the cache
 	})
backend/chunker/chunker.go

@@ -6,8 +6,6 @@ import (
 	"context"
 	"crypto/md5"
 	"crypto/sha1"
-	"encoding"
-	"encoding/base64"
 	"encoding/hex"
 	"encoding/json"
 	"errors"
@@ -15,7 +13,6 @@ import (
 	gohash "hash"
 	"io"
 	"io/ioutil"
-	"log"
 	"math/rand"
 	"path"
 	"regexp"
@@ -382,8 +379,6 @@ type Fs struct {
 	features     *fs.Features // optional features
 	dirSort      bool         // reserved for future, ignored
 	useNoRename  bool         // can be set with the transactions option
-	hashState    string       // set in resume(), used to restore hash state
-	resumeXactID string       // set in resume(), allows reuse of xactID upon resume
 }

 // configure sets up chunker for given name format, meta format and hash type.
@@ -1157,41 +1152,7 @@ func (f *Fs) put(

 	// Prepare to upload
 	c := f.newChunkingReader(src)
-	// Prepare for resume if resumable
-	var resumeOpt *fs.OptionResume
-	// partialHashState will be used in wrapStream to restore hash state
-	var partialHashState []byte
-	for _, option := range options {
-		switch option.(type) {
-		case *fs.OptionResume:
-			resumeOpt = option.(*fs.OptionResume)
-			if resumeOpt.Pos != 0 {
-				numChunksOnRemote := resumeOpt.Pos / int64(f.opt.ChunkSize)
-				// Checks for existing chunks on the remote
-				for i := 0; i < int(numChunksOnRemote); i++ {
-					existingChunkName := f.makeChunkName(remote, i, "", f.resumeXactID)
-					existingChunk, err := f.base.NewObject(ctx, existingChunkName)
-					// If NewObject returns an error the chunk likely doesn't exist on the remote and we cannot resume
-					if err != nil {
-						resumeOpt.Pos = 0
-						c.chunks = nil
-						break
-					}
-					c.chunks = append(c.chunks, existingChunk)
-				}
-				fs.Debugf(f, "Resuming at chunk number: %d", numChunksOnRemote)
-				partialHashState, _ = base64.StdEncoding.DecodeString(f.hashState)
-				// Discard bytes that already exist on remote
-				written, err := io.CopyN(ioutil.Discard, in, resumeOpt.Pos)
-				if err != nil {
-					return nil, err
-				}
-				c.accountBytes(written)
-				c.sizeLeft = c.sizeTotal - c.readCount
-			}
-		}
-	}
-	wrapIn := c.wrapStream(ctx, in, src, partialHashState)
+	wrapIn := c.wrapStream(ctx, in, src)

 	var metaObject fs.Object
 	defer func() {
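The resume logic removed above checkpoints an in-progress hash by round-tripping it through encoding.BinaryMarshaler/BinaryUnmarshaler, which the stdlib hash implementations (crypto/md5, crypto/sha1, crypto/sha256, ...) have satisfied since around Go 1.10. A minimal sketch of that mechanism, independent of rclone:

    package main

    import (
        "crypto/sha1"
        "encoding"
        "fmt"
    )

    func main() {
        h := sha1.New()
        h.Write([]byte("first half,"))

        // Checkpoint the internal state of the partially-fed hash.
        state, err := h.(encoding.BinaryMarshaler).MarshalBinary()
        if err != nil {
            panic(err)
        }

        // Later (e.g. after an interrupted upload) restore it into a fresh hash.
        h2 := sha1.New()
        if err := h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(state); err != nil {
            panic(err)
        }
        h2.Write([]byte(" second half"))

        // Same digest as hashing the whole input in one go.
        fmt.Printf("%x\n", h2.Sum(nil))
    }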
@@ -1201,22 +1162,13 @@ func (f *Fs) put(
 	}()

 	baseRemote := remote
-	var xactID string
-	if resumeOpt != nil && resumeOpt.Pos != 0 {
-		xactID = f.resumeXactID
-	} else {
-		xactID, err = f.newXactID(ctx, baseRemote)
-		if err != nil {
-			return nil, err
-		}
-	}
+	xactID, errXact := f.newXactID(ctx, baseRemote)
+	if errXact != nil {
+		return nil, errXact
+	}

 	// Transfer chunks data
 	for c.chunkNo = 0; !c.done; c.chunkNo++ {
-		// skip to chunk we can resume from if resumeOpt is set
-		if c.chunkNo == 0 && resumeOpt != nil && resumeOpt.Pos != 0 {
-			c.chunkNo = int(resumeOpt.Pos) / int(f.opt.ChunkSize)
-		}
 		if c.chunkNo > maxSafeChunkNumber {
 			return nil, ErrChunkOverflow
 		}
@@ -1278,41 +1230,6 @@ func (f *Fs) put(
 		c.chunkLimit = c.chunkSize

 		c.chunks = append(c.chunks, chunk)

-		// If an OptionResume was passed then we should call SetID so a resume can be attempted in event of a failure
-		// ID keeps track of the first chunk that should be uploaded if a resume is attempted
-		if resumeOpt != nil {
-			// Publish hash state to control chunk
-			marshaler, ok := c.hasher.(encoding.BinaryMarshaler)
-			if !ok {
-				return nil, fmt.Errorf("The hash type does not implement encoding.BinaryMarshaler")
-			}
-			state, err := marshaler.MarshalBinary()
-			if err != nil {
-				return nil, err
-			}
-			hashType := f.opt.HashType
-			data, err := marshalPartialHashJSON(ctx, hashType, base64.StdEncoding.EncodeToString(state))
-			if err != nil {
-				return nil, err
-			}
-			controlChunkName := f.makeChunkName(remote, -1, "phash", xactID)
-			controlInfo := f.wrapInfo(src, controlChunkName, int64(len(data)))
-			controlChunk, err := basePut(ctx, bytes.NewReader(data), controlInfo)
-			defer func() {
-				_ = controlChunk.Remove(ctx)
-			}()
-			if err != nil {
-				return nil, err
-			}
-			positionStr := strconv.Itoa(c.chunkNo + 1) // stores the number of chunks uploaded
-			chunkSizeStr := strconv.FormatInt(c.chunkSize, 10)
-			startFromStr := strconv.FormatInt(int64(f.opt.StartFrom), 10)
-			err = resumeOpt.SetID(ctx, chunkSizeStr+","+startFromStr+","+positionStr+","+xactID, f.opt.HashType, base64.StdEncoding.EncodeToString(state))
-			if err != nil {
-				return nil, err
-			}
-		}
 	}

 	// Validate uploaded size
@@ -1439,7 +1356,7 @@ func (f *Fs) newChunkingReader(src fs.ObjectInfo) *chunkingReader {
 	return c
 }

-func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, partialHashState []byte) io.Reader {
+func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.ObjectInfo) io.Reader {
 	baseIn, wrapBack := accounting.UnWrap(in)

 	switch {
@@ -1474,15 +1391,6 @@ func (c *chunkingReader) wrapStream(ctx context.Context, in io.Reader, src fs.Ob
 	}

 	if c.hasher != nil {
-		// Restores hash state during a resume
-		if partialHashState != nil {
-			unmarshaler, ok := c.hasher.(encoding.BinaryUnmarshaler)
-			if ok {
-				if err := unmarshaler.UnmarshalBinary(partialHashState); err != nil {
-					log.Fatal("unable to unmarshal hash:", err)
-				}
-			}
-		}
 		baseIn = io.TeeReader(baseIn, c.hasher)
 	}
 	c.baseReader = baseIn
@@ -2602,34 +2510,6 @@ func unmarshalSimpleJSON(ctx context.Context, metaObject fs.Object, data []byte)
 	return info, true, nil
 }

-// Format for partial hash control chunks
-type partialHashJSON struct {
-	HashType    string `json:"htype"`
-	PartialHash string `json:"phash"`
-}
-
-// marshalPartialHashJSON
-//
-// Creates a JSON containing the hashType being used and the partial hash state. This will be stored in
-// a control chunk and used for resume functionality.
-func marshalPartialHashJSON(ctx context.Context, hashType, partialHash string) ([]byte, error) {
-	controlData := partialHashJSON{
-		HashType:    hashType,
-		PartialHash: partialHash,
-	}
-	data, err := json.Marshal(&controlData)
-	return data, err
-}
-
-// unmarshalPartialHashJSON parses partial hash control chunk.
-func unmarshalPartialHashJSON(ctx context.Context, data []byte) (hashType, partialHashState string, err error) {
-	var partialHashData partialHashJSON
-	err = json.Unmarshal(data, &partialHashData)
-	return partialHashData.HashType, partialHashData.PartialHash, err
-}
-
 func silentlyRemove(ctx context.Context, o fs.Object) {
 	_ = o.Remove(ctx) // ignore error
 }
@@ -2664,58 +2544,9 @@ func (f *Fs) CanQuickRename() bool {
 	return f.base.Features().Move != nil
 }

-// Resume checks whether the (remote, ID) pair is valid and returns
-// the point the file should be resumed from or an error.
-func (f *Fs) Resume(ctx context.Context, remote, ID, hashName, hashState string) (Pos int64, err error) {
-	idSlice := strings.Split(ID, ",")
-	cachedChunkSize, err := strconv.ParseInt(idSlice[0], 10, 64)
-	cachedStartFrom, err := strconv.ParseInt(idSlice[1], 10, 64)
-	cachedChunkNo, err := strconv.ParseInt(idSlice[2], 10, 64)
-	cachedXactID := idSlice[3]
-	if err != nil {
-		return 0, err
-	}
-	if cachedChunkSize != int64(f.opt.ChunkSize) {
-		return 0, errors.New("ChunkSize doesn't match for file we are trying to resume")
-	}
-	if f.opt.StartFrom != int(cachedStartFrom) {
-		return 0, errors.New("StartFrom doesn't match for file we are trying to resume")
-	}
-	// Check partial hash control chunk
-	controlChunkName := f.makeChunkName(remote, -1, "phash", cachedXactID)
-	hashControlChunk, err := f.base.NewObject(ctx, controlChunkName)
-	if err != nil {
-		return 0, err
-	}
-	reader, err := hashControlChunk.Open(ctx)
-	data, err := ioutil.ReadAll(reader)
-	_ = reader.Close() // ensure file handle is freed on windows
-	if err != nil {
-		return 0, err
-	}
-	remoteHashType, remoteHashState, err := unmarshalPartialHashJSON(ctx, data)
-	if remoteHashType == hashName && remoteHashState == hashState {
-		if f.opt.HashType != remoteHashType {
-			fs.Debugf(f, "Resume skipped, mismatch hash types. prev: %s, curr: %s", remoteHashType, f.opt.HashType)
-			return 0, nil
-		}
-		pos := cachedChunkNo * cachedChunkSize
-		if err != nil {
-			return 0, err
-		}
-		f.hashState = hashState
-		f.resumeXactID = cachedXactID
-		return pos, nil
-	}
-
-	// No valid control chunks found, rewind from start
-	return 0, nil
-}
-
 // Check the interfaces are satisfied
 var (
 	_ fs.Fs     = (*Fs)(nil)
-	_ fs.Resumer = (*Fs)(nil)
 	_ fs.Purger = (*Fs)(nil)
 	_ fs.Copier = (*Fs)(nil)
 	_ fs.Mover  = (*Fs)(nil)
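For reference, the ID string that SetID writes and the removed Resume method parses is four comma-separated fields: chunk size, the start-from offset, the number of chunks already uploaded, and the transaction ID. Note that the removed code reuses err across the three strconv.ParseInt calls, so only the last parse error is actually checked. A hypothetical stricter parse (parseResumeID is not part of the commit; it assumes "fmt", "strconv" and "strings" are imported):

    func parseResumeID(id string) (chunkSize, startFrom, chunkNo int64, xactID string, err error) {
        parts := strings.Split(id, ",")
        if len(parts) != 4 {
            return 0, 0, 0, "", fmt.Errorf("resume ID: expected 4 fields, got %d", len(parts))
        }
        if chunkSize, err = strconv.ParseInt(parts[0], 10, 64); err != nil {
            return 0, 0, 0, "", fmt.Errorf("resume ID: bad chunk size: %w", err)
        }
        if startFrom, err = strconv.ParseInt(parts[1], 10, 64); err != nil {
            return 0, 0, 0, "", fmt.Errorf("resume ID: bad start-from: %w", err)
        }
        if chunkNo, err = strconv.ParseInt(parts[2], 10, 64); err != nil {
            return 0, 0, 0, "", fmt.Errorf("resume ID: bad chunk count: %w", err)
        }
        return chunkSize, startFrom, chunkNo, parts[3], nil
    }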
backend/crypt/cipher.go

@@ -7,7 +7,6 @@ import (
 	gocipher "crypto/cipher"
 	"crypto/rand"
 	"encoding/base32"
-	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -17,7 +16,6 @@ import (
 	"time"
 	"unicode/utf8"

-	"github.com/Max-Sum/base32768"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
@@ -116,57 +114,6 @@ func (mode NameEncryptionMode) String() (out string) {
 	return out
 }

-// fileNameEncoding are the encoding methods dealing with encrypted file names
-type fileNameEncoding interface {
-	EncodeToString(src []byte) string
-	DecodeString(s string) ([]byte, error)
-}
-
-// caseInsensitiveBase32Encoding defines a file name encoding
-// using a modified version of standard base32 as described in
-// RFC4648
-//
-// The standard encoding is modified in two ways
-//  * it becomes lower case (no-one likes upper case filenames!)
-//  * we strip the padding character `=`
-type caseInsensitiveBase32Encoding struct{}
-
-// EncodeToString encodes a string using the modified version of
-// base32 encoding.
-func (caseInsensitiveBase32Encoding) EncodeToString(src []byte) string {
-	encoded := base32.HexEncoding.EncodeToString(src)
-	encoded = strings.TrimRight(encoded, "=")
-	return strings.ToLower(encoded)
-}
-
-// DecodeString decodes a string as encoded by EncodeToString
-func (caseInsensitiveBase32Encoding) DecodeString(s string) ([]byte, error) {
-	if strings.HasSuffix(s, "=") {
-		return nil, ErrorBadBase32Encoding
-	}
-	// First figure out how many padding characters to add
-	roundUpToMultipleOf8 := (len(s) + 7) &^ 7
-	equals := roundUpToMultipleOf8 - len(s)
-	s = strings.ToUpper(s) + "========"[:equals]
-	return base32.HexEncoding.DecodeString(s)
-}
-
-// NewNameEncoding creates a NameEncoding from a string
-func NewNameEncoding(s string) (enc fileNameEncoding, err error) {
-	s = strings.ToLower(s)
-	switch s {
-	case "base32":
-		enc = caseInsensitiveBase32Encoding{}
-	case "base64":
-		enc = base64.RawURLEncoding
-	case "base32768":
-		enc = base32768.SafeEncoding
-	default:
-		err = fmt.Errorf("Unknown file name encoding mode %q", s)
-	}
-	return enc, err
-}
-
 // Cipher defines an encoding and decoding cipher for the crypt backend
 type Cipher struct {
 	dataKey [32]byte // Key for secretbox
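The de-padding trick in the removed DecodeString works because base32 output is always padded to a multiple of 8 characters, so the decoder can recompute how many stripped '=' to restore by rounding the length up with (len(s)+7) &^ 7. Using a test vector from this very diff ("12" encodes to "64p0"), a standalone round trip:

    package main

    import (
        "encoding/base32"
        "fmt"
        "strings"
    )

    func main() {
        // Encode: standard base32hex, then strip padding and lower-case.
        enc := base32.HexEncoding.EncodeToString([]byte("12"))
        enc = strings.ToLower(strings.TrimRight(enc, "="))
        fmt.Println(enc) // "64p0"

        // Decode: round the length up to a multiple of 8 (4 -> 8),
        // re-append the 4 stripped '=' and upper-case before decoding.
        n := (len(enc) + 7) &^ 7
        padded := strings.ToUpper(enc) + "========"[:n-len(enc)]
        out, err := base32.HexEncoding.DecodeString(padded)
        fmt.Println(string(out), err) // "12" <nil>
    }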
@@ -174,17 +121,15 @@ type Cipher struct {
 	nameTweak      [nameCipherBlockSize]byte // used to tweak the name crypto
 	block          gocipher.Block
 	mode           NameEncryptionMode
-	fileNameEnc    fileNameEncoding
 	buffers        sync.Pool // encrypt/decrypt buffers
 	cryptoRand     io.Reader // read crypto random numbers from here
 	dirNameEncrypt bool
 }

 // newCipher initialises the cipher. If salt is "" then it uses a built in salt val
-func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool, enc fileNameEncoding) (*Cipher, error) {
+func newCipher(mode NameEncryptionMode, password, salt string, dirNameEncrypt bool) (*Cipher, error) {
 	c := &Cipher{
 		mode:           mode,
-		fileNameEnc:    enc,
 		cryptoRand:     rand.Reader,
 		dirNameEncrypt: dirNameEncrypt,
 	}
@@ -242,6 +187,30 @@ func (c *Cipher) putBlock(buf []byte) {
 	c.buffers.Put(buf)
 }

+// encodeFileName encodes a filename using a modified version of
+// standard base32 as described in RFC4648
+//
+// The standard encoding is modified in two ways
+//  * it becomes lower case (no-one likes upper case filenames!)
+//  * we strip the padding character `=`
+func encodeFileName(in []byte) string {
+	encoded := base32.HexEncoding.EncodeToString(in)
+	encoded = strings.TrimRight(encoded, "=")
+	return strings.ToLower(encoded)
+}
+
+// decodeFileName decodes a filename as encoded by encodeFileName
+func decodeFileName(in string) ([]byte, error) {
+	if strings.HasSuffix(in, "=") {
+		return nil, ErrorBadBase32Encoding
+	}
+	// First figure out how many padding characters to add
+	roundUpToMultipleOf8 := (len(in) + 7) &^ 7
+	equals := roundUpToMultipleOf8 - len(in)
+	in = strings.ToUpper(in) + "========"[:equals]
+	return base32.HexEncoding.DecodeString(in)
+}
+
 // encryptSegment encrypts a path segment
 //
 // This uses EME with AES
@@ -262,7 +231,7 @@ func (c *Cipher) encryptSegment(plaintext string) string {
 	}
 	paddedPlaintext := pkcs7.Pad(nameCipherBlockSize, []byte(plaintext))
 	ciphertext := eme.Transform(c.block, c.nameTweak[:], paddedPlaintext, eme.DirectionEncrypt)
-	return c.fileNameEnc.EncodeToString(ciphertext)
+	return encodeFileName(ciphertext)
 }

 // decryptSegment decrypts a path segment
@@ -270,7 +239,7 @@ func (c *Cipher) decryptSegment(ciphertext string) (string, error) {
 	if ciphertext == "" {
 		return "", nil
 	}
-	rawCiphertext, err := c.fileNameEnc.DecodeString(ciphertext)
+	rawCiphertext, err := decodeFileName(ciphertext)
 	if err != nil {
 		return "", err
 	}
backend/crypt/cipher_test.go

@@ -4,7 +4,6 @@ import (
 	"bytes"
 	"context"
 	"encoding/base32"
-	"encoding/base64"
 	"errors"
 	"fmt"
 	"io"
@@ -12,7 +11,6 @@ import (
 	"strings"
 	"testing"

-	"github.com/Max-Sum/base32768"
 	"github.com/rclone/rclone/backend/crypt/pkcs7"
 	"github.com/rclone/rclone/lib/readers"
 	"github.com/stretchr/testify/assert"
@@ -47,31 +45,11 @@ func TestNewNameEncryptionModeString(t *testing.T) {
 	assert.Equal(t, NameEncryptionMode(3).String(), "Unknown mode #3")
 }

-type EncodingTestCase struct {
-	in       string
-	expected string
-}
-
-func testEncodeFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
-	for _, test := range testCases {
-		enc, err := NewNameEncoding(encoding)
-		assert.NoError(t, err, "There should be no error creating name encoder for base32.")
-		actual := enc.EncodeToString([]byte(test.in))
-		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
-		recovered, err := enc.DecodeString(test.expected)
-		assert.NoError(t, err)
-		assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
-		if caseInsensitive {
-			in := strings.ToUpper(test.expected)
-			recovered, err = enc.DecodeString(in)
-			assert.NoError(t, err)
-			assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
-		}
-	}
-}
-
-func TestEncodeFileNameBase32(t *testing.T) {
-	testEncodeFileName(t, "base32", []EncodingTestCase{
+func TestEncodeFileName(t *testing.T) {
+	for _, test := range []struct {
+		in       string
+		expected string
+	}{
 		{"", ""},
 		{"1", "64"},
 		{"12", "64p0"},
@@ -89,56 +67,20 @@ func TestEncodeFileNameBase32(t *testing.T) {
 		{"12345678901234", "64p36d1l6orjge9g64p36d0"},
 		{"123456789012345", "64p36d1l6orjge9g64p36d1l"},
 		{"1234567890123456", "64p36d1l6orjge9g64p36d1l6o"},
-	}, true)
-}
+	} {
+		actual := encodeFileName([]byte(test.in))
+		assert.Equal(t, actual, test.expected, fmt.Sprintf("in=%q", test.in))
+		recovered, err := decodeFileName(test.expected)
+		assert.NoError(t, err)
+		assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", test.expected))
+		in := strings.ToUpper(test.expected)
+		recovered, err = decodeFileName(in)
+		assert.NoError(t, err)
+		assert.Equal(t, string(recovered), test.in, fmt.Sprintf("reverse=%q", in))
+	}
+}

-func TestEncodeFileNameBase64(t *testing.T) {
-	testEncodeFileName(t, "base64", []EncodingTestCase{
-		{"", ""},
-		{"1", "MQ"},
-		{"12", "MTI"},
-		{"123", "MTIz"},
-		{"1234", "MTIzNA"},
-		{"12345", "MTIzNDU"},
-		{"123456", "MTIzNDU2"},
-		{"1234567", "MTIzNDU2Nw"},
-		{"12345678", "MTIzNDU2Nzg"},
-		{"123456789", "MTIzNDU2Nzg5"},
-		{"1234567890", "MTIzNDU2Nzg5MA"},
-		{"12345678901", "MTIzNDU2Nzg5MDE"},
-		{"123456789012", "MTIzNDU2Nzg5MDEy"},
-		{"1234567890123", "MTIzNDU2Nzg5MDEyMw"},
-		{"12345678901234", "MTIzNDU2Nzg5MDEyMzQ"},
-		{"123456789012345", "MTIzNDU2Nzg5MDEyMzQ1"},
-		{"1234567890123456", "MTIzNDU2Nzg5MDEyMzQ1Ng"},
-	}, false)
-}
-
-func TestEncodeFileNameBase32768(t *testing.T) {
-	testEncodeFileName(t, "base32768", []EncodingTestCase{
-		{"", ""},
-		{"1", "㼿"},
-		{"12", "㻙ɟ"},
-		{"123", "㻙ⲿ"},
-		{"1234", "㻙ⲍƟ"},
-		{"12345", "㻙ⲍ⍟"},
-		{"123456", "㻙ⲍ⍆ʏ"},
-		{"1234567", "㻙ⲍ⍆觟"},
-		{"12345678", "㻙ⲍ⍆觓ɧ"},
-		{"123456789", "㻙ⲍ⍆觓栯"},
-		{"1234567890", "㻙ⲍ⍆觓栩ɣ"},
-		{"12345678901", "㻙ⲍ⍆觓栩朧"},
-		{"123456789012", "㻙ⲍ⍆觓栩朤ʅ"},
-		{"1234567890123", "㻙ⲍ⍆觓栩朤談"},
-		{"12345678901234", "㻙ⲍ⍆觓栩朤諆ɔ"},
-		{"123456789012345", "㻙ⲍ⍆觓栩朤諆媕"},
-		{"1234567890123456", "㻙ⲍ⍆觓栩朤諆媕䆿"},
-	}, false)
-}
-
-func TestDecodeFileNameBase32(t *testing.T) {
-	enc, err := NewNameEncoding("base32")
-	assert.NoError(t, err, "There should be no error creating name encoder for base32.")
+func TestDecodeFileName(t *testing.T) {
 	// We've tested decoding the valid ones above, now concentrate on the invalid ones
 	for _, test := range []struct {
 		in          string
@@ -148,65 +90,17 @@ func TestDecodeFileNameBase32(t *testing.T) {
 		{"!", base32.CorruptInputError(0)},
 		{"hello=hello", base32.CorruptInputError(5)},
 	} {
-		actual, actualErr := enc.DecodeString(test.in)
+		actual, actualErr := decodeFileName(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
 	}
 }

-func TestDecodeFileNameBase64(t *testing.T) {
-	enc, err := NewNameEncoding("base64")
-	assert.NoError(t, err, "There should be no error creating name encoder for base32.")
-	// We've tested decoding the valid ones above, now concentrate on the invalid ones
-	for _, test := range []struct {
-		in          string
-		expectedErr error
-	}{
-		{"64=", base64.CorruptInputError(2)},
-		{"!", base64.CorruptInputError(0)},
-		{"Hello=Hello", base64.CorruptInputError(5)},
-	} {
-		actual, actualErr := enc.DecodeString(test.in)
-		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
-	}
-}
-
-func TestDecodeFileNameBase32768(t *testing.T) {
-	enc, err := NewNameEncoding("base32768")
-	assert.NoError(t, err, "There should be no error creating name encoder for base32.")
-	// We've tested decoding the valid ones above, now concentrate on the invalid ones
-	for _, test := range []struct {
-		in          string
-		expectedErr error
-	}{
-		{"㼿c", base32768.CorruptInputError(1)},
-		{"!", base32768.CorruptInputError(0)},
-		{"㻙ⲿ=㻙ⲿ", base32768.CorruptInputError(2)},
-	} {
-		actual, actualErr := enc.DecodeString(test.in)
-		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
-	}
-}
-
-func testEncryptSegment(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
-	enc, _ := NewNameEncoding(encoding)
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-	for _, test := range testCases {
-		actual := c.encryptSegment(test.in)
-		assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
-		recovered, err := c.decryptSegment(test.expected)
-		assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
-		assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
-		if caseInsensitive {
-			in := strings.ToUpper(test.expected)
-			recovered, err = c.decryptSegment(in)
-			assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
-			assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
-		}
-	}
-}
-
-func TestEncryptSegmentBase32(t *testing.T) {
-	testEncryptSegment(t, "base32", []EncodingTestCase{
+func TestEncryptSegment(t *testing.T) {
+	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	for _, test := range []struct {
+		in       string
+		expected string
+	}{
 		{"", ""},
 		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
 		{"12", "l42g6771hnv3an9cgc8cr2n1ng"},
@@ -224,61 +118,26 @@ func TestEncryptSegmentBase32(t *testing.T) {
 		{"12345678901234", "moq0uqdlqrblrc5pa5u5c7hq9g"},
 		{"123456789012345", "eeam3li4rnommi3a762h5n7meg"},
 		{"1234567890123456", "mijbj0frqf6ms7frcr6bd9h0env53jv96pjaaoirk7forcgpt70g"},
-	}, true)
-}
+	} {
+		actual := c.encryptSegment(test.in)
+		assert.Equal(t, test.expected, actual, fmt.Sprintf("Testing %q", test.in))
+		recovered, err := c.decryptSegment(test.expected)
+		assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", test.expected))
+		assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", test.expected))
+		in := strings.ToUpper(test.expected)
+		recovered, err = c.decryptSegment(in)
+		assert.NoError(t, err, fmt.Sprintf("Testing reverse %q", in))
+		assert.Equal(t, test.in, recovered, fmt.Sprintf("Testing reverse %q", in))
+	}
+}

-func TestEncryptSegmentBase64(t *testing.T) {
-	testEncryptSegment(t, "base64", []EncodingTestCase{
-		{"", ""},
-		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
-		{"12", "qQUDHOGN_jVdLIMQzYrhvA"},
-		{"123", "1CxFf2Mti1xIPYlGruDh-A"},
-		{"1234", "RL-xOTmsxsG7kuTy2XJUxw"},
-		{"12345", "3FP_GHoeBJdq0yLgaED8IQ"},
-		{"123456", "Xc4T1Gqrs3OVYnrE6dpEWQ"},
-		{"1234567", "uZeEzssOnDWHEOzLqjwpog"},
-		{"12345678", "8noiTP5WkkbEuijsPhOpxQ"},
-		{"123456789", "GeNxgLA0wiaGAKU3U7qL4Q"},
-		{"1234567890", "x1DUhdmqoVWYVBLD3dha-A"},
-		{"12345678901", "iEyP_3BZR6vvv_2WM6NbZw"},
-		{"123456789012", "4OPGvS4SZdjvS568APUaFw"},
-		{"1234567890123", "Y8c5Wr8OhYYUo7fPwdojdg"},
-		{"12345678901234", "tjQPabXW112wuVF8Vh46TA"},
-		{"123456789012345", "c5Vh1kTd8WtIajmFEtz2dA"},
-		{"1234567890123456", "tKa5gfvTzW4d-2bMtqYgdf5Rz-k2ZqViW6HfjbIZ6cE"},
-	}, false)
-}
-
-func TestEncryptSegmentBase32768(t *testing.T) {
-	testEncryptSegment(t, "base32768", []EncodingTestCase{
-		{"", ""},
-		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
-		{"12", "竢朧䉱虃光塬䟛⣡蓟"},
-		{"123", "遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
-		{"1234", "䢟銮䵵狌㐜燳谒颴詟"},
-		{"12345", "钉Ꞇ㖃蚩憶狫朰杜㜿"},
-		{"123456", "啇ᚵⵕ憗䋫➫➓肤卟"},
-		{"1234567", "茫螓翁連劘樓㶔抉矟"},
-		{"12345678", "龝☳䘊辄岅較络㧩襟"},
-		{"123456789", "ⲱ苀㱆犂媐Ꮤ锇惫靟"},
-		{"1234567890", "計宁憕偵匢皫╛纺ꌟ"},
-		{"12345678901", "檆䨿鑫㪺藝ꡖ勇䦛婟"},
-		{"123456789012", "雑頏䰂䲝淚哚鹡魺⪟"},
-		{"1234567890123", "塃璶繁躸圅㔟䗃肃懟"},
-		{"12345678901234", "腺ᕚ崚鏕鏥讥鼌䑺䲿"},
-		{"123456789012345", "怪绕滻蕶肣但⠥荖惟"},
-		{"1234567890123456", "肳哀旚挶靏鏻㾭䱠慟㪳ꏆ賊兲铧敻塹魀ʟ"},
-	}, false)
-}
-
-func TestDecryptSegmentBase32(t *testing.T) {
+func TestDecryptSegment(t *testing.T) {
 	// We've tested the forwards above, now concentrate on the errors
 	longName := make([]byte, 3328)
 	for i := range longName {
 		longName[i] = 'a'
 	}
-	enc, _ := NewNameEncoding("base32")
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true)
 	for _, test := range []struct {
 		in          string
 		expectedErr error
@@ -286,371 +145,49 @@ func TestDecryptSegmentBase32(t *testing.T) {
 		{"64=", ErrorBadBase32Encoding},
 		{"!", base32.CorruptInputError(0)},
 		{string(longName), ErrorTooLongAfterDecode},
-		{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
-		{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
-		{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
+		{encodeFileName([]byte("a")), ErrorNotAMultipleOfBlocksize},
+		{encodeFileName([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
+		{encodeFileName([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
 	} {
 		actual, actualErr := c.decryptSegment(test.in)
 		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
 	}
 }

-func TestDecryptSegmentBase64(t *testing.T) {
-	// We've tested the forwards above, now concentrate on the errors
-	longName := make([]byte, 2816)
-	for i := range longName {
-		longName[i] = 'a'
-	}
-	enc, _ := NewNameEncoding("base64")
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-	for _, test := range []struct {
-		in          string
-		expectedErr error
-	}{
-		{"6H=", base64.CorruptInputError(2)},
-		{"!", base64.CorruptInputError(0)},
-		{string(longName), ErrorTooLongAfterDecode},
-		{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
-		{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
-		{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
-	} {
-		actual, actualErr := c.decryptSegment(test.in)
-		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
-	}
-}
-
-func TestDecryptSegmentBase32768(t *testing.T) {
-	// We've tested the forwards above, now concentrate on the errors
-	longName := strings.Repeat("怪", 1280)
-	enc, _ := NewNameEncoding("base32768")
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-	for _, test := range []struct {
-		in          string
-		expectedErr error
-	}{
-		{"怪=", base32768.CorruptInputError(1)},
-		{"!", base32768.CorruptInputError(0)},
-		{longName, ErrorTooLongAfterDecode},
-		{enc.EncodeToString([]byte("a")), ErrorNotAMultipleOfBlocksize},
-		{enc.EncodeToString([]byte("123456789abcdef")), ErrorNotAMultipleOfBlocksize},
-		{enc.EncodeToString([]byte("123456789abcdef0")), pkcs7.ErrorPaddingTooLong},
-	} {
-		actual, actualErr := c.decryptSegment(test.in)
-		assert.Equal(t, test.expectedErr, actualErr, fmt.Sprintf("in=%q got actual=%q, err = %v %T", test.in, actual, actualErr, actualErr))
-	}
-}
-
-func testStandardEncryptFileName(t *testing.T, encoding string, testCasesEncryptDir []EncodingTestCase, testCasesNoEncryptDir []EncodingTestCase) {
-	enc, _ := NewNameEncoding(encoding)
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-	for _, test := range testCasesEncryptDir {
-		assert.Equal(t, test.expected, c.EncryptFileName(test.in))
-	}
+func TestEncryptFileName(t *testing.T) {
 	// First standard mode
+	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
 	// Standard mode with directory name encryption off
-	c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
-	for _, test := range testCasesNoEncryptDir {
-		assert.Equal(t, test.expected, c.EncryptFileName(test.in))
-	}
-}
-
-func TestStandardEncryptFileNameBase32(t *testing.T) {
-	testStandardEncryptFileName(t, "base32", []EncodingTestCase{
-		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
-		{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
-		{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
-		{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
-		{"1/12-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
-	}, []EncodingTestCase{
-		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
-		{"1/12", "1/l42g6771hnv3an9cgc8cr2n1ng"},
-		{"1/12/123", "1/12/qgm4avr35m5loi1th53ato71v0"},
-		{"1-v2001-02-03-040506-123", "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123"},
-		{"1/12-v2001-02-03-040506-123", "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123"},
-	})
-}
-
-func TestStandardEncryptFileNameBase64(t *testing.T) {
-	testStandardEncryptFileName(t, "base64", []EncodingTestCase{
-		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
-		{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
-		{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
-		{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
-		{"1/12-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
-	}, []EncodingTestCase{
-		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
-		{"1/12", "1/qQUDHOGN_jVdLIMQzYrhvA"},
-		{"1/12/123", "1/12/1CxFf2Mti1xIPYlGruDh-A"},
-		{"1-v2001-02-03-040506-123", "yBxRX25ypgUVyj8MSxJnFw-v2001-02-03-040506-123"},
-		{"1/12-v2001-02-03-040506-123", "1/qQUDHOGN_jVdLIMQzYrhvA-v2001-02-03-040506-123"},
-	})
-}
-
-func TestStandardEncryptFileNameBase32768(t *testing.T) {
-	testStandardEncryptFileName(t, "base32768", []EncodingTestCase{
-		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
-		{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
-		{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
-		{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
-		{"1/12-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
-	}, []EncodingTestCase{
-		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
-		{"1/12", "1/竢朧䉱虃光塬䟛⣡蓟"},
-		{"1/12/123", "1/12/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
-		{"1-v2001-02-03-040506-123", "詮㪗鐮僀伎作㻖㢧⪟-v2001-02-03-040506-123"},
-		{"1/12-v2001-02-03-040506-123", "1/竢朧䉱虃光塬䟛⣡蓟-v2001-02-03-040506-123"},
-	})
-}
-
-func TestNonStandardEncryptFileName(t *testing.T) {
-	// Off mode
-	c, _ := newCipher(NameEncryptionOff, "", "", true, nil)
+	c, _ = newCipher(NameEncryptionStandard, "", "", false)
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptFileName("1"))
+	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptFileName("1/12"))
+	assert.Equal(t, "1/12/qgm4avr35m5loi1th53ato71v0", c.EncryptFileName("1/12/123"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", c.EncryptFileName("1-v2001-02-03-040506-123"))
+	assert.Equal(t, "1/l42g6771hnv3an9cgc8cr2n1ng-v2001-02-03-040506-123", c.EncryptFileName("1/12-v2001-02-03-040506-123"))
+	// Now off mode
+	c, _ = newCipher(NameEncryptionOff, "", "", true)
 	assert.Equal(t, "1/12/123.bin", c.EncryptFileName("1/12/123"))
 	// Obfuscation mode
-	c, _ = newCipher(NameEncryptionObfuscated, "", "", true, nil)
+	c, _ = newCipher(NameEncryptionObfuscated, "", "", true)
 	assert.Equal(t, "49.6/99.23/150.890/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
 	assert.Equal(t, "49.6/99.23/150.890/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
 	assert.Equal(t, "49.6/99.23/150.890/162.uryyB-v2001-02-03-040506-123.GKG", c.EncryptFileName("1/12/123/hello-v2001-02-03-040506-123.txt"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 	// Obfuscation mode with directory name encryption off
-	c, _ = newCipher(NameEncryptionObfuscated, "", "", false, nil)
+	c, _ = newCipher(NameEncryptionObfuscated, "", "", false)
 	assert.Equal(t, "1/12/123/53.!!lipps", c.EncryptFileName("1/12/123/!hello"))
 	assert.Equal(t, "1/12/123/53-v2001-02-03-040506-123.!!lipps", c.EncryptFileName("1/12/123/!hello-v2001-02-03-040506-123"))
 	assert.Equal(t, "161.\u00e4", c.EncryptFileName("\u00a1"))
 	assert.Equal(t, "160.\u03c2", c.EncryptFileName("\u03a0"))
 }

-func testStandardDecryptFileName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
-	enc, _ := NewNameEncoding(encoding)
-	for _, test := range testCases {
-		// Test when dirNameEncrypt=true
-		c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-		actual, actualErr := c.DecryptFileName(test.in)
-		assert.NoError(t, actualErr)
-		assert.Equal(t, test.expected, actual)
-		if caseInsensitive {
-			c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-			actual, actualErr := c.DecryptFileName(strings.ToUpper(test.in))
-			assert.NoError(t, actualErr)
-			assert.Equal(t, test.expected, actual)
-		}
-		// Add a character should raise ErrorNotAMultipleOfBlocksize
-		actual, actualErr = c.DecryptFileName(enc.EncodeToString([]byte("1")) + test.in)
-		assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
-		assert.Equal(t, "", actual)
-		// Test when dirNameEncrypt=false
-		noDirEncryptIn := test.in
-		if strings.LastIndex(test.expected, "/") != -1 {
-			noDirEncryptIn = test.expected[:strings.LastIndex(test.expected, "/")] + test.in[strings.LastIndex(test.in, "/"):]
-		}
-		c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
-		actual, actualErr = c.DecryptFileName(noDirEncryptIn)
-		assert.NoError(t, actualErr)
-		assert.Equal(t, test.expected, actual)
-	}
-}
-
-func TestStandardDecryptFileNameBase32(t *testing.T) {
-	testStandardDecryptFileName(t, "base32", []EncodingTestCase{
-		{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
-		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
-		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
-	}, true)
-}
-
-func TestStandardDecryptFileNameBase64(t *testing.T) {
-	testStandardDecryptFileName(t, "base64", []EncodingTestCase{
-		{"yBxRX25ypgUVyj8MSxJnFw", "1"},
-		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
-		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
-	}, false)
-}
-
-func TestStandardDecryptFileNameBase32768(t *testing.T) {
-	testStandardDecryptFileName(t, "base32768", []EncodingTestCase{
-		{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
-		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
-		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
-	}, false)
-}
-
-func TestNonStandardDecryptFileName(t *testing.T) {
-	for _, encoding := range []string{"base32", "base64", "base32768"} {
-		enc, _ := NewNameEncoding(encoding)
-		for _, test := range []struct {
-			mode           NameEncryptionMode
-			dirNameEncrypt bool
-			in             string
-			expected       string
-			expectedErr    error
-		}{
-			{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
-			{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
-			{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
-			{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
-			{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
-			{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
-			{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
-			{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
-			{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
-			{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
-			{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
-			{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
-		} {
-			c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
-			actual, actualErr := c.DecryptFileName(test.in)
-			what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
-			assert.Equal(t, test.expected, actual, what)
-			assert.Equal(t, test.expectedErr, actualErr, what)
-		}
-	}
-}
-
-func TestEncDecMatches(t *testing.T) {
-	for _, encoding := range []string{"base32", "base64", "base32768"} {
-		enc, _ := NewNameEncoding(encoding)
-		for _, test := range []struct {
-			mode NameEncryptionMode
-			in   string
-		}{
-			{NameEncryptionStandard, "1/2/3/4"},
-			{NameEncryptionOff, "1/2/3/4"},
-			{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
-			{NameEncryptionObfuscated, "Avatar The Last Airbender"},
-		} {
-			c, _ := newCipher(test.mode, "", "", true, enc)
-			out, err := c.DecryptFileName(c.EncryptFileName(test.in))
-			what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
-			assert.Equal(t, out, test.in, what)
-			assert.Equal(t, err, nil, what)
-		}
-	}
-}
-
-func testStandardEncryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase) {
-	enc, _ := NewNameEncoding(encoding)
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-	// First standard mode
-	for _, test := range testCases {
-		assert.Equal(t, test.expected, c.EncryptDirName(test.in))
-	}
-}
-
-func TestStandardEncryptDirNameBase32(t *testing.T) {
-	testStandardEncryptDirName(t, "base32", []EncodingTestCase{
-		{"1", "p0e52nreeaj0a5ea7s64m4j72s"},
-		{"1/12", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng"},
-		{"1/12/123", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0"},
-	})
-}
-
-func TestStandardEncryptDirNameBase64(t *testing.T) {
-	testStandardEncryptDirName(t, "base64", []EncodingTestCase{
-		{"1", "yBxRX25ypgUVyj8MSxJnFw"},
-		{"1/12", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA"},
-		{"1/12/123", "yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A"},
-	})
-}
-
-func TestStandardEncryptDirNameBase32768(t *testing.T) {
-	testStandardEncryptDirName(t, "base32768", []EncodingTestCase{
-		{"1", "詮㪗鐮僀伎作㻖㢧⪟"},
-		{"1/12", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟"},
-		{"1/12/123", "詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ"},
-	})
-}
-
-func TestNonStandardEncryptDirName(t *testing.T) {
-	for _, encoding := range []string{"base32", "base64", "base32768"} {
-		enc, _ := NewNameEncoding(encoding)
-		c, _ := newCipher(NameEncryptionStandard, "", "", false, enc)
-		assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
-		assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
-		// Now off mode
-		c, _ = newCipher(NameEncryptionOff, "", "", true, enc)
-		assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
-	}
-}
-
-func testStandardDecryptDirName(t *testing.T, encoding string, testCases []EncodingTestCase, caseInsensitive bool) {
-	enc, _ := NewNameEncoding(encoding)
-	for _, test := range testCases {
-		// Test dirNameEncrypt=true
-		c, _ := newCipher(NameEncryptionStandard, "", "", true, enc)
-		actual, actualErr := c.DecryptDirName(test.in)
-		assert.Equal(t, test.expected, actual)
-		assert.NoError(t, actualErr)
-		if caseInsensitive {
-			actual, actualErr := c.DecryptDirName(strings.ToUpper(test.in))
-			assert.Equal(t, actual, test.expected)
-			assert.NoError(t, actualErr)
-		}
-		actual, actualErr = c.DecryptDirName(enc.EncodeToString([]byte("1")) + test.in)
-		assert.Equal(t, "", actual)
-		assert.Equal(t, ErrorNotAMultipleOfBlocksize, actualErr)
-		// Test dirNameEncrypt=false
-		c, _ = newCipher(NameEncryptionStandard, "", "", false, enc)
-		actual, actualErr = c.DecryptDirName(test.in)
-		assert.Equal(t, test.in, actual)
-		assert.NoError(t, actualErr)
-		actual, actualErr = c.DecryptDirName(test.expected)
-		assert.Equal(t, test.expected, actual)
-		assert.NoError(t, actualErr)
-	}
-}
-
-/*
-	enc, _ := NewNameEncoding(encoding)
-	for _, test := range []struct {
-		mode           NameEncryptionMode
-		dirNameEncrypt bool
-		in             string
-		expected       string
-		expectedErr    error
-	}{
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
-		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
-		{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
-		{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
-	} {
-		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, enc)
-		actual, actualErr := c.DecryptDirName(test.in)
-		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
-		assert.Equal(t, test.expected, actual, what)
-		assert.Equal(t, test.expectedErr, actualErr, what)
-	}
-*/
-
-func TestStandardDecryptDirNameBase32(t *testing.T) {
-	testStandardDecryptDirName(t, "base32", []EncodingTestCase{
-		{"p0e52nreeaj0a5ea7s64m4j72s", "1"},
-		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12"},
-		{"p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123"},
-	}, true)
-}
-
-func TestStandardDecryptDirNameBase64(t *testing.T) {
-	testStandardDecryptDirName(t, "base64", []EncodingTestCase{
-		{"yBxRX25ypgUVyj8MSxJnFw", "1"},
-		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA", "1/12"},
-		{"yBxRX25ypgUVyj8MSxJnFw/qQUDHOGN_jVdLIMQzYrhvA/1CxFf2Mti1xIPYlGruDh-A", "1/12/123"},
-	}, false)
-}
-
-func TestStandardDecryptDirNameBase32768(t *testing.T) {
-	testStandardDecryptDirName(t, "base32768", []EncodingTestCase{
-		{"詮㪗鐮僀伎作㻖㢧⪟", "1"},
-		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟", "1/12"},
-		{"詮㪗鐮僀伎作㻖㢧⪟/竢朧䉱虃光塬䟛⣡蓟/遶㞟鋅缕袡鲅ⵝ蝁ꌟ", "1/12/123"},
-	}, false)
-}
-
-func TestNonStandardDecryptDirName(t *testing.T) {
+func TestDecryptFileName(t *testing.T) {
 	for _, test := range []struct {
 		mode           NameEncryptionMode
 		dirNameEncrypt bool
@@ -658,11 +195,87 @@ func TestNonStandardDecryptDirName(t *testing.T) {
 		expected       string
 		expectedErr    error
 	}{
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
+		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
+		{NameEncryptionStandard, false, "1/12/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s-v2001-02-03-040506-123", "1-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123", nil},
+		{NameEncryptionOff, true, "1/12/123.bix", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionOff, true, ".bin", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionOff, true, "1/12/123-v2001-02-03-040506-123.bin", "1/12/123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123", nil},
+		{NameEncryptionOff, true, "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt.bin", "1/12/123-v1970-01-01-010101-123-v2001-02-03-040506-123.txt", nil},
+		{NameEncryptionObfuscated, true, "!.hello", "hello", nil},
+		{NameEncryptionObfuscated, true, "hello", "", ErrorNotAnEncryptedFile},
+		{NameEncryptionObfuscated, true, "161.\u00e4", "\u00a1", nil},
+		{NameEncryptionObfuscated, true, "160.\u03c2", "\u03a0", nil},
+		{NameEncryptionObfuscated, false, "1/12/123/53.!!lipps", "1/12/123/!hello", nil},
+		{NameEncryptionObfuscated, false, "1/12/123/53-v2001-02-03-040506-123.!!lipps", "1/12/123/!hello-v2001-02-03-040506-123", nil},
 	} {
+		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
+		actual, actualErr := c.DecryptFileName(test.in)
+		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
+		assert.Equal(t, test.expected, actual, what)
+		assert.Equal(t, test.expectedErr, actualErr, what)
+	}
+}
+
+func TestEncDecMatches(t *testing.T) {
+	for _, test := range []struct {
+		mode NameEncryptionMode
+		in   string
+	}{
+		{NameEncryptionStandard, "1/2/3/4"},
+		{NameEncryptionOff, "1/2/3/4"},
+		{NameEncryptionObfuscated, "1/2/3/4/!hello\u03a0"},
+		{NameEncryptionObfuscated, "Avatar The Last Airbender"},
+	} {
+		c, _ := newCipher(test.mode, "", "", true)
+		out, err := c.DecryptFileName(c.EncryptFileName(test.in))
+		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
+		assert.Equal(t, out, test.in, what)
+		assert.Equal(t, err, nil, what)
+	}
+}
+
+func TestEncryptDirName(t *testing.T) {
+	// First standard mode
+	c, _ := newCipher(NameEncryptionStandard, "", "", true)
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s", c.EncryptDirName("1"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", c.EncryptDirName("1/12"))
+	assert.Equal(t, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", c.EncryptDirName("1/12/123"))
+	// Standard mode with dir name encryption off
+	c, _ = newCipher(NameEncryptionStandard, "", "", false)
+	assert.Equal(t, "1/12", c.EncryptDirName("1/12"))
+	assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
+	// Now off mode
+	c, _ = newCipher(NameEncryptionOff, "", "", true)
+	assert.Equal(t, "1/12/123", c.EncryptDirName("1/12/123"))
+}
+
+func TestDecryptDirName(t *testing.T) {
+	for _, test := range []struct {
+		mode           NameEncryptionMode
+		dirNameEncrypt bool
+		in             string
+		expected       string
+		expectedErr    error
+	}{
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s", "1", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "1/12", nil},
+		{NameEncryptionStandard, true, "p0e52nreeAJ0A5EA7S64M4J72S/L42G6771HNv3an9cgc8cr2n1ng", "1/12", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng/qgm4avr35m5loi1th53ato71v0", "1/12/123", nil},
+		{NameEncryptionStandard, true, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1/qgm4avr35m5loi1th53ato71v0", "", ErrorNotAMultipleOfBlocksize},
+		{NameEncryptionStandard, false, "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", "p0e52nreeaj0a5ea7s64m4j72s/l42g6771hnv3an9cgc8cr2n1ng", nil},
+		{NameEncryptionStandard, false, "1/12/123", "1/12/123", nil},
+		{NameEncryptionOff, true, "1/12/123.bin", "1/12/123.bin", nil},
+		{NameEncryptionOff, true, "1/12/123", "1/12/123", nil},
+		{NameEncryptionOff, true, ".bin", ".bin", nil},
+	} {
-		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt, nil)
+		c, _ := newCipher(test.mode, "", "", test.dirNameEncrypt)
 		actual, actualErr := c.DecryptDirName(test.in)
 		what := fmt.Sprintf("Testing %q (mode=%v)", test.in, test.mode)
 		assert.Equal(t, test.expected, actual, what)
@@ -671,7 +284,7 @@ func TestNonStandardDecryptDirName(t *testing.T) {
 }

 func TestEncryptedSize(t *testing.T) {
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true)
 	for _, test := range []struct {
 		in       int64
 		expected int64
@@ -695,7 +308,7 @@ func TestEncryptedSize(t *testing.T) {

 func TestDecryptedSize(t *testing.T) {
 	// Test the errors since we tested the reverse above
-	c, _ := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, _ := newCipher(NameEncryptionStandard, "", "", true)
 	for _, test := range []struct {
 		in          int64
 		expectedErr error
@@ -1066,7 +679,7 @@ func (z *zeroes) Read(p []byte) (n int, err error) {

 // Test encrypt decrypt with different buffer sizes
 func testEncryptDecrypt(t *testing.T, bufSize int, copySize int64) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 	c.cryptoRand = &zeroes{} // zero out the nonce
 	buf := make([]byte, bufSize)
@@ -1136,7 +749,7 @@ func TestEncryptData(t *testing.T) {
 		{[]byte{1}, file1},
 		{[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, file16},
 	} {
-		c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+		c, err := newCipher(NameEncryptionStandard, "", "", true)
 		assert.NoError(t, err)
 		c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -1159,7 +772,7 @@ func TestEncryptData(t *testing.T) {
 }

 func TestNewEncrypter(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -1175,12 +788,13 @@ func TestNewEncrypter(t *testing.T) {
 	fh, err = c.newEncrypter(z, nil)
 	assert.Nil(t, fh)
 	assert.Error(t, err, "short read of nonce")
 }

 // Test the stream returning 0, io.ErrUnexpectedEOF - this used to
 // cause a fatal loop
 func TestNewEncrypterErrUnexpectedEOF(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)

 	in := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -1209,7 +823,7 @@ func (c *closeDetector) Close() error {
 }

 func TestNewDecrypter(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 	c.cryptoRand = newRandomSource(1e8) // nodge the crypto rand generator

@@ -1252,7 +866,7 @@ func TestNewDecrypter(t *testing.T) {

 // Test the stream returning 0, io.ErrUnexpectedEOF
 func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)

 	in2 := &readers.ErrorReader{Err: io.ErrUnexpectedEOF}
@@ -1268,7 +882,7 @@ func TestNewDecrypterErrUnexpectedEOF(t *testing.T) {
 }

 func TestNewDecrypterSeekLimit(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)
 	c.cryptoRand = &zeroes{} // nodge the crypto rand generator

@@ -1474,7 +1088,7 @@ func TestDecrypterCalculateUnderlying(t *testing.T) {
 }

 func TestDecrypterRead(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)

 	// Test truncating the file at each possible point
@@ -1538,7 +1152,7 @@ func TestDecrypterRead(t *testing.T) {
 }

 func TestDecrypterClose(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)

 	cd := newCloseDetector(bytes.NewBuffer(file16))
@@ -1576,7 +1190,7 @@ func TestDecrypterClose(t *testing.T) {
 }

 func TestPutGetBlock(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)

 	block := c.getBlock()
@@ -1587,7 +1201,7 @@ func TestPutGetBlock(t *testing.T) {
 }

 func TestKey(t *testing.T) {
-	c, err := newCipher(NameEncryptionStandard, "", "", true, nil)
+	c, err := newCipher(NameEncryptionStandard, "", "", true)
 	assert.NoError(t, err)

 	// Check zero keys OK
@@ -116,29 +116,6 @@ names, or for debugging purposes.`,
|
||||
Help: "Encrypt file data.",
|
||||
},
|
||||
},
|
||||
}, {
|
||||
Name: "filename_encoding",
|
||||
Help: `How to encode the encrypted filename as a text string.

This option can help shorten the encrypted filename. The most
suitable option depends on how your remote counts filename
length and whether it is case sensitive.`,
Default: "base32",
Examples: []fs.OptionExample{
{
Value: "base32",
Help: "Encode using base32. Suitable for all remotes.",
},
{
Value: "base64",
Help: "Encode using base64. Suitable for case sensitive remotes.",
},
{
Value: "base32768",
Help: "Encode using base32768. Suitable if your remote counts UTF-16 or\nUnicode codepoints instead of UTF-8 byte length. (E.g. OneDrive)",
},
},
Advanced: true,
}},
})
}
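For background on this option (a standalone sketch, not part of the diff): the same 32 bytes of encrypted name material come out noticeably shorter in base64 than in base32, which is the motivation for the filename_encoding choice.

package main

import (
	"encoding/base32"
	"encoding/base64"
	"fmt"
)

func main() {
	const cipherLen = 32 // bytes of encrypted file name material

	b32 := base32.HexEncoding.WithPadding(base32.NoPadding)
	b64 := base64.RawURLEncoding

	// base32 spends 8 characters per 5 bytes; base64 only 4 per 3 bytes,
	// so base64 suits case sensitive, length-limited remotes better.
	fmt.Println("base32 length:", b32.EncodedLen(cipherLen)) // 52
	fmt.Println("base64 length:", b64.EncodedLen(cipherLen)) // 43
}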
@@ -163,11 +140,7 @@ func newCipherForConfig(opt *Options) (*Cipher, error) {
return nil, fmt.Errorf("failed to decrypt password2: %w", err)
}
}
enc, err := NewNameEncoding(opt.FilenameEncoding)
if err != nil {
return nil, err
}
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption, enc)
cipher, err := newCipher(mode, password, salt, opt.DirectoryNameEncryption)
if err != nil {
return nil, fmt.Errorf("failed to make cipher: %w", err)
}
@@ -256,7 +229,6 @@ type Options struct {
Password2 string `config:"password2"`
ServerSideAcrossConfigs bool `config:"server_side_across_configs"`
ShowMapping bool `config:"show_mapping"`
FilenameEncoding string `config:"filename_encoding"`
}

// Fs represents a wrapped fs.Fs

@@ -23,13 +23,13 @@ func TestIntegration(t *testing.T) {
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
NilObject: (*crypt.Object)(nil),
UnimplementableFsMethods: []string{"OpenWriterAt", "Resume"},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

// TestStandard runs integration tests against the remote
func TestStandardBase32(t *testing.T) {
func TestStandard(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
@@ -49,48 +49,6 @@ func TestStandardBase32(t *testing.T) {
})
}

func TestStandardBase64(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base64"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

func TestStandardBase32768(t *testing.T) {
if *fstest.RemoteName != "" {
t.Skip("Skipping as -remote set")
}
tempdir := filepath.Join(os.TempDir(), "rclone-crypt-test-standard")
name := "TestCrypt"
fstests.Run(t, &fstests.Opt{
RemoteName: name + ":",
NilObject: (*crypt.Object)(nil),
ExtraConfig: []fstests.ExtraConfigItem{
{Name: name, Key: "type", Value: "crypt"},
{Name: name, Key: "remote", Value: tempdir},
{Name: name, Key: "password", Value: obscure.MustObscure("potato")},
{Name: name, Key: "filename_encryption", Value: "standard"},
{Name: name, Key: "filename_encoding", Value: "base32768"},
},
UnimplementableFsMethods: []string{"OpenWriterAt"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}

// TestOff runs integration tests against the remote
func TestOff(t *testing.T) {
if *fstest.RemoteName != "" {

@@ -294,7 +294,7 @@ func (f *Fs) dumpLine(r *hashRecord, path string, include bool, err error) strin
if hashVal == "" || err != nil {
hashVal = "-"
}
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType, false), hashVal)
hashVal = fmt.Sprintf("%-*s", hash.Width(hashType), hashVal)
hashes = append(hashes, hashName+":"+hashVal)
}
hashesStr := strings.Join(hashes, " ")
@@ -263,98 +263,6 @@ func (f *Fs) Purge(ctx context.Context, dir string) error {
return f.client.RemoveAll(realpath)
}

// Move src to this remote using server-side move operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantMove
func (f *Fs) Move(ctx context.Context, src fs.Object, remote string) (fs.Object, error) {
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't move - not same remote type")
return nil, fs.ErrorCantMove
}

// Get the real paths from the remote specs:
sourcePath := srcObj.fs.realpath(srcObj.remote)
targetPath := f.realpath(remote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)

// Make sure the target folder exists:
dirname := path.Dir(targetPath)
err := f.client.MkdirAll(dirname, 0755)
if err != nil {
return nil, err
}

// Do the move
// Note that the underlying HDFS library hard-codes Overwrite=True, but this is expected rclone behaviour.
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return nil, err
}

// Look up the resulting object
info, err := f.client.Stat(targetPath)
if err != nil {
return nil, err
}

// And return it:
return &Object{
fs: f,
remote: remote,
size: info.Size(),
modTime: info.ModTime(),
}, nil
}

// DirMove moves src, srcRemote to this remote at dstRemote
// using server-side move operations.
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantDirMove
//
// If destination exists then return fs.ErrorDirExists
func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string) (err error) {
srcFs, ok := src.(*Fs)
if !ok {
return fs.ErrorCantDirMove
}

// Get the real paths from the remote specs:
sourcePath := srcFs.realpath(srcRemote)
targetPath := f.realpath(dstRemote)
fs.Debugf(f, "rename [%s] to [%s]", sourcePath, targetPath)

// Check if the destination exists:
info, err := f.client.Stat(targetPath)
if err == nil {
fs.Debugf(f, "target directory already exists, IsDir = [%t]", info.IsDir())
return fs.ErrorDirExists
}

// Make sure the target's parent folder exists:
dirname := path.Dir(targetPath)
err = f.client.MkdirAll(dirname, 0755)
if err != nil {
return err
}

// Do the move
err = f.client.Rename(sourcePath, targetPath)
if err != nil {
return err
}

return nil
}

// About gets quota information from the Fs
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
info, err := f.client.StatFs()
@@ -410,6 +318,4 @@ var (
_ fs.Purger = (*Fs)(nil)
_ fs.PutStreamer = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Mover = (*Fs)(nil)
_ fs.DirMover = (*Fs)(nil)
)
@@ -69,10 +69,6 @@ const (
teliaCloudTokenURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/token"
teliaCloudAuthURL = "https://cloud-auth.telia.se/auth/realms/telia_se/protocol/openid-connect/auth"
teliaCloudClientID = "desktop"

tele2CloudTokenURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/token"
tele2CloudAuthURL = "https://mittcloud-auth.tele2.se/auth/realms/comhem/protocol/openid-connect/auth"
tele2CloudClientID = "desktop"
)

// Register with Fs
@@ -135,9 +131,6 @@ func Config(ctx context.Context, name string, m configmap.Mapper, config fs.Conf
}, {
Value: "telia",
Help: "Telia Cloud authentication.\nUse this if you are using Telia Cloud.",
}, {
Value: "tele2",
Help: "Tele2 Cloud authentication.\nUse this if you are using Tele2 Cloud.",
}})
case "auth_type_done":
// Jump to next state according to config chosen
@@ -245,21 +238,6 @@ machines.`)
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "tele2": // tele2 cloud config
m.Set("configVersion", fmt.Sprint(configVersion))
m.Set(configClientID, tele2CloudClientID)
m.Set(configTokenURL, tele2CloudTokenURL)
return oauthutil.ConfigOut("choose_device", &oauthutil.Options{
OAuth2Config: &oauth2.Config{
Endpoint: oauth2.Endpoint{
AuthURL: tele2CloudAuthURL,
TokenURL: tele2CloudTokenURL,
},
ClientID: tele2CloudClientID,
Scopes: []string{"openid", "jotta-default", "offline_access"},
RedirectURL: oauthutil.RedirectLocalhostURL,
},
})
case "choose_device":
return fs.ConfigConfirm("choose_device_query", false, "config_non_standard", "Use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?")
case "choose_device_query":
@@ -12,7 +12,6 @@ import (
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"time"
@@ -25,7 +24,6 @@ import (
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/atexit"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/file"
"github.com/rclone/rclone/lib/readers"
@@ -232,7 +230,6 @@ type Fs struct {
precision time.Duration // precision of local filesystem
warnedMu sync.Mutex // used for locking access to 'warned'.
warned map[string]struct{} // whether we have warned about this string
hashState map[string]string // set in resume(), used to restore hash state

// do os.Lstat or os.Stat
lstat func(name string) (os.FileInfo, error)
@@ -270,12 +267,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}

f := &Fs{
name: name,
opt: *opt,
warned: make(map[string]struct{}),
hashState: make(map[string]string),
dev: devUnset,
lstat: os.Lstat,
name: name,
opt: *opt,
warned: make(map[string]struct{}),
dev: devUnset,
lstat: os.Lstat,
}
f.root = cleanRootPath(root, f.opt.NoUNC, f.opt.Enc)
f.features = (&fs.Features{
@@ -1119,7 +1115,6 @@ func (nwc nopWriterCloser) Close() error {
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
var out io.WriteCloser
var hasher *hash.MultiHasher
var resumeOpt *fs.OptionResume

for _, option := range options {
switch x := option.(type) {
@@ -1130,32 +1125,6 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
}
case *fs.OptionResume:
resumeOpt = option.(*fs.OptionResume)
if resumeOpt.Pos != 0 {
fs.Logf(o, "Resuming at byte position: %d", resumeOpt.Pos)
// Discard bytes that already exist on backend
_, err := io.CopyN(ioutil.Discard, in, resumeOpt.Pos)
if err != nil {
return err
}
hashType := o.fs.Hashes().GetOne()
if resumeOpt.Hash != "" {
if err = hashType.Set(resumeOpt.Hash); err != nil {
return err
}
if !o.fs.Hashes().Contains(hashType) {
return fmt.Errorf("unsupported resume hash: %q", resumeOpt.Hash)
}
}
hasher, err = hash.NewMultiHasherTypes(hash.NewHashSet(hashType))
if err != nil {
return err
}
if err := hasher.RestoreHashState(hashType, o.fs.hashState[o.remote]); err != nil {
return err
}
}
}
}
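The io.CopyN fast-forward used above, shown in isolation (a standalone sketch, not part of the diff): bytes that already exist on the destination are consumed into a discard sink so the subsequent copy starts at the resume position.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	in := strings.NewReader("0123456789")
	// Skip the 4 bytes that (in the resume case) already reached the
	// destination; the reader is now positioned at the resume offset.
	if _, err := io.CopyN(ioutil.Discard, in, 4); err != nil {
		panic(err) // stream shorter than the resume position
	}
	rest, _ := ioutil.ReadAll(in)
	fmt.Println(string(rest)) // "456789"
}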
@@ -1169,12 +1138,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// If it is a translated link, just read in the contents, and
// then create a symlink
if !o.translatedLink {
var f *os.File
if resumeOpt != nil && resumeOpt.Pos != 0 {
f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_APPEND, 0666)
} else {
f, err = file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
}
f, err := file.OpenFile(o.path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
if err != nil {
if runtime.GOOS == "windows" && os.IsPermission(err) {
// If permission denied on Windows might be trying to update a
@@ -1188,7 +1152,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
return err
}
}
if !o.fs.opt.NoPreAllocate && resumeOpt != nil && resumeOpt.Pos == 0 {
if !o.fs.opt.NoPreAllocate {
// Pre-allocate the file for performance reasons
err = file.PreAllocate(src.Size(), f)
if err != nil {
@@ -1209,46 +1173,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
in = io.TeeReader(in, hasher)
}

var cacheingWg sync.WaitGroup // Used to halt code execution while resume cache is written
var copyWg sync.WaitGroup // Ensure that io.Copy has returned before writing resume data
copyWg.Add(1)
// Context for read so that we can handle io.Copy being interrupted
ctxr, cancel := context.WithCancel(ctx)
// Create exit handler during Copy so that resume data can be written if interrupted
var atexitOnce sync.Once
atexitHandle := atexit.Register(func() {
atexitOnce.Do(func() {
if resumeOpt == nil || hasher == nil {
return
}
// If OptionResume was passed, call SetID to prepare for future resumes
// ID is the number of bytes written to the destination
// Stops the copy so cache is consistent with remote
cacheingWg.Add(1)
cancel()
copyWg.Wait()
fs.Infof(o, "Updating resume cache")
fileInfo, statErr := o.fs.lstat(o.path)
if statErr != nil {
fs.Logf(o, "Updating resume cache failed: %v", statErr)
return
}
writtenStr := strconv.FormatInt(fileInfo.Size(), 10)
hashType := hasher.Hashes().GetOne()
hashState, err := hasher.GetHashState(hashType)
if err == nil {
err = resumeOpt.SetID(ctx, writtenStr, hashType.String(), hashState)
}
if err != nil {
fs.Logf(o, "Updating resume cache failed: %v", err)
}
})
})
cr := readers.NewContextReader(ctxr, in)
_, err = io.Copy(out, cr)
copyWg.Done()
atexit.Unregister(atexitHandle)
if errors.Is(err, context.Canceled) {
// If resume data is being written we want to wait here for the program to exit
cacheingWg.Wait()
}

_, err = io.Copy(out, in)
closeErr := out.Close()
if err == nil {
err = closeErr
@@ -1413,44 +1338,9 @@ func cleanRootPath(s string, noUNC bool, enc encoder.MultiEncoder) string {
return s
}

// Resume checks whether the (remote, ID) pair is valid and returns
// the point the file should be resumed from or an error.
func (f *Fs) Resume(ctx context.Context, remote, ID, hashName, hashState string) (Pos int64, err error) {
cachedPos, err := strconv.ParseInt(ID, 10, 64)
if err != nil {
return 0, err
}
// Compare hash of partial file on remote with partial hash in cache
remoteObject, err := f.NewObject(ctx, remote)
if err != nil {
return 0, err
}
if remoteObject.Size() != cachedPos {
return 0, errors.New("size on remote does not match resume cache")
}
hashType := hash.NameToType(hashName)
remoteHash, err := remoteObject.Hash(ctx, hashType)
if err != nil {
return 0, err
}
cachedHash, err := hash.SumPartialHash(hashName, hashState)
if err != nil {
return 0, err
}
// Hashes match, attempt resume
if cachedHash == remoteHash {
f.hashState[remote] = hashState
return cachedPos, nil
}
// No valid position found, restart from beginning
fs.Infof(remote, "Not resuming as cached hash state did not match hash state on remote")
return 0, nil
}

// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Resumer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.Mover = &Fs{}
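A caller-side sketch (a hypothetical helper, not from the diff) of how the optional fs.Resumer interface implemented above is typically probed before use, assuming rclone's fs package is imported:

// resumePos returns the byte offset to resume from, or 0 if the
// backend cannot resume or the cached state fails validation.
func resumePos(ctx context.Context, f fs.Fs, remote, id, hashName, hashState string) int64 {
	resumer, ok := f.(fs.Resumer) // backends opt in by implementing Resume
	if !ok {
		return 0
	}
	pos, err := resumer.Resume(ctx, remote, id, hashName, hashState)
	if err != nil {
		return 0 // any validation failure means "start from scratch"
	}
	return pos
}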
@@ -1020,7 +1020,10 @@ This ACL is used for creating objects and if bucket_acl isn't set, for creating
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl

Note that this ACL is applied when server-side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
doesn't copy the ACL from the source but rather writes a fresh one.

If the ACL is set as the string "unset" then rclone won't set the ACL
header so it will use the default of the cloud provider.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL.\nNo one else has access rights (default).",
@@ -1990,6 +1993,9 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.ACL == "unset" {
opt.ACL = ""
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
@@ -2526,7 +2532,9 @@ func (f *Fs) makeBucket(ctx context.Context, bucket string) error {
return f.cache.Create(bucket, func() error {
req := s3.CreateBucketInput{
Bucket: &bucket,
ACL: &f.opt.BucketACL,
}
if f.opt.BucketACL != "" {
req.ACL = &f.opt.BucketACL
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
@@ -2591,7 +2599,9 @@ func pathEscape(s string) string {
// method
func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPath, srcBucket, srcPath string, src *Object) error {
req.Bucket = &dstBucket
req.ACL = &f.opt.ACL
if f.opt.ACL != "" {
req.ACL = &f.opt.ACL
}
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
req.CopySource = &source
@@ -3673,11 +3683,13 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
mimeType := fs.MimeType(ctx, src)
req := s3.PutObjectInput{
Bucket: &bucket,
ACL: &o.fs.opt.ACL,
Key: &bucketPath,
ContentType: &mimeType,
Metadata: metadata,
}
if o.fs.opt.ACL != "" {
req.ACL = &o.fs.opt.ACL
}
if md5sum != "" {
req.ContentMD5 = &md5sum
}
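The shape of the ACL fix in the hunks above, reduced to its essentials. putReq and applyACL are simplified stand-ins for illustration, not the real AWS SDK types:

// The SDK request structs take the ACL as a *string: a nil pointer
// means "send no ACL header at all", so the provider default applies.
type putReq struct{ ACL *string }

func applyACL(req *putReq, acl string) {
	if acl != "" { // "" (from the "unset" setting) leaves the header off
		req.ACL = &acl
	}
}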
@@ -152,11 +152,11 @@ different. This issue affects among others Synology NAS boxes.

Shared folders can be found in directories representing volumes

rclone sync /home/local/directory remote:/directory --sftp-path-override /volume2/directory
rclone sync /home/local/directory remote:/directory --ssh-path-override /volume2/directory

Home directory can be found in a shared folder called "home"

rclone sync /home/local/directory remote:/home/directory --sftp-path-override /volume1/homes/USER/directory`,
rclone sync /home/local/directory remote:/home/directory --ssh-path-override /volume1/homes/USER/directory`,
Advanced: true,
}, {
Name: "set_modtime",
@@ -1155,8 +1155,8 @@ func (f *Fs) Hashes() hash.Set {
}

changed := false
md5Works := checkHash([]string{"md5sum", "md5 -r", "rclone md5sum"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r", "rclone sha1sum"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)
md5Works := checkHash([]string{"md5sum", "md5 -r"}, "d41d8cd98f00b204e9800998ecf8427e", &f.opt.Md5sumCommand, &changed)
sha1Works := checkHash([]string{"sha1sum", "sha1 -r"}, "da39a3ee5e6b4b0d3255bfef95601890afd80709", &f.opt.Sha1sumCommand, &changed)

if changed {
f.m.Set("md5sum_command", f.opt.Md5sumCommand)

@@ -18,7 +18,7 @@ func TestIntegration(t *testing.T) {
}
fstests.Run(t, &fstests.Opt{
RemoteName: *fstest.RemoteName,
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles", "Resume"},
UnimplementableFsMethods: []string{"OpenWriterAt", "DuplicateFiles"},
UnimplementableObjectMethods: []string{"MimeType"},
})
}
@@ -2,6 +2,7 @@ package hashsum

import (
"context"
"errors"
"fmt"
"os"

@@ -25,11 +26,11 @@ var (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
AddHashsumFlags(cmdFlags)
AddHashFlags(cmdFlags)
}

// AddHashsumFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
func AddHashsumFlags(cmdFlags *pflag.FlagSet) {
// AddHashFlags is a convenience function to add the command flags OutputBase64 and DownloadFlag to hashsum, md5sum, sha1sum
func AddHashFlags(cmdFlags *pflag.FlagSet) {
flags.BoolVarP(cmdFlags, &OutputBase64, "base64", "", OutputBase64, "Output base64 encoded hashsum")
flags.StringVarP(cmdFlags, &HashsumOutfile, "output-file", "", HashsumOutfile, "Output hashsums to a file rather than the terminal")
flags.StringVarP(cmdFlags, &ChecksumFile, "checkfile", "C", ChecksumFile, "Validate hashes against a given SUM file instead of printing them")
@@ -40,7 +41,7 @@ func AddHashsumFlags(cmdFlags *pflag.FlagSet) {
func GetHashsumOutput(filename string) (out *os.File, close func(), err error) {
out, err = os.Create(filename)
if err != nil {
err = fmt.Errorf("failed to open output file %v: %w", filename, err)
err = fmt.Errorf("Failed to open output file %v: %w", filename, err)
return nil, nil, err
}

@@ -54,32 +55,6 @@ func GetHashsumOutput(filename string) (out *os.File, close func(), err error) {
return out, close, nil
}

// CreateFromStdinArg checks args and produces hashsum from standard input if it is requested
func CreateFromStdinArg(ht hash.Type, args []string, startArg int) (bool, error) {
var stdinArg bool
if len(args) == startArg {
// Missing arg: Always read from stdin
stdinArg = true
} else if len(args) > startArg && args[startArg] == "-" {
// Special arg: Read from stdin only if there is data available
if fi, _ := os.Stdin.Stat(); fi.Mode()&os.ModeCharDevice == 0 {
stdinArg = true
}
}
if !stdinArg {
return false, nil
}
if HashsumOutfile == "" {
return true, operations.HashSumStream(ht, OutputBase64, os.Stdin, nil)
}
output, close, err := GetHashsumOutput(HashsumOutfile)
if err != nil {
return true, err
}
defer close()
return true, operations.HashSumStream(ht, OutputBase64, os.Stdin, output)
}

var commandDefinition = &cobra.Command{
Use: "hashsum <hash> remote:path",
Short: `Produces a hashsum file for all the objects in the path.`,
@@ -93,11 +68,6 @@ not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling any hash for any remote.

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).

Run without a hash to see the list of all supported hashes, e.g.

$ rclone hashsum
@@ -113,6 +83,8 @@ Note that hash names are case insensitive and values are output in lower case.
if len(args) == 0 {
fmt.Print(hash.HelpString(0))
return nil
} else if len(args) == 1 {
return errors.New("need hash type and remote")
}
var ht hash.Type
err := ht.Set(args[0])
@@ -120,10 +92,8 @@ Note that hash names are case insensitive and values are output in lower case.
fmt.Println(hash.HelpString(0))
return err
}
if found, err := CreateFromStdinArg(ht, args, 1); found {
return err
}
fsrc := cmd.NewFsSrc(args[1:])

cmd.Run(false, false, command, func() error {
if ChecksumFile != "" {
fsum, sumFile := cmd.NewFsFile(ChecksumFile)
@@ -13,7 +13,7 @@ import (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
hashsum.AddHashsumFlags(cmdFlags)
hashsum.AddHashFlags(cmdFlags)
}

var commandDefinition = &cobra.Command{
@@ -27,17 +27,9 @@ By default, the hash is requested from the remote. If MD5 is
not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling MD5 for any remote.

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 1, command, args)
if found, err := hashsum.CreateFromStdinArg(hash.MD5, args, 0); found {
return err
}
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
if hashsum.ChecksumFile != "" {
@@ -54,6 +46,5 @@ as a relative path).
defer close()
return operations.HashLister(context.Background(), hash.MD5, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
})
return nil
},
}
@@ -100,17 +100,6 @@ be used with sshd via ~/.ssh/authorized_keys, for example:

restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...

On the client you need to set "--transfers 1" when using --stdio.
Otherwise multiple instances of the rclone server are started by OpenSSH
which can lead to "corrupted on transfer" errors. This is the case because
the client chooses indiscriminately which server to send commands to while
the servers all have different views of the state of the filing system.

The "restrict" in authorized_keys prevents SHA1SUMs and MD5SUMs from being
used. Omitting "restrict" and using --sftp-path-override to enable
checksumming is possible but less secure and you could use the SFTP server
provided by OpenSSH in this case.

` + vfs.Help + proxy.Help,
Run: func(command *cobra.Command, args []string) {
var f fs.Fs
@@ -13,7 +13,7 @@ import (
func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
hashsum.AddHashsumFlags(cmdFlags)
hashsum.AddHashFlags(cmdFlags)
}

var commandDefinition = &cobra.Command{
@@ -27,20 +27,9 @@ By default, the hash is requested from the remote. If SHA-1 is
not supported by the remote, no hash will be returned. With the
download flag, the file will be downloaded from the remote and
hashed locally enabling SHA-1 for any remote.

This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).

This command can also hash data received on STDIN, if not passing
a remote:path.
`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 1, command, args)
if found, err := hashsum.CreateFromStdinArg(hash.SHA1, args, 0); found {
return err
}
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
cmd.Run(false, false, command, func() error {
if hashsum.ChecksumFile != "" {
@@ -57,6 +46,5 @@ a remote:path.
defer close()
return operations.HashLister(context.Background(), hash.SHA1, hashsum.OutputBase64, hashsum.DownloadFlag, fsrc, output)
})
return nil
},
}
@@ -547,6 +547,3 @@ put them back in again.` >}}
* bbabich <bbabich@datamossa.com>
* David <dp.davide.palma@gmail.com>
* Borna Butkovic <borna@favicode.net>
* Fredric Arklid <fredric.arklid@consid.se>
* Andy Jackson <Andrew.Jackson@bl.uk>
* Sinan Tan <i@tinytangent.com>

@@ -373,14 +373,6 @@ total path length which rclone is more likely to breach using
characters in length issues should not be encountered, irrespective of
cloud storage provider.

An experimental advanced option `filename_encoding` is now provided to
address this problem to a certain degree.
For cloud storage systems with case sensitive file names (e.g. Google Drive),
`base64` can be used to reduce file name length.
For cloud storage systems using UTF-16 to store file names internally
(e.g. OneDrive), `base32768` can be used to drastically reduce
file name length.

An alternative, future rclone file name encryption mode may tolerate
backend provider path length limits.

@@ -41,8 +41,8 @@ Note that the web interface may refer to this token as a JottaCli token.

### Legacy authentication

If you are using one of the whitelabel versions (e.g. from Elkjøp) you may not have the option
to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select
If you are using one of the whitelabel versions (e.g. from Elkjøp or Tele2) you may not have the option
to generate a CLI token. In this case you'll have to use the legacy authentication. To do this select
yes when the setup asks for legacy authentication and enter your username and password.
The rest of the setup is identical to the default setup.

@@ -53,13 +53,6 @@ additionally uses a separate authentication flow where the username is generated
rclone to use Telia Cloud, choose Telia Cloud authentication in the setup. The rest of the setup is
identical to the default setup.

### Tele2 Cloud authentication

As the Tele2-Com Hem merger was completed this authentication can be used for former Com Hem Cloud and
Tele2 Cloud customers as no support for creating a CLI token exists, and additionally uses a separate
authentication flow where the username is generated internally. To set up rclone to use Tele2 Cloud,
choose Tele2 Cloud authentication in the setup. The rest of the setup is identical to the default setup.

## Configuration

Here is an example of how to make a remote called `remote` with the default setup. First run:
@@ -422,7 +422,7 @@ upon backend-specific capabilities.
| Google Cloud Storage | Yes | Yes | No | No | No | Yes | Yes | No | No | No |
| Google Drive | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes | Yes |
| Google Photos | No | No | No | No | No | No | No | No | No | No |
| HDFS | Yes | No | Yes | Yes | No | No | Yes | No | Yes | Yes |
| HDFS | Yes | No | No | No | No | No | Yes | No | Yes | Yes |
| HTTP | No | No | No | No | No | No | No | No | No | Yes |
| Hubic | Yes † | Yes | No | No | No | Yes | Yes | No | Yes | No |
| Jottacloud | Yes | Yes | Yes | Yes | Yes | Yes | No | Yes | Yes | Yes |

@@ -130,8 +130,6 @@ type ConfigInfo struct {
FsCacheExpireDuration time.Duration
FsCacheExpireInterval time.Duration
DisableHTTP2 bool
MaxResumeCacheSize SizeSuffix
ResumeCutoff SizeSuffix
HumanReadable bool
KvLockTime time.Duration // maximum time to keep key-value database locked by process
}
@@ -165,8 +163,6 @@ func NewConfig() *ConfigInfo {
c.TPSLimitBurst = 1
c.MaxTransfer = -1
c.MaxBacklog = 10000
c.MaxResumeCacheSize = SizeSuffix(100 * 1024)
c.ResumeCutoff = -1
// We do not want to set the default here. We use this variable being empty as part of the fall-through of options.
// c.StatsOneLineDateFormat = "2006/01/02 15:04:05 - "
c.MultiThreadCutoff = SizeSuffix(250 * 1024 * 1024)

@@ -132,8 +132,6 @@ func AddFlags(ci *fs.ConfigInfo, flagSet *pflag.FlagSet) {
flags.BoolVarP(flagSet, &ci.RefreshTimes, "refresh-times", "", ci.RefreshTimes, "Refresh the modtime of remote files")
flags.BoolVarP(flagSet, &ci.NoConsole, "no-console", "", ci.NoConsole, "Hide console window (supported on Windows only)")
flags.StringVarP(flagSet, &dscp, "dscp", "", "", "Set DSCP value to connections, value or name, e.g. CS1, LE, DF, AF21")
flags.FVarP(flagSet, &ci.MaxResumeCacheSize, "max-resume-cache-size", "", "The maximum size of the cache used to store data necessary for resuming uploads. When the storage grows beyond this size, the oldest resume data will be deleted. (default 100k)")
flags.FVarP(flagSet, &ci.ResumeCutoff, "resume-cutoff", "", "If set, attempt to resume all partial uploads larger than this size. (default off)")
flags.DurationVarP(flagSet, &ci.FsCacheExpireDuration, "fs-cache-expire-duration", "", ci.FsCacheExpireDuration, "Cache remotes for this long (0 to disable caching)")
flags.DurationVarP(flagSet, &ci.FsCacheExpireInterval, "fs-cache-expire-interval", "", ci.FsCacheExpireInterval, "Interval to check for expired remotes")
flags.BoolVarP(flagSet, &ci.DisableHTTP2, "disable-http2", "", ci.DisableHTTP2, "Disable HTTP/2 in the global transport")
@@ -163,10 +163,6 @@ type Features struct {
// Shutdown the backend, closing any background tasks and any
// cached connections.
Shutdown func(ctx context.Context) error

// Resume checks whether the (remote, ID) pair is valid and returns
// the point the file should be resumed from or an error.
Resume func(ctx context.Context, remote, ID, hashName, hashState string) (Pos int64, err error)
}

// Disable nil's out the named feature. If it isn't found then it
@@ -294,9 +290,6 @@ func (ft *Features) Fill(ctx context.Context, f Fs) *Features {
if do, ok := f.(Shutdowner); ok {
ft.Shutdown = do.Shutdown
}
if do, ok := f.(Resumer); ok {
ft.Resume = do.Resume
}
return ft.DisableList(GetConfig(ctx).DisableFeatures)
}

@@ -643,13 +636,6 @@ type Shutdowner interface {
Shutdown(ctx context.Context) error
}

// Resumer is an optional interface for Fs
type Resumer interface {
// Resume checks whether the (remote, ID) pair is valid and returns
// the point the file should be resumed from or an error.
Resume(ctx context.Context, remote, ID, hashName, hashState string) (Pos int64, err error)
}

// ObjectsChan is a channel of Objects
type ObjectsChan chan Object
1
fs/fs.go
@@ -48,7 +48,6 @@ var (
ErrorNotImplemented = errors.New("optional feature not implemented")
ErrorCommandNotFound = errors.New("command not found")
ErrorFileNameTooLong = errors.New("file name too long")
ErrorCantResume = errors.New("can't resume file upload")
)

// CheckClose is a utility function used to check the return from

@@ -136,14 +136,12 @@ func NewClient(ctx context.Context) *http.Client {
// Transport is our http Transport which wraps an http.Transport
// * Sets the User Agent
// * Does logging
// * Updates metrics
type Transport struct {
*http.Transport
dump fs.DumpFlags
filterRequest func(req *http.Request)
userAgent string
headers []*fs.HTTPOption
metrics *Metrics
}

// newTransport wraps the http.Transport passed in and logs all
@@ -154,7 +152,6 @@ func newTransport(ci *fs.ConfigInfo, transport *http.Transport) *Transport {
dump: ci.Dump,
userAgent: ci.UserAgent,
headers: ci.Headers,
metrics: DefaultMetrics,
}
}

@@ -286,9 +283,6 @@ func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error
fs.Debugf(nil, "%s", separatorResp)
logMutex.Unlock()
}
// Update metrics
t.metrics.onResponse(req, resp)

if err == nil {
checkServerTime(req, resp)
}
@@ -1,51 +0,0 @@
package fshttp

import (
"fmt"
"net/http"

"github.com/prometheus/client_golang/prometheus"
)

// Metrics provide Transport HTTP level metrics.
type Metrics struct {
StatusCode *prometheus.CounterVec
}

// NewMetrics creates a new metrics instance, the instance shall be assigned to
// DefaultMetrics before any processing takes place.
func NewMetrics(namespace string) *Metrics {
return &Metrics{
StatusCode: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: "http",
Name: "status_code",
}, []string{"host", "method", "code"}),
}
}

// DefaultMetrics specifies metrics used for new Transports.
var DefaultMetrics = (*Metrics)(nil)

// Collectors returns all prometheus metrics as collectors for registration.
func (m *Metrics) Collectors() []prometheus.Collector {
if m == nil {
return nil
}
return []prometheus.Collector{
m.StatusCode,
}
}

func (m *Metrics) onResponse(req *http.Request, resp *http.Response) {
if m == nil {
return
}

var statusCode = 0
if resp != nil {
statusCode = resp.StatusCode
}

m.StatusCode.WithLabelValues(req.Host, req.Method, fmt.Sprint(statusCode)).Inc()
}
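A plausible wiring for this Metrics type, based only on the functions shown above; the Prometheus registration side is an assumption, not something the diff shows:

// Create the metrics, make them the default so newTransport picks them
// up, and register the collectors with the default Prometheus registry.
m := fshttp.NewMetrics("rclone")
fshttp.DefaultMetrics = m
for _, collector := range m.Collectors() {
	prometheus.MustRegister(collector)
}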
@@ -4,8 +4,6 @@ import (
"crypto/md5"
"crypto/sha1"
"crypto/sha256"
"encoding"
"encoding/base64"
"encoding/hex"
"errors"
"fmt"
@@ -94,11 +92,8 @@ func Supported() Set {
}

// Width returns the width in characters for any HashType
func Width(hashType Type, base64Encoded bool) int {
func Width(hashType Type) int {
if hash := type2hash[hashType]; hash != nil {
if base64Encoded {
return base64.URLEncoding.EncodedLen(hash.width / 2)
}
return hash.width
}
return 0
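What the two-argument form (the first of the pair above) returns for MD5, as a quick check: the hex width is 32 characters, while padded base64 of the 16 raw bytes is 24.

fmt.Println(hash.Width(hash.MD5, false)) // 32 hex characters
fmt.Println(hash.Width(hash.MD5, true))  // 24 = base64.URLEncoding.EncodedLen(32 / 2)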
@@ -229,14 +224,6 @@ func (m *MultiHasher) Write(p []byte) (n int, err error) {
return n, err
}

// Hashes returns accumulated hash types.
func (m *MultiHasher) Hashes() (set Set) {
for ht := range m.h {
set.Add(ht)
}
return
}

// Sums returns the sums of all accumulated hashes as hex encoded
// strings.
func (m *MultiHasher) Sums() map[Type]string {
@@ -256,84 +243,11 @@ func (m *MultiHasher) Sum(hashType Type) ([]byte, error) {
return h.Sum(nil), nil
}

// SumString returns the specified hash from the multihasher as a hex or base64 encoded string
func (m *MultiHasher) SumString(hashType Type, base64Encoded bool) (string, error) {
sum, err := m.Sum(hashType)
if err != nil {
return "", err
}
if base64Encoded {
return base64.URLEncoding.EncodeToString(sum), nil
}
return hex.EncodeToString(sum), nil
}

// Size returns the number of bytes written
func (m *MultiHasher) Size() int64 {
return m.size
}

// GetHashState returns the partial hash state for the given hash type encoded as a string
func (m *MultiHasher) GetHashState(hashType Type) (string, error) {
h, ok := m.h[hashType]
if !ok {
return "", ErrUnsupported
}
marshaler, ok := h.(encoding.BinaryMarshaler)
if !ok {
return "", errors.New(hashType.String() + " does not implement encoding.BinaryMarshaler")
}
data, err := marshaler.MarshalBinary()
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(data), nil
}

// RestoreHashState restores the partial hash state for the passed hash type
func (m *MultiHasher) RestoreHashState(hashType Type, hashState string) error {
partialHashState, err := base64.StdEncoding.DecodeString(hashState)
if err != nil {
return err
}
unmarshaler, ok := m.h[hashType].(encoding.BinaryUnmarshaler)
if ok {
if err := unmarshaler.UnmarshalBinary(partialHashState); err != nil {
return err
}
}
return nil
}

// SumPartialHash returns the hash of the partial hash state
func SumPartialHash(hashName, hashState string) (string, error) {
partialHashDef, ok := name2hash[hashName]
if !ok {
return "", ErrUnsupported
}
partialHash := partialHashDef.newFunc()
partialHashState, err := base64.StdEncoding.DecodeString(hashState)
if err != nil {
return "", err
}
unmarshaler, ok := partialHash.(encoding.BinaryUnmarshaler)
if ok {
if err := unmarshaler.UnmarshalBinary(partialHashState); err != nil {
return "", err
}
}
return hex.EncodeToString(partialHash.Sum(nil)), nil
}

// NameToType returns the requested hash type or None if the hash type isn't supported
func NameToType(hashName string) Type {
hashDef, ok := name2hash[hashName]
if !ok {
return None
}
return hashDef.hashType
}

// A Set Indicates one or more hash types.
type Set int
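The stdlib mechanism GetHashState and RestoreHashState build on, demonstrated standalone: since Go 1.10 the crypto hash states implement encoding.BinaryMarshaler/BinaryUnmarshaler, so a half-finished hash can be snapshotted and resumed later.

package main

import (
	"crypto/md5"
	"encoding"
	"encoding/base64"
	"fmt"
)

func main() {
	h := md5.New()
	h.Write([]byte("first half "))

	// Snapshot the partial state, as GetHashState does (the type
	// assertion panics on Go versions before 1.10).
	state, _ := h.(encoding.BinaryMarshaler).MarshalBinary()
	saved := base64.StdEncoding.EncodeToString(state)

	// Later: restore into a fresh hasher, as RestoreHashState does.
	h2 := md5.New()
	raw, _ := base64.StdEncoding.DecodeString(saved)
	h2.(encoding.BinaryUnmarshaler).UnmarshalBinary(raw)
	h2.Write([]byte("second half"))

	full := md5.Sum([]byte("first half second half"))
	fmt.Printf("%x\n%x\n", h2.Sum(nil), full[:]) // identical sums
}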
@@ -3,19 +3,13 @@
package fs

import (
"context"
"encoding/json"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"sort"
"strconv"
"strings"

"github.com/rclone/rclone/fs/hash"
"github.com/rclone/rclone/lib/cacheroot"
)

// OpenOption is an interface describing options for Open
@@ -236,145 +230,6 @@ func (o *HashesOption) Mandatory() bool {
return false
}

// OptionResume defines a Put/Upload for doing resumes
type OptionResume struct {
ID string // resume this ID if set
Pos int64 // and resume from this position
Hash string
Src Object
F Fs
Remote string
CacheCleaned bool
CacheDir string
}

// SetID will be called by backend's Put/Update function if the object's upload
// could be resumed upon failure
//
// SetID takes the passed resume ID, hash state, hash name and Fingerprint of the object and stores it in
// --cache-dir so that future Copy operations can resume the upload if it fails
func (o *OptionResume) SetID(ctx context.Context, ID, hashName, hashState string) error {
ci := GetConfig(ctx)
// Get the Fingerprint of the src object so that future Copy operations can ensure the
// object hasn't changed before resuming an upload
fingerprint := Fingerprint(ctx, o.Src, true)
data, err := marshalResumeJSON(ctx, fingerprint, ID, hashName, hashState)
if err != nil {
return fmt.Errorf("failed to marshal data JSON: %w", err)
}
if len(data) < int(ci.MaxResumeCacheSize) {
// Each remote will have its own directory for cached resume files
dirPath, _, err := cacheroot.CreateCacheRoot(o.CacheDir, o.F.Name(), o.F.Root(), "resume")
if err != nil {
return err
}
err = os.MkdirAll(dirPath, os.ModePerm)
if err != nil {
return fmt.Errorf("failed to create cache directory %v: %w", dirPath, err)
}
// Write resume data to disk
cachePath := filepath.Join(dirPath, o.Remote)
cacheFile, err := os.Create(cachePath)
if err != nil {
return fmt.Errorf("failed to create cache file %v: %w", cachePath, err)
}
defer func() {
_ = cacheFile.Close()
}()
_, errWrite := cacheFile.Write(data)
if errWrite != nil {
return fmt.Errorf("failed to write JSON to file: %w", errWrite)
}
}
if !o.CacheCleaned {
rootCacheDir := filepath.Join(o.CacheDir, "resume")
if err := cleanResumeCache(ctx, rootCacheDir); err != nil {
return fmt.Errorf("failed to clean resume cache: %w", err)
}
}
o.CacheCleaned = true
return nil
}

// ResumeJSON is a struct for storing resume info in cache
type ResumeJSON struct {
Fingerprint string `json:"fprint"`
ID string `json:"id"`
HashName string `json:"hname"`
HashState string `json:"hstate"`
}

func marshalResumeJSON(ctx context.Context, fprint, id, hashName, hashState string) ([]byte, error) {
resumedata := ResumeJSON{
Fingerprint: fprint,
ID: id,
HashName: hashName,
HashState: hashState,
}
data, err := json.Marshal(&resumedata)
return data, err
}

// cleanResumeCache checks the size of the resume cache and removes the oldest resume files if it exceeds the limit
func cleanResumeCache(ctx context.Context, rootCacheDir string) error {
ci := GetConfig(ctx)
var paths []string
pathsWithInfo := make(map[string]os.FileInfo)
totalCacheSize := int64(0)
walkErr := filepath.Walk(rootCacheDir,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
// Empty subdirectories in the resume cache dir can be removed
// (os.IsExist covers the "directory not empty" error for non-empty ones)
removeErr := os.Remove(path)
if removeErr != nil && !os.IsNotExist(removeErr) && !os.IsExist(removeErr) {
return fmt.Errorf("failed to remove empty subdirectory: %s: %w", path, removeErr)
}
return nil
}
paths = append(paths, path)
pathsWithInfo[path] = info
totalCacheSize += info.Size()
return nil
})
if walkErr != nil {
return fmt.Errorf("error walking through cache when cleaning cache dir: %w", walkErr)
}
if totalCacheSize > int64(ci.MaxResumeCacheSize) {
sort.Slice(paths, func(i, j int) bool {
return pathsWithInfo[paths[i]].ModTime().Before(pathsWithInfo[paths[j]].ModTime())
})
for _, p := range paths {
if totalCacheSize < int64(ci.MaxResumeCacheSize) {
break
}
if err := os.Remove(p); err != nil {
return fmt.Errorf("error removing oldest cache file: %s: %w", p, err)
}
totalCacheSize -= pathsWithInfo[p].Size()
Debugf(p, "Successfully removed oldest cache file")
}
}
return nil
}

// Header formats the option as an http header
func (o *OptionResume) Header() (key string, value string) {
return "", ""
}

// String formats the option into human readable form
func (o *OptionResume) String() string {
return fmt.Sprintf("OptionResume(ID:%v, Pos:%v)", o.ID, o.Pos)
}

// Mandatory returns whether the option must be parsed or can be ignored
func (o *OptionResume) Mandatory() bool {
return false
}

// NullOption defines an Option which does nothing
type NullOption struct {
}
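For illustration only, what a cached resume record could look like once marshalled; it assumes the ResumeJSON type above, and every field value below is invented for the example:

func exampleResumeRecord() string {
	data, _ := json.Marshal(ResumeJSON{
		Fingerprint: "1048576,2021-11-04 12:00:00 +0000 UTC", // invented fingerprint
		ID:          "524288",                                // bytes already written
		HashName:    "md5",
		HashState:   "aGFzaC1zdGF0ZQ==", // base64 partial hash state
	})
	return string(data)
	// {"fprint":"...","id":"524288","hname":"md5","hstate":"aGFzaC1zdGF0ZQ=="}
}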
@@ -1,23 +0,0 @@
//go:build !windows
// +build !windows

package operations

import (
"os"
"os/exec"
"syscall"
)

func sendInterrupt() error {
p, err := os.FindProcess(syscall.Getpid())
if err != nil {
return err
}
err = p.Signal(os.Interrupt)
return err
}

func setupCmd(cmd *exec.Cmd) {
// Only needed for windows
}
@@ -1,32 +0,0 @@
//go:build windows
// +build windows

package operations

import (
"os/exec"
"syscall"
)

// Credit: https://github.com/golang/go/blob/6125d0c4265067cdb67af1340bf689975dd128f4/src/os/signal/signal_windows_test.go#L18
func sendInterrupt() error {
d, e := syscall.LoadDLL("kernel32.dll")
if e != nil {
return e
}
p, e := d.FindProc("GenerateConsoleCtrlEvent")
if e != nil {
return e
}
r, _, e := p.Call(syscall.CTRL_BREAK_EVENT, uintptr(syscall.Getpid()))
if r == 0 {
return e
}
return nil
}

func setupCmd(cmd *exec.Cmd) {
(*cmd).SysProcAttr = &syscall.SysProcAttr{
CreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,
}
}
@@ -33,7 +33,6 @@ import (
|
||||
"github.com/rclone/rclone/fs/object"
|
||||
"github.com/rclone/rclone/fs/walk"
|
||||
"github.com/rclone/rclone/lib/atexit"
|
||||
"github.com/rclone/rclone/lib/cacheroot"
|
||||
"github.com/rclone/rclone/lib/pacer"
|
||||
"github.com/rclone/rclone/lib/random"
|
||||
"github.com/rclone/rclone/lib/readers"
|
||||
@@ -365,11 +364,6 @@ func CommonHash(ctx context.Context, fa, fb fs.Info) (hash.Type, *fs.HashesOptio
|
||||
// be nil.
|
||||
func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
|
||||
ci := fs.GetConfig(ctx)
|
||||
var resumeOpt *fs.OptionResume
|
||||
if f.Features().Resume != nil {
|
||||
resumeOpt = createResumeOpt(ctx, f, remote, src)
|
||||
}
|
||||
|
||||
tr := accounting.Stats(ctx).NewTransfer(src)
|
||||
defer func() {
|
||||
tr.Done(ctx, err)
|
||||
@@ -467,10 +461,6 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
|
||||
wrappedSrc = NewOverrideRemote(src, remote)
|
||||
}
|
||||
options := []fs.OpenOption{hashOption}
|
||||
// Appends OptionResume if it was set
|
||||
if resumeOpt != nil {
|
||||
options = append(options, resumeOpt)
|
||||
}
|
||||
for _, option := range ci.UploadHeaders {
|
||||
options = append(options, option)
|
||||
}
|
||||
@@ -485,17 +475,6 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
|
||||
if err == nil {
|
||||
newDst = dst
|
||||
err = closeErr
|
||||
cacheParent := config.GetCacheDir()
|
||||
// Remove resume cache file (if one was created) when Put/Upload is successful
|
||||
cacheDir, _, err := cacheroot.CreateCacheRoot(cacheParent, f.Name(), f.Root(), "resume")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cacheFile := filepath.Join(cacheDir, remote)
|
||||
removeErr := os.Remove(cacheFile)
|
||||
if err != nil && !os.IsNotExist(removeErr) {
|
||||
return nil, fmt.Errorf("failed to remove resume cache file after upload: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
@@ -967,7 +946,7 @@ func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
// hashSum returns the human-readable hash for ht passed in. This may
// be UNSUPPORTED or ERROR. If it isn't returning a valid hash it will
// return an error.
func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag bool, o fs.Object) (string, error) {
func hashSum(ctx context.Context, ht hash.Type, downloadFlag bool, o fs.Object) (string, error) {
	var sum string
	var err error

@@ -989,7 +968,7 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
		}
		in, err := NewReOpen(ctx, o, fs.GetConfig(ctx).LowLevelRetries, options...)
		if err != nil {
			return "ERROR", fmt.Errorf("failed to open file %v: %w", o, err)
			return "ERROR", fmt.Errorf("Failed to open file %v: %w", o, err)
		}

		// Account and buffer the transfer
@@ -998,20 +977,21 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
		// Setup hasher
		hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
		if err != nil {
			return "UNSUPPORTED", fmt.Errorf("hash unsupported: %w", err)
			return "UNSUPPORTED", fmt.Errorf("Hash unsupported: %w", err)
		}

		// Copy to hasher, downloading the file and passing directly to hash
		_, err = io.Copy(hasher, in)
		if err != nil {
			return "ERROR", fmt.Errorf("failed to copy file to hasher: %w", err)
			return "ERROR", fmt.Errorf("Failed to copy file to hasher: %w", err)
		}

		// Get hash as hex or base64 encoded string
		sum, err = hasher.SumString(ht, base64Encoded)
		// Get hash and encode as hex
		byteSum, err := hasher.Sum(ht)
		if err != nil {
			return "ERROR", fmt.Errorf("hasher returned an error: %w", err)
			return "ERROR", fmt.Errorf("Hasher returned an error: %w", err)
		}
		sum = hex.EncodeToString(byteSum)
	} else {
		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
		defer func() {
@@ -1019,15 +999,11 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
		}()

		sum, err = o.Hash(ctx, ht)
		if base64Encoded {
			hexBytes, _ := hex.DecodeString(sum)
			sum = base64.URLEncoding.EncodeToString(hexBytes)
		}
		if err == hash.ErrUnsupported {
			return "", fmt.Errorf("hash unsupported: %w", err)
			return "", fmt.Errorf("Hash unsupported: %w", err)
		}
		if err != nil {
return "", fmt.Errorf("failed to get hash %v from backend: %w", ht, err)
|
||||
return "", fmt.Errorf("Failed to get hash %v from backend: %v: %w", ht, err, err)
|
||||
}
	}

@@ -1038,7 +1014,10 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
// Updated to handle both standard hex encoding and base64
// Updated to perform multiple hashes concurrently
func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
	width := hash.Width(ht, outputBase64)
	width := hash.Width(ht)
	if outputBase64 {
		width = base64.URLEncoding.EncodedLen(width / 2)
	}
	concurrencyControl := make(chan struct{}, fs.GetConfig(ctx).Transfers)
	var wg sync.WaitGroup
	err := ListFn(ctx, f, func(o fs.Object) {
@@ -1049,11 +1028,15 @@ func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFl
				<-concurrencyControl
				wg.Done()
			}()
			sum, err := hashSum(ctx, ht, outputBase64, downloadFlag, o)
			sum, err := hashSum(ctx, ht, downloadFlag, o)
			if err != nil {
				fs.Errorf(o, "%v", fs.CountError(err))
				return
			}
			if outputBase64 {
				hexBytes, _ := hex.DecodeString(sum)
				sum = base64.URLEncoding.EncodeToString(hexBytes)
			}
			syncFprintf(w, "%*s  %s\n", width, sum, o.Remote())
		}()
	})
@@ -1061,28 +1044,6 @@ func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFl
	return err
}

// HashSumStream outputs a line compatible with md5sum to w based on the
// input stream in and the hash type ht passed in. If outputBase64 is
// set then the hash will be base64 instead of hexadecimal.
func HashSumStream(ht hash.Type, outputBase64 bool, in io.ReadCloser, w io.Writer) error {
	hasher, err := hash.NewMultiHasherTypes(hash.NewHashSet(ht))
	if err != nil {
		return fmt.Errorf("hash unsupported: %w", err)
	}
	written, err := io.Copy(hasher, in)
	fs.Debugf(nil, "Creating %s hash of %d bytes read from input stream", ht, written)
	if err != nil {
		return fmt.Errorf("failed to copy input to hasher: %w", err)
	}
	sum, err := hasher.SumString(ht, outputBase64)
	if err != nil {
		return fmt.Errorf("hasher returned an error: %w", err)
	}
	width := hash.Width(ht, outputBase64)
	syncFprintf(w, "%*s  -\n", width, sum)
	return nil
}
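
As a usage sketch, HashSumStream can be driven from any io.ReadCloser; the expected output below matches the MD5 test vector used in TestHashStream further down (import paths are the rclone packages touched by this diff):

package main

import (
	"io/ioutil"
	"os"
	"strings"

	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
)

func main() {
	in := ioutil.NopCloser(strings.NewReader("Hello world!"))
	// Hex output (outputBase64 = false); prints: 86fb269d190d2c85f6e0468ceca42a20  -
	_ = operations.HashSumStream(hash.MD5, false, in, os.Stdout)
}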

// Count counts the objects and their sizes in the Fs
//
// Obeys includes and excludes

@@ -316,52 +316,6 @@ func TestHashSumsWithErrors(t *testing.T) {
	// TODO mock an unreadable file
}

func TestHashStream(t *testing.T) {
	reader := strings.NewReader("")
	in := ioutil.NopCloser(reader)
	out := &bytes.Buffer{}
	for _, test := range []struct {
		input      string
		ht         hash.Type
		wantHex    string
		wantBase64 string
	}{
		{
			input:      "",
			ht:         hash.MD5,
			wantHex:    "d41d8cd98f00b204e9800998ecf8427e  -\n",
			wantBase64: "1B2M2Y8AsgTpgAmY7PhCfg==  -\n",
		},
		{
			input:      "",
			ht:         hash.SHA1,
			wantHex:    "da39a3ee5e6b4b0d3255bfef95601890afd80709  -\n",
			wantBase64: "2jmj7l5rSw0yVb_vlWAYkK_YBwk=  -\n",
		},
		{
			input:      "Hello world!",
			ht:         hash.MD5,
			wantHex:    "86fb269d190d2c85f6e0468ceca42a20  -\n",
			wantBase64: "hvsmnRkNLIX24EaM7KQqIA==  -\n",
		},
		{
			input:      "Hello world!",
			ht:         hash.SHA1,
			wantHex:    "d3486ae9136e7856bc42212385ea797094475802  -\n",
			wantBase64: "00hq6RNueFa8QiEjhep5cJRHWAI=  -\n",
		},
	} {
		reader.Reset(test.input)
		require.NoError(t, operations.HashSumStream(test.ht, false, in, out))
		assert.Equal(t, test.wantHex, out.String())
		_, _ = reader.Seek(0, io.SeekStart)
		out.Reset()
		require.NoError(t, operations.HashSumStream(test.ht, true, in, out))
		assert.Equal(t, test.wantBase64, out.String())
		out.Reset()
	}
}

func TestSuffixName(t *testing.T) {
	ctx := context.Background()
	ctx, ci := fs.AddConfig(ctx)

@@ -1,73 +0,0 @@
package operations

import (
	"context"
	"encoding/json"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/lib/cacheroot"
)

// createResumeOpt creates an OptionResume that will be passed to Put/Upload
func createResumeOpt(ctx context.Context, f fs.Fs, remote string, src fs.Object) (resumeOpt *fs.OptionResume) {
	ci := fs.GetConfig(ctx)
	cacheParent := config.GetCacheDir()
	resumeOpt = &fs.OptionResume{ID: "", Pos: 0, Src: src, F: f, Remote: remote, CacheCleaned: false, CacheDir: cacheParent}
	if ci.ResumeCutoff >= 0 {
		cacheDir, _, err := cacheroot.CreateCacheRoot(cacheParent, f.Name(), f.Root(), "resume")
		if err != nil {
			return nil
		}
		cacheFile := filepath.Join(cacheDir, remote)
		resumeID, hashName, hashState, attemptResume := readResumeCache(ctx, f, src, cacheFile)
		if attemptResume {
			fs.Debugf(f, "Existing resume cache file found: %s. A resume will now be attempted.", cacheFile)
			position, resumeErr := f.Features().Resume(ctx, remote, resumeID, hashName, hashState)
			if resumeErr != nil {
				fs.Errorf(src, "Resume canceled: %v", resumeErr)
			} else if position > int64(ci.ResumeCutoff) {
				resumeOpt.Pos = position
				resumeOpt.Hash = hashName
			}
		}
	}
	return resumeOpt
}
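
The f.Features().Resume call above is only non-nil for backends that opted in. A sketch of the method shape implied by this call site, assuming the feature signature matches the arguments used here (the interface name Resumer is invented for illustration; the real wiring lives in the fs package's optional features):

// Resumer is a hypothetical name for the optional feature used above.
type Resumer interface {
	// Resume checks any state cached for remote and reports the byte
	// position an interrupted upload can safely restart from.
	Resume(ctx context.Context, remote, resumeID, hashName, hashState string) (position int64, err error)
}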

// readResumeCache checks to see if a resume ID has been cached for the source object.
// If it finds one it returns it along with true to signal a resume can be attempted
func readResumeCache(ctx context.Context, f fs.Fs, src fs.Object, cacheName string) (resumeID, hashName, hashState string, attemptResume bool) {
	existingCacheFile, statErr := os.Open(cacheName)
	defer func() {
		_ = existingCacheFile.Close()
	}()
	if !os.IsNotExist(statErr) {
		rawData, readErr := ioutil.ReadAll(existingCacheFile)
		if readErr == nil {
			existingFingerprint, resumeID, hashName, hashState, unmarshalErr := unmarshalResumeJSON(ctx, rawData)
			if unmarshalErr != nil {
				fs.Debugf(f, "Failed to unmarshal Resume JSON: %s. Resume will not be attempted.", unmarshalErr.Error())
			} else if existingFingerprint != "" {
				// Check if the src object has changed by comparing new Fingerprint to Fingerprint in cache file
				fingerprint := fs.Fingerprint(ctx, src, true)
				if existingFingerprint == fingerprint {
					return resumeID, hashName, hashState, true
				}
			}
		}
	}
	return "", "", "", false
}

func unmarshalResumeJSON(ctx context.Context, data []byte) (fprint, id, hashName, hashState string, err error) {
	var resumedata fs.ResumeJSON
	err = json.Unmarshal(data, &resumedata)
	if err != nil {
		return "", "", "", "", err
	}
	return resumedata.Fingerprint, resumedata.ID, resumedata.HashName, resumedata.HashState, nil
}
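
readResumeCache and unmarshalResumeJSON imply a writer side that a backend runs while uploading. A minimal sketch, assuming only the fs.ResumeJSON fields referenced above; uploadID and encodedHashState are hypothetical values the backend would supply:

func writeResumeCache(ctx context.Context, src fs.Object, cacheFile, uploadID, hashName, encodedHashState string) error {
	data, err := json.Marshal(fs.ResumeJSON{
		Fingerprint: fs.Fingerprint(ctx, src, true), // compared against the source on the read side
		ID:          uploadID,
		HashName:    hashName,
		HashState:   encodedHashState,
	})
	if err != nil {
		return err
	}
	return ioutil.WriteFile(cacheFile, data, 0600)
}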
@@ -1,163 +0,0 @@
package operations

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"os/exec"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fstest"
	"github.com/rclone/rclone/fstest/mockobject"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

type interruptReader struct {
	once sync.Once
	r    io.Reader
}

// Read sends an OS specific interrupt signal and then reads 1 byte at a time
func (r *interruptReader) Read(b []byte) (n int, err error) {
	r.once.Do(func() {
		_ = sendInterrupt()
	})
	buffer := make([]byte, 1)
	n, err = r.r.Read(buffer)
	b[0] = buffer[0]
	// Simulate duration of a larger read without needing to test with a large file
	// Allows for the interrupt to be handled before Copy completes
	time.Sleep(time.Microsecond * 10)
	return n, err
}

// this is a wrapper for a mockobject with a custom Open function
//
// n indicates the number of bytes to read before sending an
// interrupt signal
type resumeTestObject struct {
	fs.Object
	n int64
}

// Open opens the file for read. Call Close() on the returned io.ReadCloser
//
// The Reader will signal an interrupt after reading n bytes, then continue to read 1 byte at a time.
// If TestResume is successful, the interrupt will be processed and reads will be cancelled before running
// out of bytes to read
func (o *resumeTestObject) Open(ctx context.Context, options ...fs.OpenOption) (io.ReadCloser, error) {
	rc, err := o.Object.Open(ctx, options...)
	if err != nil {
		return nil, err
	}
	r := io.MultiReader(&io.LimitedReader{R: rc, N: o.n}, &interruptReader{r: rc})
	// Wrap with Close in a new readCloser
	rc = readCloser{Reader: r, Closer: rc}
	return rc, nil
}

func makeContent(t *testing.T, size int) []byte {
	content := make([]byte, size)
	r := rand.New(rand.NewSource(42))
	_, err := io.ReadFull(r, content)
	assert.NoError(t, err)
	return content
}

func TestResume(t *testing.T) {
	ctx := context.Background()
	r := fstest.NewRun(t)
	defer r.Finalise()
	ci := fs.GetConfig(ctx)
	ci.ResumeCutoff = 0

	// Contents for the mock object
	var (
		// Test contents must be large enough that io.Copy does not complete during the first Rclone Copy operation
		resumeTestContents = makeContent(t, 1024)
		expectedContents   = resumeTestContents
	)

	// Create mockobjects with given breaks
	createTestSrc := func(interrupt int64) (fs.Object, fs.Object) {
		srcOrig := mockobject.New("potato").WithContent(resumeTestContents, mockobject.SeekModeNone)
		srcOrig.SetFs(r.Flocal)
		src := &resumeTestObject{
			Object: srcOrig,
			n:      interrupt,
		}
		return src, srcOrig
	}

	checkContents := func(obj fs.Object, contents string) {
		assert.NotNil(t, obj)
		assert.Equal(t, int64(len(contents)), obj.Size())

		r, err := obj.Open(ctx)
		assert.NoError(t, err)
		assert.NotNil(t, r)
		if r == nil {
			return
		}
		data, err := ioutil.ReadAll(r)
		assert.NoError(t, err)
		assert.Equal(t, contents, string(data))
		_ = r.Close()
	}

	srcBreak, srcNoBreak := createTestSrc(2)

	// Run first Copy only in a subprocess so that it can be interrupted without ending the test
	// adapted from: https://stackoverflow.com/questions/26225513/how-to-test-os-exit-scenarios-in-go
	if os.Getenv("RUNTEST") == "1" {
		remoteRoot := os.Getenv("REMOTEROOT")
		remoteFs, err := fs.NewFs(ctx, remoteRoot)
		require.NoError(t, err)
		_, _ = Copy(ctx, remoteFs, nil, "testdst", srcBreak)
		// This should never be reached as the subroutine should exit during Copy
		require.True(t, false, "Problem with test, first Copy operation should've been interrupted before completion")
		return
	}
	// Start the subprocess
	cmd := exec.Command(os.Args[0], "-test.run=TestResume")
	cmd.Env = append(os.Environ(), "RUNTEST=1", "REMOTEROOT="+r.Fremote.Root())
	cmd.Stdout = os.Stdout
	setupCmd(cmd)
	err := cmd.Run()

	e, ok := err.(*exec.ExitError)

	// Exit code after signal will be (128+signum) on Linux or (signum) on Windows
	expectedErrorString := "exit status 1"
	if runtime.GOOS == "windows" {
		expectedErrorString = "exit status 2"
	}
	assert.True(t, ok)
	assert.Contains(t, e.Error(), expectedErrorString)

	var buf bytes.Buffer
	log.SetOutput(&buf)
	defer func() {
		log.SetOutput(os.Stderr)
	}()

	// Start copy again, but with no breaks
	newDst, err := Copy(ctx, r.Fremote, nil, "testdst", srcNoBreak)
	assert.NoError(t, err)

	// Checks to see if a resume was initiated
	// Resumed byte position can vary slightly depending how long it takes atexit to process the interrupt
	assert.True(t, strings.Contains(buf.String(), "Resuming at byte position: "), "The upload did not resume when restarted. Message: %q", buf.String())

	checkContents(newDst, string(expectedContents))
}
@@ -18,22 +18,23 @@ import (
	"sync"
	"time"

	"github.com/rclone/rclone/fs/rc/webgui"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/skratchdot/open-golang/open"

	"github.com/rclone/rclone/cmd/serve/httplib"
	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/cache"
	"github.com/rclone/rclone/fs/config"
	"github.com/rclone/rclone/fs/fshttp"
	"github.com/rclone/rclone/fs/list"
	"github.com/rclone/rclone/fs/rc"
	"github.com/rclone/rclone/fs/rc/jobs"
	"github.com/rclone/rclone/fs/rc/rcflags"
	"github.com/rclone/rclone/fs/rc/webgui"
	"github.com/rclone/rclone/lib/http/serve"
	"github.com/rclone/rclone/lib/random"
	"github.com/skratchdot/open-golang/open"
)

var promHandler http.Handler
@@ -42,13 +43,6 @@ var onlyOnceWarningAllowOrigin sync.Once
func init() {
	rcloneCollector := accounting.NewRcloneCollector(context.Background())
	prometheus.MustRegister(rcloneCollector)

	m := fshttp.NewMetrics("rclone")
	for _, c := range m.Collectors() {
		prometheus.MustRegister(c)
	}
	fshttp.DefaultMetrics = m

	promHandler = promhttp.Handler()
}

2
go.mod
2
go.mod
@@ -10,7 +10,6 @@ require (
	github.com/Azure/azure-storage-blob-go v0.14.0
	github.com/Azure/go-autorest/autorest/adal v0.9.17
	github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c
	github.com/Max-Sum/base32768 v0.0.0-20191205131208-7937843c71d5 // indirect
	github.com/Unknwon/goconfig v0.0.0-20200908083735-df7de6a44db8
	github.com/a8m/tree v0.0.0-20210414114729-ce3525c5c2ef
	github.com/aalpar/deheap v0.0.0-20210914013432-0cc84d79dec3
@@ -44,7 +43,6 @@ require (
	github.com/ncw/swift/v2 v2.0.1
	github.com/nsf/termbox-go v1.1.1
	github.com/patrickmn/go-cache v2.1.0+incompatible
	github.com/pkg/errors v0.9.1
	github.com/pkg/sftp v1.13.4
	github.com/pmezard/go-difflib v1.0.0
	github.com/prometheus/client_golang v1.11.0

2
go.sum
2
go.sum
@@ -76,8 +76,6 @@ github.com/Azure/go-ntlmssp v0.0.0-20200615164410-66371956d46c/go.mod h1:chxPXzS
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Julusian/godocdown v0.0.0-20170816220326-6d19f8ff2df8/go.mod h1:INZr5t32rG59/5xeltqoCJoNY7e5x/3xoY9WSWVWg74=
github.com/Max-Sum/base32768 v0.0.0-20191205131208-7937843c71d5 h1:w/vNc+SQRYKGWBHeDrzvvNttHwZEbSAP0kmTdORl4OI=
github.com/Max-Sum/base32768 v0.0.0-20191205131208-7937843c71d5/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=

@@ -1,50 +0,0 @@
package cacheroot

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/pkg/errors"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/file"
)

// CreateCacheRoot will derive and make a subsystem cache path.
//
// Returned root OS path is an absolute path with UNC prefix,
// OS-specific path separators, and encoded with OS-specific encoder.
//
// Additionally it is returned as a standard path without UNC prefix,
// with slash path separators, and standard (internal) encoding.
//
// Care is taken when creating OS paths so that the ':' separator
// following a drive letter is not encoded, e.g. into unicode fullwidth colon.
//
// parentOSPath should contain an absolute local path in OS encoding.
//
// Note: instead of fs.Fs it takes name and root as plain strings
// to prevent import loops due to dependency on the fs package.
func CreateCacheRoot(parentOSPath, fsName, fsRoot, cacheName string) (rootOSPath, standardPath string, err error) {
	// Get a relative cache path representing the remote.
	relativeDir := fsRoot
	if runtime.GOOS == "windows" && strings.HasPrefix(relativeDir, `//?/`) {
		// Trim off the leading "//" to make the result
		// valid for appending to another path.
		relativeDir = relativeDir[2:]
	}
	relativeDir = fsName + "/" + relativeDir

	// Derive and make the cache root directory
	relativeOSPath := filepath.FromSlash(encoder.OS.FromStandardPath(relativeDir))
	rootOSPath = file.UNCPath(filepath.Join(parentOSPath, cacheName, relativeOSPath))
	if err = os.MkdirAll(rootOSPath, 0700); err != nil {
		return "", "", errors.Wrapf(err, "failed to create %s cache directory", cacheName)
	}

	parentStdPath := encoder.OS.ToStandardPath(filepath.ToSlash(parentOSPath))
	standardPath = fmt.Sprintf("%s/%s/%s", parentStdPath, cacheName, relativeDir)
	return rootOSPath, standardPath, nil
}
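
Usage, as seen in the operations package earlier in this diff: derive the per-remote "resume" cache root once, then key individual cache files by remote path:

cacheDir, _, err := cacheroot.CreateCacheRoot(config.GetCacheDir(), f.Name(), f.Root(), "resume")
if err != nil {
	return err
}
cacheFile := filepath.Join(cacheDir, remote)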
@@ -9,6 +9,7 @@ import (
	"sync/atomic"
	"syscall"

	"github.com/rclone/rclone/fs"
	"golang.org/x/sys/unix"
)

@@ -47,6 +48,7 @@ func PreAllocate(size int64, out *os.File) (err error) {
		// Try the next flags combination
		index++
		atomic.StoreInt32(&fallocFlagsIndex, index)
		fs.Debugf(nil, "preAllocate: got error on fallocate, trying combination %d/%d: %v", index, len(fallocFlags), err)
		goto again

	}

@@ -8,6 +8,7 @@ import (
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strings"
	"sync"
@@ -20,7 +21,6 @@ import (
	"github.com/rclone/rclone/fs/fserrors"
	"github.com/rclone/rclone/fs/hash"
	"github.com/rclone/rclone/fs/operations"
	"github.com/rclone/rclone/lib/cacheroot"
	"github.com/rclone/rclone/lib/encoder"
	"github.com/rclone/rclone/lib/file"
	"github.com/rclone/rclone/vfs/vfscache/writeback"
@@ -75,30 +75,43 @@ type AddVirtualFn func(remote string, size int64, isDir bool) error
// This starts background goroutines which can be cancelled with the
// context passed in.
func New(ctx context.Context, fremote fs.Fs, opt *vfscommon.Options, avFn AddVirtualFn) (*Cache, error) {
	// Get cache root path.
	// We need it in two variants: OS path as an absolute path with UNC prefix,
	// OS-specific path separators, and encoded with OS-specific encoder. Standard path
	// without UNC prefix, with slash path separators, and standard (internal) encoding.
	// Care must be taken when creating OS paths so that the ':' separator following a
	// drive letter is not encoded (e.g. into unicode fullwidth colon).
	var err error
	parentOSPath := config.GetCacheDir() // Assuming string contains a local absolute path in OS encoding
	fsName, fsRoot := fremote.Name(), fremote.Root()
	dataOSPath, dataStdPath, err := cacheroot.CreateCacheRoot(parentOSPath, fsName, fsRoot, "vfs")
	if err != nil {
		return nil, err
	fs.Debugf(nil, "vfs cache: root is %q", parentOSPath)
	parentPath := fromOSPath(parentOSPath)

	// Get a relative cache path representing the remote.
	relativeDirPath := fremote.Root() // This is a remote path in standard encoding
	if runtime.GOOS == "windows" {
		if strings.HasPrefix(relativeDirPath, `//?/`) {
			relativeDirPath = relativeDirPath[2:] // Trim off the "//" for the result to be valid when appending to another path
		}
	}
	fdata, err := fscache.Get(ctx, dataStdPath)
	if err != nil {
		return nil, fmt.Errorf("failed to get data cache backend: %w", err)
	relativeDirPath = fremote.Name() + "/" + relativeDirPath
	relativeDirOSPath := toOSPath(relativeDirPath)

	// Create cache root dirs
	var dataOSPath, metaOSPath string
	if dataOSPath, metaOSPath, err = createRootDirs(parentOSPath, relativeDirOSPath); err != nil {
		return nil, err
	}
	fs.Debugf(nil, "vfs cache: data root is %q", dataOSPath)

	metaOSPath, metaStdPath, err := cacheroot.CreateCacheRoot(parentOSPath, fsName, fsRoot, "vfsMeta")
	if err != nil {
		return nil, err
	}
	fmeta, err := fscache.Get(ctx, metaStdPath)
	if err != nil {
		return nil, fmt.Errorf("failed to get metadata cache backend: %w", err)
	}
	fs.Debugf(nil, "vfs cache: metadata root is %q", metaOSPath)

	// Create the cache object
	// Get (create) cache backends
	var fdata, fmeta fs.Fs
	if fdata, fmeta, err = getBackends(ctx, parentPath, relativeDirPath); err != nil {
		return nil, err
	}
	hashType, hashOption := operations.CommonHash(ctx, fdata, fremote)

	// Create the cache object
	c := &Cache{
		fremote: fremote,
		fcache:  fdata,
@@ -137,6 +150,23 @@ func createDir(dir string) error {
	return file.MkdirAll(dir, 0700)
}

// createRootDir creates a single cache root directory
func createRootDir(parentOSPath string, name string, relativeDirOSPath string) (path string, err error) {
	path = file.UNCPath(filepath.Join(parentOSPath, name, relativeDirOSPath))
	err = createDir(path)
	return
}

// createRootDirs creates all cache root directories
func createRootDirs(parentOSPath string, relativeDirOSPath string) (dataOSPath string, metaOSPath string, err error) {
	if dataOSPath, err = createRootDir(parentOSPath, "vfs", relativeDirOSPath); err != nil {
		err = fmt.Errorf("failed to create data cache directory: %w", err)
	} else if metaOSPath, err = createRootDir(parentOSPath, "vfsMeta", relativeDirOSPath); err != nil {
		err = fmt.Errorf("failed to create metadata cache directory: %w", err)
	}
	return
}

// createItemDir creates the directory for named item in all cache roots
//
// Returns an os path for the data cache file.
@@ -156,6 +186,22 @@ func (c *Cache) createItemDir(name string) (string, error) {
	return filepath.Join(parentPath, leaf), nil
}

// getBackend gets a backend for a cache root dir
func getBackend(ctx context.Context, parentPath string, name string, relativeDirPath string) (fs.Fs, error) {
	path := fmt.Sprintf("%s/%s/%s", parentPath, name, relativeDirPath)
	return fscache.Get(ctx, path)
}

// getBackends gets backends for all cache root dirs
func getBackends(ctx context.Context, parentPath string, relativeDirPath string) (fdata fs.Fs, fmeta fs.Fs, err error) {
	if fdata, err = getBackend(ctx, parentPath, "vfs", relativeDirPath); err != nil {
		err = fmt.Errorf("failed to get data cache backend: %w", err)
	} else if fmeta, err = getBackend(ctx, parentPath, "vfsMeta", relativeDirPath); err != nil {
		err = fmt.Errorf("failed to get metadata cache backend: %w", err)
	}
	return
}
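
Concretely, these helpers keep the data and metadata caches in sibling trees under the configured cache directory. An illustrative layout for a hypothetical remote named s3remote with root bucket/dir (the parent comes from config.GetCacheDir()):

	<cachedir>/vfs/s3remote/bucket/dir      - file data
	<cachedir>/vfsMeta/s3remote/bucket/dir  - per-file metadata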

// clean returns the cleaned version of name for use in the index map
//
// name should be a remote path not an osPath
@@ -168,6 +214,11 @@ func clean(name string) string {
	return name
}

// fromOSPath turns an OS path into a standard/remote path
func fromOSPath(osPath string) string {
	return encoder.OS.ToStandardPath(filepath.ToSlash(osPath))
}

// toOSPath turns a standard/remote path into an OS path
func toOSPath(standardPath string) string {
	return filepath.FromSlash(encoder.OS.FromStandardPath(standardPath))
}