mirror of https://github.com/rclone/rclone.git synced 2026-01-30 16:24:01 +00:00

Compare commits


1 Commits

Author SHA1 Message Date
Nick Craig-Wood
c48424c0eb mega: skip undecodable FSNodes - #3740 - FIXME vendor patch DO NOT MERGE
Also test whether we have a decryption key for shared nodes
2019-11-19 20:15:55 +00:00
31 changed files with 526 additions and 1084 deletions

View File

@@ -102,7 +102,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}
@@ -211,7 +211,7 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v1
uses: actions/checkout@master
with:
path: ./src/github.com/${{ github.repository }}

View File

@@ -12,13 +12,11 @@ import (
gohash "hash"
"io"
"io/ioutil"
"math/rand"
"path"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/pkg/errors"
@@ -36,57 +34,46 @@ import (
// and optional metadata object. If it's present,
// meta object is named after the original file.
//
// The only supported metadata format is simplejson atm.
// It supports only per-file meta objects that are rudimentary,
// used mostly for consistency checks (lazily for performance reasons).
// Other formats can be developed that use an external meta store
// free of these limitations, but this needs some support from
// rclone core (eg. metadata store interfaces).
//
// The following types of chunks are supported:
// data and control, active and temporary.
// Chunk type is identified by matching chunk file name
// based on the chunk name format configured by user.
//
// Both data and control chunks can be either temporary (aka hidden)
// or active (non-temporary aka normal aka permanent).
// Both data and control chunks can be either temporary or
// active (non-temporary).
// An operation creates temporary chunks while it runs.
// By completion it removes temporary and leaves active chunks.
// By completion it removes temporary and leaves active
// (aka normal aka permanent) chunks.
//
// Temporary chunks have a special hardcoded suffix in addition
// to the configured name pattern.
// Temporary suffix includes so called transaction identifier
// (abbreviated as `xactID` below), a generic non-negative base-36 "number"
// Temporary (aka hidden) chunks have a special hardcoded suffix
// in addition to the configured name pattern. The suffix comes last
// to prevent name collisions with non-temporary chunks.
// Temporary suffix includes so called transaction number usually
// abbreviated as `xactNo` below, a generic non-negative integer
// used by parallel operations to share a composite object.
// Chunker also accepts the longer decimal temporary suffix (obsolete),
// which is transparently converted to the new format. In its maximum
// length of 13 decimals it makes a 7-digit base-36 number.
//
// Chunker can tell data chunks from control chunks by the characters
// located in the "hash placeholder" position of configured format.
// Data chunks have decimal digits there.
// Control chunks have in that position a short lowercase alphanumeric
// string (starting with a letter) prepended by underscore.
// Control chunks have a short lowercase literal prepended by underscore
// in that position.
//
// Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc.
//
const (
ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempSuffixFormat = `_%04s`
tempSuffixRegStr = `_([0-9a-z]{4,9})`
tempSuffixRegOld = `\.\.tmp_([0-9]{10,13})`
ctrlTypeRegStr = `[a-z]{3,9}`
tempChunkFormat = `%s..tmp_%010d`
tempChunkRegStr = `\.\.tmp_([0-9]{10,19})`
)
var (
// regular expressions to validate control type and temporary suffix
ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
tempSuffixRegexp = regexp.MustCompile(`^` + tempSuffixRegStr + `$`)
ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
)
// Normally metadata is a small piece of JSON (about 100-300 bytes).
// The size of valid metadata must never exceed this limit.
// The size of valid metadata size must never exceed this limit.
// Current maximum provides a reasonable room for future extensions.
//
// Please refrain from increasing it, this can cause old rclone versions
@@ -114,9 +101,6 @@ const revealHidden = false
// Prevent memory overflow due to specially crafted chunk name
const maxSafeChunkNumber = 10000000
// Number of attempts to find unique transaction identifier
const maxTransactionProbes = 100
// standard chunker errors
var (
ErrChunkOverflow = errors.New("chunk number overflow")
@@ -129,6 +113,13 @@ const (
delFailed = 2 // move, then delete and try again if failed
)
// Note: metadata logic is tightly coupled with chunker code in many
// places, eg. in checks whether a file should have meta object or is
// eligible for chunking.
// If more metadata formats (or versions of a format) are added in future,
// it may be advisable to factor it into a "metadata strategy" interface
// similar to chunkingReader or linearReader below.
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
@@ -270,7 +261,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
firstChunkPath := f.makeChunkName(remotePath, 0, "", -1)
_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
if testErr == fs.ErrorIsFile {
err = testErr
@@ -319,16 +310,12 @@ type Fs struct {
dataNameFmt string // name format of data chunks
ctrlNameFmt string // name format of control chunks
nameRegexp *regexp.Regexp // regular expression to match chunk names
xactIDRand *rand.Rand // generator of random transaction identifiers
xactIDMutex sync.Mutex // mutex for the source of randomness
opt Options // copy of Options
features *fs.Features // optional features
dirSort bool // reserved for future, ignored
}
// configure sets up chunker for given name format, meta format and hash type.
// It also seeds the source of random transaction identifiers.
// configure must be called only from NewFs or by unit tests.
// configure must be called only from NewFs or by unit tests
func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
@@ -339,10 +326,6 @@ func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setHashType(hashType); err != nil {
return err
}
randomSeed := time.Now().UnixNano()
f.xactIDRand = rand.New(rand.NewSource(randomSeed))
return nil
}
@@ -431,13 +414,13 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
}
reDataOrCtrl := fmt.Sprintf("(?:(%s)|_(%s))", reDigits, ctrlTypeRegStr)
// this must be non-greedy or else it could eat up temporary suffix
// this must be non-greedy or else it can eat up temporary suffix
const mainNameRegStr = "(.+?)"
strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
strRegex = fmt.Sprintf("^%s(?:%s)?$", strRegex, tempChunkRegStr)
f.nameRegexp = regexp.MustCompile(strRegex)
// craft printf formats for active data/control chunks
@@ -452,36 +435,34 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
return nil
}
// makeChunkName produces chunk name (or path) for a given file.
// makeChunkName produces chunk name (or path) for given file.
//
// filePath can be name, relative or absolute path of main file.
// mainPath can be name, relative or absolute path of main file.
//
// chunkNo must be a zero based index of data chunk.
// Negative chunkNo eg. -1 indicates a control chunk.
// ctrlType is type of control chunk (must be valid).
// ctrlType must be "" for data chunks.
//
// xactID is a transaction identifier. Empty xactID denotes active chunk,
// otherwise temporary chunk name is produced.
// xactNo is a transaction number.
// Negative xactNo eg. -1 indicates an active chunk,
// otherwise produce temporary chunk name.
//
func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, parentName := path.Split(filePath)
var name, tempSuffix string
func (f *Fs) makeChunkName(mainPath string, chunkNo int, ctrlType string, xactNo int64) string {
dir, mainName := path.Split(mainPath)
var name string
switch {
case chunkNo >= 0 && ctrlType == "":
name = fmt.Sprintf(f.dataNameFmt, parentName, chunkNo+f.opt.StartFrom)
name = fmt.Sprintf(f.dataNameFmt, mainName, chunkNo+f.opt.StartFrom)
case chunkNo < 0 && ctrlTypeRegexp.MatchString(ctrlType):
name = fmt.Sprintf(f.ctrlNameFmt, parentName, ctrlType)
name = fmt.Sprintf(f.ctrlNameFmt, mainName, ctrlType)
default:
panic("makeChunkName: invalid argument") // must not produce something we can't consume
}
if xactID != "" {
tempSuffix = fmt.Sprintf(tempSuffixFormat, xactID)
if !tempSuffixRegexp.MatchString(tempSuffix) {
panic("makeChunkName: invalid argument")
}
if xactNo >= 0 {
name = fmt.Sprintf(tempChunkFormat, name, xactNo)
}
return dir + name + tempSuffix
return dir + name
}
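Illustrative sketch (not part of the diff): assuming the `*.chunk.###` pattern used in the tests below, the printf formats derived from it compose chunk names roughly as follows. The snippet only mimics the formatting, it is not the chunker implementation; values are examples.

package main

import "fmt"

func main() {
	dataNameFmt := "%s.chunk.%03d"          // derived from the pattern `*.chunk.###`
	ctrlNameFmt := "%s.chunk._%s"           // hash marks replaced by the control type slot
	const tempChunkFormat = "%s..tmp_%010d" // temporary suffix format from the const block above

	active := fmt.Sprintf(dataNameFmt, "fish", 3)              // fish.chunk.003
	temp := fmt.Sprintf(tempChunkFormat, active, int64(54321)) // fish.chunk.003..tmp_0000054321
	ctrl := fmt.Sprintf(ctrlNameFmt, "fish", "info")           // fish.chunk._info
	fmt.Println(active, temp, ctrl)
}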
// parseChunkName checks whether given file path belongs to
@@ -489,21 +470,20 @@ func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string
//
// filePath can be name, relative or absolute path of a file.
//
// Returned parentPath is path of the composite file owning the chunk.
// It's a non-empty string if valid chunk name is detected
// or "" if it's not a chunk.
// Returned mainPath is a non-empty string if valid chunk name
// is detected or "" if it's not a chunk.
// Other returned values depend on detected chunk type:
// data or control, active or temporary:
//
// data chunk - the returned chunkNo is non-negative and ctrlType is ""
// control chunk - the chunkNo is -1 and ctrlType is a non-empty string
// active chunk - the returned xactID is ""
// temporary chunk - the xactID is a non-empty string
func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ctrlType, xactID string) {
// control chunk - the chunkNo is -1 and ctrlType is non-empty string
// active chunk - the returned xactNo is -1
// temporary chunk - the xactNo is non-negative integer
func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrlType string, xactNo int64) {
dir, name := path.Split(filePath)
match := f.nameRegexp.FindStringSubmatch(name)
if match == nil || match[1] == "" {
return "", -1, "", ""
return "", -1, "", -1
}
var err error
@@ -514,26 +494,19 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
}
if chunkNo -= f.opt.StartFrom; chunkNo < 0 {
fs.Infof(f, "invalid data chunk number in file %q", name)
return "", -1, "", ""
return "", -1, "", -1
}
}
xactNo = -1
if match[4] != "" {
xactID = match[4]
}
if match[5] != "" {
// old-style temporary suffix
number, err := strconv.ParseInt(match[5], 10, 64)
if err != nil || number < 0 {
fs.Infof(f, "invalid old-style transaction number in file %q", name)
return "", -1, "", ""
if xactNo, err = strconv.ParseInt(match[4], 10, 64); err != nil || xactNo < 0 {
fs.Infof(f, "invalid transaction number in file %q", name)
return "", -1, "", -1
}
// convert old-style transaction number to base-36 transaction ID
xactID = fmt.Sprintf(tempSuffixFormat, strconv.FormatInt(number, 36))
xactID = xactID[1:] // strip leading underscore
}
parentPath = dir + match[1]
mainPath = dir + match[1]
ctrlType = match[3]
return
}
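Illustrative sketch (not part of the diff): the name regexp built for the `*.chunk.###` pattern (taken verbatim from the test expectations below) yields the capture groups that parseChunkName relies on: main name, data chunk number, control type and decimal transaction number.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// regexp string copied from the chunker unit tests for pattern `*.chunk.###`
	re := regexp.MustCompile(`^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
	for _, name := range []string{
		"fish.chunk.004..tmp_0000000021", // temporary data chunk
		"fish.chunk._info",               // active control chunk
	} {
		m := re.FindStringSubmatch(name)
		fmt.Printf("main=%q chunkNo=%q ctrlType=%q xactNo=%q\n", m[1], m[2], m[3], m[4])
	}
}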
@@ -541,74 +514,17 @@ func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ct
// forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if mainPath, _, _, _ := f.parseChunkName(filePath); mainPath != "" {
if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", parentPath)
return fmt.Errorf("chunk overlap with %q", mainPath)
}
if boolVal, isBool := o.(bool); !isBool || boolVal {
fs.Errorf(o, "chunk overlap with %q", parentPath)
fs.Errorf(o, "chunk overlap with %q", mainPath)
}
}
return nil
}
// newXactID produces a sufficiently random transaction identifier.
//
// The temporary suffix mask allows identifiers consisting of 4-9
// base-36 digits (ie. digits 0-9 or lowercase letters a-z).
// The identifiers must be unique between transactions running on
// the single file in parallel.
//
// Currently the function produces 6-character identifiers.
// Together with underscore this makes a 7-character temporary suffix.
//
// The first 4 characters isolate groups of transactions by time intervals.
// The maximum length of interval is base-36 "zzzz" ie. 1,679,615 seconds.
// The function rather takes a maximum prime closest to this number
// (see https://primes.utm.edu) as the interval length to better safeguard
// against repeating pseudo-random sequences in cases when rclone is
// invoked from a periodic scheduler like unix cron.
// Thus, the interval is slightly more than 19 days 10 hours 33 minutes.
//
// The remaining 2 base-36 digits (in the range from 0 to 1295 inclusive)
// are taken from the local random source.
// This provides about 0.1% collision probability for two parallel
// operations started at the same second and working on the same file.
//
// Non-empty filePath argument enables probing for existing temporary chunk
// to further eliminate collisions.
func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err error) {
const closestPrimeZzzzSeconds = 1679609
const maxTwoBase36Digits = 1295
unixSec := time.Now().Unix()
if unixSec < 0 {
unixSec = -unixSec // unlikely but the number must be positive
}
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
last2chars := strconv.FormatInt(randomness, 36)
xactID = fmt.Sprintf("%04s%02s", first4chars, last2chars)
if filePath == "" {
return
}
probeChunk := f.makeChunkName(filePath, 0, "", xactID)
_, probeErr := f.base.NewObject(ctx, probeChunk)
if probeErr != nil {
return
}
}
return "", fmt.Errorf("can't setup transaction for %s", filePath)
}
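Illustrative sketch (not part of the diff): a minimal standalone version of the xactID composition described in the comment above, using the same constants and formats as the removed function; the output is a random 6-character base-36 identifier.

package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"time"
)

func main() {
	const closestPrimeZzzzSeconds = 1679609 // prime closest to base-36 "zzzz" seconds
	const maxTwoBase36Digits = 1295         // base-36 "zz"

	unixSec := time.Now().Unix()
	first4chars := strconv.FormatInt(unixSec%closestPrimeZzzzSeconds, 36)
	last2chars := strconv.FormatInt(rand.Int63n(maxTwoBase36Digits+1), 36)
	xactID := fmt.Sprintf("%04s%02s", first4chars, last2chars)
	fmt.Println(xactID) // eg. "k3vf07", 6 base-36 digits
}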
// List the objects and directories in dir into entries.
// The entries can be returned in any order but should be
// for a complete directory.
@@ -686,8 +602,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
switch entry := dirOrObject.(type) {
case fs.Object:
remote := entry.Remote()
if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
if xactID != "" {
if mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(remote); mainRemote != "" {
if xactNo != -1 {
if revealHidden {
fs.Infof(f, "ignore temporary chunk %q", remote)
}
@@ -770,7 +686,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
//
// Please note that every NewObject invocation will scan the whole directory.
// Using here something like fs.DirCache might improve performance
// (yet making the logic more complex).
// (but will make the logic more complex).
//
// Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast
@@ -836,8 +752,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if !strings.Contains(entryRemote, remote) {
continue // bypass regexp to save cpu
}
mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactNo != -1 {
continue // skip non-conforming, temporary and control chunks
}
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
@@ -870,7 +786,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This is either a composite object with metadata or a non-chunked
// file without metadata. Validate it and update the total data size.
// As an optimization, skip metadata reading here - we will call
// readMetadata lazily when needed (reading can be expensive).
// readMetadata lazily when needed.
if err := o.validate(); err != nil {
return nil, err
}
@@ -927,11 +843,14 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
}
}()
baseRemote := remote
xactID, errXact := f.newXactID(ctx, baseRemote)
if errXact != nil {
return nil, errXact
// Use system timer as a trivial source of transaction numbers,
// don't try hard to safeguard against chunk collisions between
// parallel transactions.
xactNo := time.Now().Unix()
if xactNo < 0 {
xactNo = -xactNo // unlikely but transaction number must be positive
}
baseRemote := remote
// Transfer chunks data
for c.chunkNo = 0; !c.done; c.chunkNo++ {
@@ -939,7 +858,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
return nil, ErrChunkOverflow
}
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactNo)
size := c.sizeLeft
if size > c.chunkSize {
size = c.chunkSize
@@ -1043,7 +962,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
// Rename data chunks from temporary to final names
for chunkNo, chunk := range c.chunks {
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", -1)
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
if errMove != nil {
return nil, errMove
@@ -1302,6 +1221,11 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObject("", o, nil), nil
}
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// Hashes returns the supported hash sets.
// Chunker advertises a hash type if and only if it can be calculated
// for files of any size, non-chunked or composite.
@@ -1689,8 +1613,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject {
mainPath, _, _, xactID := f.parseChunkName(path)
if mainPath != "" && xactID == "" {
mainPath, _, _, xactNo := f.parseChunkName(path)
if mainPath != "" && xactNo == -1 {
path = mainPath
}
}
@@ -2139,7 +2063,7 @@ type metaSimpleJSON struct {
// Current implementation creates metadata in three cases:
// - for files larger than chunk size
// - if file contents can be mistaken as meta object
// - if consistent hashing is On but wrapped remote can't provide given hash
// - if consistent hashing is on but wrapped remote can't provide given hash
//
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) {
version := metadataVersion
@@ -2253,11 +2177,6 @@ func (f *Fs) String() string {
return fmt.Sprintf("Chunked '%s:%s'", f.name, f.root)
}
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// Check the interfaces are satisfied
var (
_ fs.Fs = (*Fs)(nil)

View File

@@ -64,40 +64,35 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
assert.Error(t, err)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := ""
assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) {
gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
assert.Equal(t, wantChunkName, gotChunkName)
}
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) {
assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo)
}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo)
}
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) {
gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactID, gotXactID)
assert.Equal(t, wantXactNo, gotXactNo)
}
const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`)
if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`)
}
// invalid formats
@@ -116,223 +111,142 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
// quick tests
if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
assertMakeName(`part_fish_1`, "fish", 0, "", -1)
assertParseName(`part_fish_43`, "fish", 42, "", -1)
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4)
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5)
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3)
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1)
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789)
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
}
// prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`)
f.opt.StartFrom = 2
// valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.021`, "fish", 19, "", "")
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1)
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321)
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890)
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345)
// valid temporary data chunks
assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
assertParseName(`fish.chunk.003`, "fish", 1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21)
assertParseName(`fish.chunk.021`, "fish", 19, "", -1)
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789)
// parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.3`, "", -1, "", -1)
assertParseName(`fish.chunk.001`, "", -1, "", -1)
assertParseName(`fish.chunk.21`, "", -1, "", -1)
assertParseName(`fish.chunk.-21`, "", -1, "", -1)
assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1)
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1)
// valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1)
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1)
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1)
assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1)
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1)
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1)
// valid temporary control chunks
assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321)
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0)
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21)
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321)
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789)
// parsing invalid control chunk names
assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk.info`, "", -1, "", -1)
assertParseName(`fish.chunk.locks`, "", -1, "", -1)
assertParseName(`fish.chunk.uploads`, "", -1, "", -1)
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1)
assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", -1)
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1)
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1)
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1)
assertParseName(`fish.chunk._.bin`, "", -1, "", -1)
// parsing invalid temporary control chunks
assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1)
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1)
// short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1)
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21)
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21)
assertMakeNamePanics("fish", -1, "in", -1)
assertMakeNamePanics("fish", -1, "up", 4)
assertMakeNamePanics("fish", -1, "x", -1)
assertMakeNamePanics("fish", -1, "c", 4)
// base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21)
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1)
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
// base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1)
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21)
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1)
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", -1)
assertMakeNamePanics("fish", -1, "info_", -1)
assertMakeNamePanics("fish", -2, ".bind", -3)
assertMakeNamePanics("fish", -2, "bind.", -3)
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
assertMakeNamePanics("fish", -1, "_info", 15678)
assertMakeNamePanics("fish", -1, "info_", 999)
assertMakeNamePanics("fish", -2, ".bind", 0)
assertMakeNamePanics("fish", -2, "bind.", 0)
}
func testSmallFileInternals(t *testing.T, f *Fs) {
@@ -469,7 +383,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
billyObj := newFile("billy")
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
}
err := f.Mkdir(ctx, billyChunkName(1))
@@ -519,7 +433,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -570,7 +484,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -618,7 +532,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)

View File

@@ -326,17 +326,6 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken
(created) set as the modification date.`,
Advanced: true,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.
Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
}, {
Name: "list_chunk",
Default: 1000,
@@ -474,7 +463,6 @@ type Options struct {
ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"`
@@ -706,9 +694,6 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.opt.AuthOwnerOnly {
fields += ",owners"
}
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos {
fields += ",spaces"
}
@@ -1110,8 +1095,6 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
}
size := info.Size
if f.opt.SizeAsQuota {
@@ -1480,14 +1463,6 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if iErr != nil {
return nil, iErr
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil
}
@@ -1625,7 +1600,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback)
overflow := []listREntry{}
listed := 0
cb := func(entry fs.DirEntry) error {
mu.Lock()
@@ -1638,7 +1612,6 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow = append(overflow, listREntry{d.ID(), d.Remote()})
}
}
listed++
return list.Add(entry)
}
@@ -1695,21 +1668,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return err
}
err = list.Flush()
if err != nil {
return err
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}
return nil
return list.Flush()
}
// itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -2082,30 +2041,9 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil
}
// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}
// About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil
}

View File

@@ -46,26 +46,13 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type TokenJSON struct {
AccessToken string `json:"access_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshExpiresIn int32 `json:"refresh_expires_in"`
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
}
// JSON structures returned by new API

View File

@@ -4,13 +4,12 @@ import (
"bytes"
"context"
"crypto/md5"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
@@ -26,6 +25,7 @@ import (
"github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp"
@@ -41,25 +41,29 @@ const enc = encodings.JottaCloud
// Globals
const (
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
configMountpoint = "mountpoint"
configVersion = 1
minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta"
defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
)
var (
// Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{
AuthURL: tokenURL,
TokenURL: tokenURL,
@@ -77,39 +81,43 @@ func init() {
NewFs: NewFs,
Config: func(name string, m configmap.Mapper) {
ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
refresh := false
if version, ok := m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
srv := rest.NewClient(fshttp.NewClient(fs.Config))
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n")
if config.Confirm(false) {
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil {
log.Fatalf("Failed to parse config version - corrupted config")
log.Fatalf("Failed to register device: %v", err)
}
refresh = ver != configVersion
} else {
refresh = true
m.Set(configClientID, deviceRegistration.ClientID)
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret))
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
}
if refresh {
fmt.Printf("Config outdated - refreshing\n")
} else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
clientConfig := *fs.Config
clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
srv := rest.NewClient(fshttp.NewClient(&clientConfig))
fmt.Printf("Username> ")
username := config.ReadLine()
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.")
fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
token, err := doAuth(ctx, srv, loginToken)
token, err := doAuth(ctx, srv, username, password)
if err != nil {
log.Fatalf("Failed to get oauth token: %s", err)
}
@@ -135,8 +143,6 @@ func init() {
m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint)
}
m.Set("configVersion", strconv.Itoa(configVersion))
},
Options: []fs.Option{{
Name: "md5_memory_limit",
@@ -243,51 +249,67 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randomDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randomDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
values := url.Values{}
values.Set("device_id", randomDeviceName)
// we don't seem to need any data from this link but the API is not happy if we skip it
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
NoResponse: true,
}
_, err = srv.Call(ctx, &opts)
if err != nil {
return token, err
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
// prepare our token request with username and password
values := url.Values{}
values.Set("client_id", "jottacli")
values.Set("grant_type", "password")
values.Set("password", loginToken.AuthToken)
values.Set("scope", "offline_access+openid")
values.Set("username", loginToken.Username)
values.Encode()
opts = rest.Opts{
values.Set("grant_type", "PASSWORD")
values.Set("password", password)
values.Set("username", username)
values.Set("client_id", oauthConfig.ClientID)
values.Set("client_secret", oauthConfig.ClientSecret)
opts := rest.Opts{
Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded",
Body: strings.NewReader(values.Encode()),
Parameters: values,
}
// do the first request
var jsonToken api.TokenJSON
_, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil {
return token, err
// if 2FA is enabled, the first request is expected to fail. We will do another request with the 2FA code as an additional HTTP header
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
}
token.AccessToken = jsonToken.AccessToken
@@ -449,6 +471,29 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file))
}
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
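As a quick illustration of the rewrite above, here is a self-contained sketch with the filter logic copied inline (the token URL here is a placeholder, not the real endpoint):
package main
import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)
const tokenURL = "https://example.com/token" // placeholder for the real constant in this file
// grantTypeFilter is copied from above: upper-case grant_type on refresh requests.
func grantTypeFilter(req *http.Request) {
	if tokenURL == req.URL.String() {
		refreshBody, err := ioutil.ReadAll(req.Body)
		if err != nil {
			return
		}
		_ = req.Body.Close()
		refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
		req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
	}
}
func main() {
	req, _ := http.NewRequest("POST", tokenURL, strings.NewReader("grant_type=refresh_token&refresh_token=abc"))
	grantTypeFilter(req)
	rewritten, _ := ioutil.ReadAll(req.Body)
	fmt.Println(string(rewritten)) // grant_type=REFRESH_TOKEN&refresh_token=abc
}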
// NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO()
@@ -459,23 +504,30 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err
}
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
}
rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root)
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")


@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring")
}
if file.o.size != fi.Size() {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
}
}


@@ -715,16 +715,6 @@ file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
@@ -819,7 +809,6 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
@@ -1398,12 +1387,6 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} else {
marker = resp.NextMarker
}
if urlEncodeListings {
*marker, err = url.QueryUnescape(*marker)
if err != nil {
return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
}
}
}
return nil
}
@@ -1670,7 +1653,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.StorageClass = &f.opt.StorageClass
}
if srcSize >= int64(f.opt.CopyCutoff) {
if srcSize >= int64(f.opt.UploadCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
}
return f.pacer.Call(func() (bool, error) {
@@ -1683,8 +1666,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize
var ends string
if partIndex == numParts-1 {
if totalSize >= 1 {
ends = strconv.FormatInt(totalSize-1, 10)
if totalSize >= 0 {
ends = strconv.FormatInt(totalSize, 10)
}
} else {
ends = strconv.FormatInt(start+partSize-1, 10)
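To make the inclusive-range arithmetic above concrete, here is a small standalone sketch (the "bytes=" header format is illustrative): a 10-byte object copied in 4-byte parts yields the ranges 0-3, 4-7 and 8-9, with the last part ending at totalSize-1.
package main
import (
	"fmt"
	"strconv"
)
// calculateRange mirrors the logic shown above: HTTP ranges are inclusive,
// so the final part must end at totalSize-1.
func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
	start := partIndex * partSize
	var ends string
	if partIndex == numParts-1 {
		if totalSize >= 1 {
			ends = strconv.FormatInt(totalSize-1, 10)
		}
	} else {
		ends = strconv.FormatInt(start+partSize-1, 10)
	}
	return fmt.Sprintf("bytes=%d-%s", start, ends)
}
func main() {
	var partSize, totalSize int64 = 4, 10
	numParts := (totalSize-1)/partSize + 1 // 3 parts, as in copyMultipart below
	for i := int64(0); i < numParts; i++ {
		fmt.Println(calculateRange(partSize, i, numParts, totalSize))
	}
	// Output:
	// bytes=0-3
	// bytes=4-7
	// bytes=8-9
}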
@@ -1721,7 +1704,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
}
}()
partSize := int64(f.opt.CopyCutoff)
partSize := int64(f.opt.ChunkSize)
numParts := (srcSize-1)/partSize + 1
var parts []*s3.CompletedPart
@@ -1949,6 +1932,11 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime
@@ -2052,7 +2040,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// read the md5sum if available for non multipart and if
// disable checksum isn't present.
var md5sum string
if !multipart && !o.fs.opt.DisableChecksum {
if !multipart || !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)


@@ -156,11 +156,6 @@ Home directory can be found in a shared folder called "home"
Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}},
}
fs.Register(fsi)
@@ -182,7 +177,6 @@ type Options struct {
SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
}
// Fs stores the interface to the remote SFTP files
@@ -606,16 +600,12 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
remote := path.Join(dir, info.Name())
// If the file is a symlink (not being a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
if !info.Mode().IsRegular() {
oldInfo := info
info, err = f.stat(remote)
if err != nil {
if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
}
info = oldInfo
}


@@ -7,7 +7,6 @@ import (
"context"
"fmt"
"io"
"net/url"
"path"
"strconv"
"strings"
@@ -953,8 +952,8 @@ func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object")
}
func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(container)
func (o *Object) isInContainerVersioning() (bool, error) {
_, headers, err := o.fs.c.Container(o.fs.root)
if err != nil {
return false, err
}
@@ -1131,10 +1130,6 @@ func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err
return
}
dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
@@ -1346,7 +1341,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
}
// ...then segments if required
if isDynamicLargeObject {
isInContainerVersioning, err := o.isInContainerVersioning(container)
isInContainerVersioning, err := o.isInContainerVersioning()
if err != nil {
return err
}


@@ -113,8 +113,7 @@ type Fs struct {
canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
hasChecksums bool // set if can use owncloud style checksums
}
// Object describes a webdav object
@@ -216,7 +215,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
},
NoRedirect: true,
}
if f.hasMD5 || f.hasSHA1 {
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -384,7 +383,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up
func (f *Fs) setBearerToken(token string) {
f.opt.BearerToken = token
f.srv.SetHeader("Authorization", "Bearer "+token)
f.srv.SetHeader("Authorization", "BEARER "+token)
}
// fetch the bearer token using the command
@@ -431,12 +430,11 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.canStream = true
f.precision = time.Second
f.useOCMtime = true
f.hasMD5 = true
f.hasSHA1 = true
f.hasChecksums = true
case "nextcloud":
f.precision = time.Second
f.useOCMtime = true
f.hasSHA1 = true
f.hasChecksums = true
case "sharepoint":
// To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth
@@ -538,7 +536,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth,
},
}
if f.hasMD5 || f.hasSHA1 {
if f.hasChecksums {
opts.Body = bytes.NewBuffer(owncloudProps)
}
var result api.Multistatus
@@ -947,14 +945,10 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
hashes := hash.Set(hash.None)
if f.hasMD5 {
hashes.Add(hash.MD5)
if f.hasChecksums {
return hash.NewHashSet(hash.MD5, hash.SHA1)
}
if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
return hash.Set(hash.None)
}
// About gets quota information
@@ -1021,11 +1015,13 @@ func (o *Object) Remote() string {
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if t == hash.MD5 && o.fs.hasMD5 {
return o.md5, nil
}
if t == hash.SHA1 && o.fs.hasSHA1 {
return o.sha1, nil
if o.fs.hasChecksums {
switch t {
case hash.SHA1:
return o.sha1, nil
case hash.MD5:
return o.md5, nil
}
}
return "", hash.ErrUnsupported
}
@@ -1046,14 +1042,10 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true
o.size = info.Size
o.modTime = time.Time(info.Modified)
if o.fs.hasMD5 || o.fs.hasSHA1 {
if o.fs.hasChecksums {
hashes := info.Hashes()
if o.fs.hasSHA1 {
o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
o.sha1 = hashes[hash.SHA1]
o.md5 = hashes[hash.MD5]
}
return nil
}
@@ -1134,21 +1126,19 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src),
}
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
if o.fs.useOCMtime || o.fs.hasChecksums {
opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
}
// Set one upload checksum
// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
if o.fs.hasSHA1 {
if o.fs.hasChecksums {
// Set an upload checksum - prefer SHA1
//
// This is used as an upload integrity test. If we set
// only SHA1 here, owncloud will calculate the MD5 too.
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
}
}
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
}
}


@@ -46,11 +46,10 @@ __rclone_custom_func() {
else
__rclone_init_completion -n : || return
fi
local rclone=(command rclone --ask-password=false)
if [[ $cur != *:* ]]; then
local ifs=$IFS
IFS=$'\n'
local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
local remotes=($(command rclone listremotes))
IFS=$ifs
local remote
for remote in "${remotes[@]}"; do
@@ -69,7 +68,7 @@ __rclone_custom_func() {
fi
local ifs=$IFS
IFS=$'\n'
local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
IFS=$ifs
local line
for line in "${lines[@]}"; do


@@ -263,7 +263,7 @@ Contributors
* garry415 <garry.415@gmail.com>
* forgems <forgems@gmail.com>
* Florian Apolloner <florian@apolloner.eu>
* Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
* Aleksandar Jankovic <office@ajankovic.com>
* Maran <maran@protonmail.com>
* nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
* Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
@@ -313,6 +313,3 @@ Contributors
* Marco Molteni <marco.molteni@mailbox.org>
* Ankur Gupta <ankur0493@gmail.com>
* Maciej Zimnoch <maciej@scylladb.com>
* anuar45 <serdaliyev.anuar@gmail.com>
* Fernando <ferferga@users.noreply.github.com>
* David Cole <david.cole@sohonet.com>


@@ -130,10 +130,10 @@ error message in such cases.
#### Chunk names
The default chunk name format is `*.rclone_chunk.###`, hence by default
chunk names are `BIG_FILE_NAME.rclone_chunk.001`,
`BIG_FILE_NAME.rclone_chunk.002` etc. You can configure another name format
using the `name_format` configuration file option. The format uses asterisk
The default chunk name format is `*.rclone-chunk.###`, hence by default
chunk names are `BIG_FILE_NAME.rclone-chunk.001`,
`BIG_FILE_NAME.rclone-chunk.002` etc. You can configure a different name
format using the `--chunker-name-format` option. The format uses asterisk
`*` as a placeholder for the base file name and one or more consecutive
hash characters `#` as a placeholder for sequential chunk number.
There must be one and only one asterisk. The number of consecutive hash
@@ -211,9 +211,6 @@ file hashing, configure chunker with `md5all` or `sha1all`. These two modes
guarantee given hash for all files. If wrapped remote doesn't support it,
chunker will then add metadata to all files, even small. However, this can
double the amount of small files in storage and incur additional service charges.
You can even use chunker to force md5/sha1 support in any other remote
at the expense of sidecar meta objects by setting e.g. `chunk_type=sha1all`
to force hashsums and `chunk_size=1P` to effectively disable chunking.
Normally, when a file is copied to chunker controlled remote, chunker
will ask the file source for compatible file hash and revert to on-the-fly
@@ -277,14 +274,6 @@ Chunker requires wrapped remote to support server side `move` (or `copy` +
This is because it internally renames temporary chunk files to their final
names when an operation completes successfully.
Chunker encodes chunk number in file name, so with default `name_format`
setting it adds 17 characters. Also chunker adds 7 characters of temporary
suffix during operations. Many file systems limit base file name without path
by 255 characters. Using rclone's crypt remote as a base file system limits
file name by 143 characters. Thus, the maximum name length is 231 for most files
and 119 for chunker-over-crypt (255 - 17 - 7 and 143 - 17 - 7 respectively). A user in need can change the name format to
e.g. `*.rcc##` and save 10 characters (provided there are at most 99 chunks per file).
Note that a move implemented using the copy-and-delete method may incur
double charging with some cloud storage providers.


@@ -65,28 +65,6 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).
#### Mount as a network drive
By default, rclone will mount the remote as a normal drive. However, you can also mount it as a **Network Drive**
(or **Network Share**, as it is referred to in some places).
Unlike other systems, Windows provides a different filesystem type for network drives.
Windows and other programs treat network drives and fixed/removable drives differently:
on network drives, many I/O operations are optimized for the high latency and low reliability
(compared to a normal drive) that are expected of a network.
Although many people prefer network shares to be mounted as normal system drives, this might cause
some issues, such as programs not working as expected or freezes and errors while operating with the
mounted remote in Windows Explorer. If you experience any of those, consider mounting rclone remotes as network shares,
as Windows expects normal drives to be fast and reliable, while cloud storage is far from that.
See also the [Limitations](#limitations) section below for more info.
Add `--fuse-flag --VolumePrefix=\server\share` to your `mount` command, **replacing `share` with any other
name of your choice if you are mounting more than one remote**. Otherwise, the mountpoints will conflict and
your mounted filesystems will overlap.
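For example, a hypothetical invocation (substitute your own remote, drive letter and share name):
rclone mount remote: X: --fuse-flag --VolumePrefix=\server\share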
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
### Limitations
Without the use of "--vfs-cache-mode" this can only write files


@@ -99,7 +99,7 @@ Or instead of htpassword if you just want a single user and password:
The GUI is being developed in the [rclone/rclone-webui-react repository](https://github.com/rclone/rclone-webui-react).
Bug reports and contributions are very welcome :-)
Bug reports and contributions very welcome welcome :-)
If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).


@@ -56,14 +56,7 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config
## macOS installation with brew ##
brew install rclone
## macOS installation from precompiled binary, using curl ##
To avoid problems with the macOS gatekeeper requiring the binary to be signed and
notarized, it is enough to download with `curl`.
## macOS installation from precompiled binary ##
Download the latest version of rclone.
@@ -88,19 +81,6 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config
## macOS installation from precompiled binary, using a web browser ##
When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
`rclone`, a pop-up will appear saying:
“rclone” cannot be opened because the developer cannot be verified.
macOS cannot verify that this app is free from malware.
The simplest fix is to run
xattr -d com.apple.quarantine rclone
## Install with docker ##
The rclone project maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).


@@ -11,7 +11,7 @@ Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
To configure Jottacloud you will need to generate a personal security token in the Jottacloud web interface. You will find the option to do so in your [account security settings](https://www.jottacloud.com/web/secure). Note that the web interface may refer to this token as a JottaCli token.
To configure Jottacloud you will need to enter your username and password and select a mountpoint.
Here is an example of how to make a remote called `remote`. First run:
@@ -42,8 +42,16 @@ n) No
y/n> n
Remote config
Generate a personal login token here: https://www.jottacloud.com/web/secure
Login Token> <your token here>
Do you want to create a machine specific API key?
Rclone has its own Jottacloud API key which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine, it is recommended to create a machine specific API key. These keys can NOT be shared between machines.
y) Yes
n) No
y/n> y
Username> 0xC4KE@gmail.com
Your Jottacloud password is only required during setup and will not be stored.
password:
Do you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?
@@ -66,10 +74,11 @@ Mountpoints> 1
[jotta]
type = jottacloud
user = 0xC4KE@gmail.com
client_id = .....
client_secret = ........
token = {........}
device = Jotta
mountpoint = Archive
configVersion = 1
--------------------
y) Yes this is OK
e) Edit this remote
@@ -93,7 +102,7 @@ To copy a local directory to an Jottacloud directory called backup
### Devices and Mountpoints ###
The official Jottacloud client registers a device for each computer you install it on and then creates a mountpoint for each folder you select for Backup.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mountpoint, however if you want to access files uploaded by any of the official clients rclone provides the option to select other devices and mountpoints during config.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mountpoint, however if you want to access files uploaded by the official client rclone provides the option to select other devices and mountpoints during config.
### --fast-list ###


@@ -292,7 +292,7 @@ func (s *StatsInfo) String() string {
}
}
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -313,23 +313,16 @@ func (s *StatsInfo) String() string {
errorDetails = " (no need to retry)"
}
// Add only non zero stats
if s.errors != 0 {
_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
s.errors, errorDetails)
}
if s.checks != 0 || totalChecks != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
s.checks, totalChecks, percent(s.checks, totalChecks))
}
if s.deletes != 0 {
_, _ = fmt.Fprintf(buf, "Deleted: %10d\n", s.deletes)
}
if s.transfers != 0 || totalTransfer != 0 {
_, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
}
_, _ = fmt.Fprintf(buf, "Elapsed time: %10v\n", dtRounded)
_, _ = fmt.Fprintf(buf, `
Errors: %10d%s
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
s.errors, errorDetails,
s.checks, totalChecks, percent(s.checks, totalChecks),
s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
dtRounded)
}
// checking and transferring have their own locking so unlock


@@ -174,9 +174,6 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
n = 0
for {
err = a.fill()
if err == io.EOF {
return n, nil
}
if err != nil {
return n, err
}
@@ -186,10 +183,6 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
if err != nil {
return n, err
}
if a.cur.err == io.EOF {
a.err = a.cur.err
return n, err
}
if a.cur.err != nil {
a.err = a.cur.err
return n, a.cur.err


@@ -60,12 +60,12 @@ func TestAsyncWriteTo(t *testing.T) {
var dst = &bytes.Buffer{}
n, err := io.Copy(dst, ar)
require.NoError(t, err)
assert.Equal(t, io.EOF, err)
assert.Equal(t, int64(10), n)
// Should still not return any errors
// Should still return EOF
n, err = io.Copy(dst, ar)
require.NoError(t, err)
assert.Equal(t, io.EOF, err)
assert.Equal(t, int64(0), n)
err = ar.Close()


@@ -178,53 +178,6 @@ func IsNoRetryError(err error) (isNoRetry bool) {
return
}
// NoLowLevelRetrier is an optional interface for error as to whether
// the operation should not be retried at a low level.
//
// NoLowLevelRetry errors won't be retried by low level retry loops.
type NoLowLevelRetrier interface {
error
NoLowLevelRetry() bool
}
// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
// NoLowLevelRetrier interface and return true
type wrappedNoLowLevelRetryError struct {
error
}
// NoLowLevelRetry interface
func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
return true
}
// Check interface
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
func NoLowLevelRetryError(err error) error {
return wrappedNoLowLevelRetryError{err}
}
// Cause returns the underlying error
func (err wrappedNoLowLevelRetryError) Cause() error {
return err.error
}
// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(NoLowLevelRetrier); ok {
isNoLowLevelRetry = r.NoLowLevelRetry()
return true
}
return false
})
return
}
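A minimal usage sketch of the helpers above, from the point of view of a hypothetical caller outside this package:
package main
import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs/fserrors"
)
func main() {
	// Mark an error so the low level retry loop will leave it alone...
	err := fserrors.NoLowLevelRetryError(errors.New("source file changed during transfer"))

	// ...and detect that marker where the retry decision is made.
	if fserrors.IsNoLowLevelRetryError(err) {
		fmt.Println("not retrying:", err)
	}
}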
// RetryAfter is an optional interface for error as to whether the
// operation should be retried after a given delay
//
@@ -392,11 +345,6 @@ func ShouldRetry(err error) bool {
return false
}
// If error has been marked to NoLowLevelRetry then don't retry
if IsNoLowLevelRetryError(err) {
return false
}
// Find root cause if available
retriable, err := Cause(err)
if retriable {


@@ -211,17 +211,11 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, er
if err != nil {
return nil, errors.Wrap(err, "find duplicate dirs")
}
// make sure parents are before children
duplicateNames := []string{}
for name, ds := range dirs {
if len(ds) > 1 {
duplicateNames = append(duplicateNames, name)
}
}
sort.Strings(duplicateNames)
duplicateDirs := [][]fs.Directory{}
for _, name := range duplicateNames {
duplicateDirs = append(duplicateDirs, dirs[name])
for _, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
}
}
return duplicateDirs, nil
}
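The sort above relies on a simple property of path strings, sketched in this standalone illustration: a directory name is a strict prefix of every path nested under it, and a strict prefix always sorts first, so parents are merged before their children.
package main
import (
	"fmt"
	"sort"
)
func main() {
	// After sort.Strings the parent directory comes before its children.
	names := []string{"photos/2019/raw", "photos", "photos/2019"}
	sort.Strings(names)
	fmt.Println(names) // [photos photos/2019 photos/2019/raw]
}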
@@ -241,8 +235,7 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
fs.Infof(dirs[0], "Merging contents of duplicate directories")
err := mergeDirs(ctx, dirs)
if err != nil {
err = fs.CountError(err)
fs.Errorf(nil, "merge duplicate dirs: %v", err)
return errors.Wrap(err, "merge duplicate dirs")
}
} else {
fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
@@ -258,16 +251,23 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
fs.Infof(f, "Looking for duplicates using %v mode.", mode)
// Find duplicate directories first and fix them
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
if err != nil {
return err
}
if len(duplicateDirs) != 0 {
// Find duplicate directories first and fix them - repeat
// until all fixed
for {
duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
if err != nil {
return err
}
if len(duplicateDirs) == 0 {
break
}
err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
if err != nil {
return err
}
if fs.Config.DryRun {
break
}
}
// find a hash to use
@@ -275,7 +275,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
// Now find duplicate files
files := map[string][]fs.Object{}
err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
remote := o.Remote()
files[remote] = append(files[remote], o)


@@ -721,9 +721,6 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
atomic.AddInt32(&c.srcFilesMissing, 1)
case fs.Directory:
// Do the same thing to the entire contents of the directory
if c.oneway {
return false
}
return true
default:
panic("Bad object in DirEntries")


@@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
)
// reOpen is a wrapper for an object reader which reopens the stream on error
@@ -105,7 +104,7 @@ func (h *reOpen) Read(p []byte) (n int, err error) {
h.err = err
}
h.read += int64(n)
if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {
if err != nil && err != io.EOF {
// close underlying stream
h.opened = false
_ = h.rc.Close()
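A rough, self-contained sketch of the reopen-on-error idea described above (not the actual rclone implementation, which also honours retry limits and other error markers):
package main
import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)
// reopeningReader wraps a "ranged open" function and, on a non-EOF read
// error, reopens the stream at the current offset and carries on.
type reopeningReader struct {
	open func(offset int64) (io.ReadCloser, error)
	rc   io.ReadCloser
	read int64 // bytes successfully read so far
}
func (r *reopeningReader) Read(p []byte) (n int, err error) {
	if r.rc == nil {
		if r.rc, err = r.open(r.read); err != nil {
			return 0, err
		}
	}
	n, err = r.rc.Read(p)
	r.read += int64(n)
	if err != nil && err != io.EOF {
		_ = r.rc.Close()
		r.rc = nil // reopen at offset r.read on the next Read
		err = nil  // a real implementation would also count retries here
	}
	return n, err
}
// flaky simulates a stream that drops once after a few bytes.
type flaky struct {
	r      io.Reader
	failed *bool
}
func (f *flaky) Read(p []byte) (int, error) {
	if !*f.failed {
		*f.failed = true
		if len(p) > 5 {
			p = p[:5]
		}
		n, _ := f.r.Read(p)
		return n, fmt.Errorf("connection reset")
	}
	return f.r.Read(p)
}
func main() {
	data := "hello world"
	failed := false
	r := &reopeningReader{
		open: func(offset int64) (io.ReadCloser, error) {
			return ioutil.NopCloser(&flaky{r: strings.NewReader(data[offset:]), failed: &failed}), nil
		},
	}
	out, _ := ioutil.ReadAll(r)
	fmt.Println(string(out)) // hello world
}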


@@ -750,7 +750,10 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
}
// Shared file
default:
k := m.FS.skmap[args[0]]
k, ok := m.FS.skmap[args[0]]
if !ok {
return nil, errors.New("couldn't find decryption key for shared file")
}
b, err := base64urldecode(k)
if err != nil {
return nil, err
@@ -921,7 +924,8 @@ func (m *Mega) getFileSystem() error {
for _, itm := range res[0].F {
_, err = m.addFSNode(itm)
if err != nil {
return err
m.debugf("couldn't decode FSNode %#v: %v ", itm, err)
continue
}
}


@@ -309,13 +309,6 @@ func (c *cache) rename(name string, newName string) (err error) {
if err = os.Rename(osOldPath, osNewPath); err != nil {
return errors.Wrapf(err, "Failed to rename in cache: %s to %s", osOldPath, osNewPath)
}
// Rename the cache item
c.itemMu.Lock()
if oldItem, ok := c.item[name]; ok {
c.item[newName] = oldItem
delete(c.item, name)
}
c.itemMu.Unlock()
fs.Infof(name, "Renamed in cache")
return nil
}


@@ -37,19 +37,13 @@ type File struct {
}
// newFile creates a new File
//
// o may be nil
func newFile(d *Dir, o fs.Object, leaf string) *File {
f := &File{
return &File{
d: d,
o: o,
leaf: leaf,
inode: newInode(),
}
if o != nil {
f.size = o.Size()
}
return f
}
// String converts it to printable
@@ -95,11 +89,6 @@ func (f *File) Path() string {
return path.Join(f.d.path, f.leaf)
}
// osPath returns the full path of the file in the cache in OS format
func (f *File) osPath() string {
return f.d.vfs.cache.toOSPath(f.Path())
}
// Sys returns underlying data source (can be nil) - satisfies Node interface
func (f *File) Sys() interface{} {
return nil
@@ -137,42 +126,29 @@ func (f *File) applyPendingRename() {
func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
f.mu.RLock()
d := f.d
o := f.o
oldPendingRenameFun := f.pendingRenameFun
f.mu.RUnlock()
if features := d.f.Features(); features.Move == nil && features.Copy == nil {
err := errors.Errorf("Fs %q can't rename files (no server side Move or Copy)", d.f)
fs.Errorf(f.Path(), "Dir.Rename error: %v", err)
return err
}
newPath := path.Join(destDir.path, newName)
renameCall := func(ctx context.Context) error {
f.mu.RLock()
o := f.o
f.mu.RUnlock()
if o == nil {
return errors.New("Cannot rename: file object is not available")
}
// chain rename calls if any
if oldPendingRenameFun != nil {
err := oldPendingRenameFun(ctx)
if err != nil {
return err
}
}
// do the move of the remote object
newPath := path.Join(destDir.path, newName)
dstOverwritten, _ := d.f.NewObject(ctx, newPath)
newObject, err := operations.Move(ctx, d.f, dstOverwritten, newPath, o)
newObject, err := operations.Move(ctx, d.f, dstOverwritten, newPath, f.o)
if err != nil {
fs.Errorf(f.Path(), "File.Rename error: %v", err)
return err
}
// Rename in the cache too if it exists
if f.d.vfs.Opt.CacheMode >= CacheModeWrites && f.d.vfs.cache.exists(f.Path()) {
if err := f.d.vfs.cache.rename(f.Path(), newPath); err != nil {
fs.Infof(f.Path(), "File.Rename failed in Cache: %v", err)
}
}
// newObject can be nil here for example if --dry-run
if newObject == nil {
err = errors.New("rename failed: nil object returned")
@@ -180,32 +156,25 @@ func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
return err
}
// Update the node with the new details
fs.Debugf(o, "Updating file with %v %p", newObject, f)
fs.Debugf(f.o, "Updating file with %v %p", newObject, f)
// f.rename(destDir, newObject)
f.mu.Lock()
f.o = newObject
f.d = destDir
f.leaf = path.Base(newObject.Remote())
f.pendingRenameFun = nil
f.mu.Unlock()
return nil
}
// Rename in the cache if it exists
if f.d.vfs.Opt.CacheMode != CacheModeOff && f.d.vfs.cache.exists(f.Path()) {
if err := f.d.vfs.cache.rename(f.Path(), newPath); err != nil {
fs.Infof(f.Path(), "File.Rename failed in Cache: %v", err)
}
}
// rename the file object
f.mu.Lock()
f.d = destDir
f.leaf = newName
f.mu.RLock()
writing := f._writingInProgress()
f.mu.Unlock()
f.mu.RUnlock()
if writing {
fs.Debugf(o, "File is currently open, delaying rename %p", f)
fs.Debugf(f.o, "File is currently open, delaying rename %p", f)
f.mu.Lock()
f.d = destDir
f.leaf = newName
f.pendingRenameFun = renameCall
f.mu.Unlock()
return nil
@@ -498,7 +467,7 @@ func (f *File) openRW(flags int) (fh *RWFileHandle, err error) {
}
// fs.Debugf(o, "File.openRW")
fh, err = newRWFileHandle(d, f, flags)
fh, err = newRWFileHandle(d, f, f.Path(), flags)
if err != nil {
fs.Errorf(f, "File.openRW failed: %v", err)
return nil, err


@@ -6,7 +6,6 @@ import (
"os"
"testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject"
@@ -14,10 +13,8 @@ import (
"github.com/stretchr/testify/require"
)
func fileCreate(t *testing.T, r *fstest.Run, mode CacheMode) (*VFS, *File, fstest.Item) {
opt := DefaultOpt
opt.CacheMode = mode
vfs := New(r.Fremote, &opt)
func fileCreate(t *testing.T, r *fstest.Run) (*VFS, *File, fstest.Item) {
vfs := New(r.Fremote, nil)
file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
fstest.CheckItems(t, r.Fremote, file1)
@@ -32,7 +29,7 @@ func fileCreate(t *testing.T, r *fstest.Run, mode CacheMode) (*VFS, *File, fstes
func TestFileMethods(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
vfs, file, _ := fileCreate(t, r, CacheModeOff)
vfs, file, _ := fileCreate(t, r)
// String
assert.Equal(t, "dir/file1", file.String())
@@ -87,7 +84,7 @@ func TestFileSetModTime(t *testing.T) {
return
}
defer r.Finalise()
vfs, file, file1 := fileCreate(t, r, CacheModeOff)
vfs, file, file1 := fileCreate(t, r)
err := file.SetModTime(t2)
require.NoError(t, err)
@@ -100,8 +97,12 @@ func TestFileSetModTime(t *testing.T) {
assert.Equal(t, EROFS, err)
}
func fileCheckContents(t *testing.T, file *File) {
fd, err := file.Open(os.O_RDONLY)
func TestFileOpenRead(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
_, file, _ := fileCreate(t, r)
fd, err := file.openRead()
require.NoError(t, err)
contents, err := ioutil.ReadAll(fd)
@@ -111,14 +112,6 @@ func fileCheckContents(t *testing.T, file *File) {
require.NoError(t, fd.Close())
}
func TestFileOpenRead(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
_, file, _ := fileCreate(t, r, CacheModeOff)
fileCheckContents(t, file)
}
func TestFileOpenReadUnknownSize(t *testing.T) {
var (
contents = []byte("file contents")
@@ -167,7 +160,7 @@ func TestFileOpenReadUnknownSize(t *testing.T) {
func TestFileOpenWrite(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
vfs, file, _ := fileCreate(t, r, CacheModeOff)
vfs, file, _ := fileCreate(t, r)
fd, err := file.openWrite(os.O_WRONLY | os.O_TRUNC)
require.NoError(t, err)
@@ -188,7 +181,7 @@ func TestFileOpenWrite(t *testing.T) {
func TestFileRemove(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
vfs, file, _ := fileCreate(t, r, CacheModeOff)
vfs, file, _ := fileCreate(t, r)
err := file.Remove()
require.NoError(t, err)
@@ -203,7 +196,7 @@ func TestFileRemove(t *testing.T) {
func TestFileRemoveAll(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
vfs, file, _ := fileCreate(t, r, CacheModeOff)
vfs, file, _ := fileCreate(t, r)
err := file.RemoveAll()
require.NoError(t, err)
@@ -218,7 +211,7 @@ func TestFileRemoveAll(t *testing.T) {
func TestFileOpen(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()
_, file, _ := fileCreate(t, r, CacheModeOff)
_, file, _ := fileCreate(t, r)
fd, err := file.Open(os.O_RDONLY)
require.NoError(t, err)
@@ -240,90 +233,3 @@ func TestFileOpen(t *testing.T) {
fd, err = file.Open(3)
assert.Equal(t, EPERM, err)
}
func testFileRename(t *testing.T, mode CacheMode) {
r := fstest.NewRun(t)
defer r.Finalise()
vfs, file, item := fileCreate(t, r, mode)
rootDir, err := vfs.Root()
require.NoError(t, err)
// check file in cache
if mode != CacheModeOff {
// read contents to get file in cache
fileCheckContents(t, file)
assert.True(t, vfs.cache.exists(item.Path))
}
dir := file.Dir()
// start with "dir/file1"
fstest.CheckItems(t, r.Fremote, item)
// rename file to "newLeaf"
err = dir.Rename("file1", "newLeaf", rootDir)
require.NoError(t, err)
item.Path = "newLeaf"
fstest.CheckItems(t, r.Fremote, item)
// check file in cache
if mode != CacheModeOff {
assert.True(t, vfs.cache.exists(item.Path))
}
// check file exists in the vfs layer at its new name
_, err = vfs.Stat("newLeaf")
require.NoError(t, err)
// rename it back to "dir/file1"
err = rootDir.Rename("newLeaf", "file1", dir)
require.NoError(t, err)
item.Path = "dir/file1"
fstest.CheckItems(t, r.Fremote, item)
// check file in cache
if mode != CacheModeOff {
assert.True(t, vfs.cache.exists(item.Path))
}
// now try renaming it with the file open
// first open it and write to it but don't close it
fd, err := file.Open(os.O_WRONLY | os.O_TRUNC)
require.NoError(t, err)
newContents := []byte("this is some new contents")
_, err = fd.Write(newContents)
require.NoError(t, err)
// rename file to "newLeaf"
err = dir.Rename("file1", "newLeaf", rootDir)
require.NoError(t, err)
newItem := fstest.NewItem("newLeaf", string(newContents), item.ModTime)
// check file has been renamed immediately in the cache
if mode != CacheModeOff {
assert.True(t, vfs.cache.exists("newLeaf"))
}
// check file exists in the vfs layer at its new name
_, err = vfs.Stat("newLeaf")
require.NoError(t, err)
// Close the file
require.NoError(t, fd.Close())
// Check file has now been renamed on the remote
item.Path = "newLeaf"
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{newItem}, nil, fs.ModTimeNotSupported)
}
func TestFileRename(t *testing.T) {
t.Run("CacheModeOff", func(t *testing.T) {
testFileRename(t, CacheModeOff)
})
t.Run("CacheModeFull", func(t *testing.T) {
testFileRename(t, CacheModeFull)
})
}


@@ -24,12 +24,14 @@ type RWFileHandle struct {
*os.File
mu sync.Mutex
closed bool // set if handle has been closed
remote string
file *File
d *Dir
opened bool
flags int // open flags
writeCalled bool // if any Write() methods have been called
changed bool // file contents was changed in any other way
flags int // open flags
osPath string // path to the file in the cache
writeCalled bool // if any Write() methods have been called
changed bool // file contents was changed in any other way
}
// Check interfaces
@@ -42,25 +44,26 @@ var (
_ io.Closer = (*RWFileHandle)(nil)
)
func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) {
func newRWFileHandle(d *Dir, f *File, remote string, flags int) (fh *RWFileHandle, err error) {
// if O_CREATE and O_EXCL are set and if path already exists, then return EEXIST
if flags&(os.O_CREATE|os.O_EXCL) == os.O_CREATE|os.O_EXCL && f.exists() {
return nil, EEXIST
}
fh = &RWFileHandle{
file: f,
d: d,
flags: flags,
file: f,
d: d,
remote: remote,
flags: flags,
}
// mark the file as open in the cache - must be done before the mkdir
fh.d.vfs.cache.open(fh.file.Path())
fh.d.vfs.cache.open(fh.remote)
// Make a place for the file
_, err = d.vfs.cache.mkdir(fh.file.Path())
fh.osPath, err = d.vfs.cache.mkdir(remote)
if err != nil {
fh.d.vfs.cache.close(fh.file.Path())
fh.d.vfs.cache.close(fh.remote)
return nil, errors.Wrap(err, "open RW handle failed to make cache directory")
}
@@ -110,9 +113,9 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
// If the remote object exists AND its cached file exists locally AND there are no
// other RW handles with it open, then attempt to update it.
if o != nil && fh.file.rwOpens() == 0 {
cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.file.Path())
cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.remote)
if err == nil && cacheObj != nil {
_, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.file.Path(), o)
_, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.remote, o)
if err != nil {
return errors.Wrap(err, "open RW handle failed to update cached file")
}
@@ -120,12 +123,12 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
}
// try to open an existing cache file
fd, err = file.OpenFile(fh.file.osPath(), cacheFileOpenFlags&^os.O_CREATE, 0600)
fd, err = file.OpenFile(fh.osPath, cacheFileOpenFlags&^os.O_CREATE, 0600)
if os.IsNotExist(err) {
// cache file does not exist, so need to fetch it if we have an object to fetch
// it from
if o != nil {
_, err = copyObj(fh.d.vfs.cache.f, nil, fh.file.Path(), o)
_, err = copyObj(fh.d.vfs.cache.f, nil, fh.remote, o)
if err != nil {
cause := errors.Cause(err)
if cause != fs.ErrorObjectNotFound && cause != fs.ErrorDirNotFound {
@@ -159,7 +162,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
fh.changed = true
if fh.flags&os.O_CREATE == 0 && fh.file.exists() {
// create an empty file if it exists on the source
err = ioutil.WriteFile(fh.file.osPath(), []byte{}, 0600)
err = ioutil.WriteFile(fh.osPath, []byte{}, 0600)
if err != nil {
return errors.Wrap(err, "cache open failed to create zero length file")
}
@@ -169,9 +172,9 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
// exists in these cases.
if runtime.GOOS == "windows" && fh.flags&os.O_APPEND != 0 {
cacheFileOpenFlags &^= os.O_TRUNC
_, err = os.Stat(fh.file.osPath())
_, err = os.Stat(fh.osPath)
if err == nil {
err = os.Truncate(fh.file.osPath(), 0)
err = os.Truncate(fh.osPath, 0)
if err != nil {
return errors.Wrap(err, "cache open failed to truncate")
}
@@ -181,7 +184,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
if fd == nil {
fs.Debugf(fh.logPrefix(), "Opening cached copy with flags=%s", decodeOpenFlags(fh.flags))
fd, err = file.OpenFile(fh.file.osPath(), cacheFileOpenFlags, 0600)
fd, err = file.OpenFile(fh.osPath, cacheFileOpenFlags, 0600)
if err != nil {
return errors.Wrap(err, "cache open file failed")
}
@@ -277,14 +280,14 @@ func (fh *RWFileHandle) flushWrites(closeFile bool) error {
if isCopied {
// Transfer the temp file to the remote
cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.file.Path())
cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.remote)
if err != nil {
err = errors.Wrap(err, "failed to find cache file")
fs.Errorf(fh.logPrefix(), "%v", err)
return err
}
o, err := copyObj(fh.d.vfs.f, fh.file.getObject(), fh.file.Path(), cacheObj)
o, err := copyObj(fh.d.vfs.f, fh.file.getObject(), fh.remote, cacheObj)
if err != nil {
err = errors.Wrap(err, "failed to transfer file from cache to remote")
fs.Errorf(fh.logPrefix(), "%v", err)
@@ -317,7 +320,7 @@ func (fh *RWFileHandle) close() (err error) {
if fh.opened {
fh.file.delRWOpen()
}
fh.d.vfs.cache.close(fh.file.Path())
fh.d.vfs.cache.close(fh.remote)
}()
return fh.flushWrites(true)
@@ -546,5 +549,5 @@ func (fh *RWFileHandle) Sync() error {
}
func (fh *RWFileHandle) logPrefix() string {
return fmt.Sprintf("%s(%p)", fh.file.Path(), fh)
return fmt.Sprintf("%s(%p)", fh.remote, fh)
}


@@ -21,18 +21,16 @@ func cleanup(t *testing.T, r *fstest.Run, vfs *VFS) {
r.Finalise()
}
// Create a file and open it with the flags passed in
func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename string, flags int) (*VFS, *RWFileHandle) {
// Open a file for write
func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
opt := DefaultOpt
opt.CacheMode = CacheModeFull
vfs := New(r.Fremote, &opt)
if create {
file1 := r.WriteObject(context.Background(), filename, "0123456789abcdef", t1)
fstest.CheckItems(t, r.Fremote, file1)
}
file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1)
fstest.CheckItems(t, r.Fremote, file1)
h, err := vfs.OpenFile(filename, flags, 0777)
h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777)
require.NoError(t, err)
fh, ok := h.(*RWFileHandle)
require.True(t, ok)
@@ -40,14 +38,18 @@ func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename stri
return vfs, fh
}
// Open a file for read
func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
return rwHandleCreateFlags(t, r, true, "dir/file1", os.O_RDONLY)
}
// Open a file for write
func rwHandleCreateWriteOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
return rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
opt := DefaultOpt
opt.CacheMode = CacheModeFull
vfs := New(r.Fremote, &opt)
h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
require.NoError(t, err)
fh, ok := h.(*RWFileHandle)
require.True(t, ok)
return vfs, fh
}
// read data from the string
@@ -492,96 +494,6 @@ func TestRWFileHandleReleaseWrite(t *testing.T) {
assert.True(t, fh.closed)
}
// check the size of the file through the open file (if not nil) and via stat
func assertSize(t *testing.T, vfs *VFS, fh *RWFileHandle, filepath string, size int64) {
if fh != nil {
assert.Equal(t, size, fh.Size())
}
fi, err := vfs.Stat(filepath)
require.NoError(t, err)
assert.Equal(t, size, fi.Size())
}
func TestRWFileHandleSizeTruncateExisting(t *testing.T) {
r := fstest.NewRun(t)
vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_TRUNC)
defer cleanup(t, r, vfs)
// check initial size after opening
assertSize(t, vfs, fh, "dir/file1", 0)
// write some bytes
n, err := fh.Write([]byte("hello"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
// check size after writing
assertSize(t, vfs, fh, "dir/file1", 5)
// close
assert.NoError(t, fh.Close())
// check size after close
assertSize(t, vfs, nil, "dir/file1", 5)
}
func TestRWFileHandleSizeCreateExisting(t *testing.T) {
r := fstest.NewRun(t)
vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_CREATE)
defer cleanup(t, r, vfs)
// check initial size after opening
assertSize(t, vfs, fh, "dir/file1", 16)
// write some bytes
n, err := fh.Write([]byte("hello"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
// check size after writing
assertSize(t, vfs, fh, "dir/file1", 16)
// write some more bytes
n, err = fh.Write([]byte("helloHELLOhello"))
assert.NoError(t, err)
assert.Equal(t, 15, n)
// check size after writing
assertSize(t, vfs, fh, "dir/file1", 20)
// close
assert.NoError(t, fh.Close())
// check size after close
assertSize(t, vfs, nil, "dir/file1", 20)
}
func TestRWFileHandleSizeCreateNew(t *testing.T) {
r := fstest.NewRun(t)
vfs, fh := rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
defer cleanup(t, r, vfs)
// check initial size after opening
assertSize(t, vfs, fh, "file1", 0)
// write some bytes
n, err := fh.Write([]byte("hello"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
// check size after writing
assertSize(t, vfs, fh, "file1", 5)
// check size after writing
assertSize(t, vfs, fh, "file1", 5)
// close
assert.NoError(t, fh.Close())
// check size after close
assertSize(t, vfs, nil, "file1", 5)
}
func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) {
fileName := "open-test-file"
@@ -698,7 +610,7 @@ func TestRWFileModTimeWithOpenWriters(t *testing.T) {
}
}
func TestRWCacheRename(t *testing.T) {
func TestCacheRename(t *testing.T) {
r := fstest.NewRun(t)
defer r.Finalise()