mirror of https://github.com/rclone/rclone.git synced 2025-12-27 21:53:27 +00:00

Compare commits

35 Commits

Author SHA1 Message Date
Cnly
4948bfa8c5 accounting: fix error count shown as checks 2019-12-20 14:37:21 +08:00
Nick Craig-Wood
0ecb8bc2f9 s3: fix url decoding of NextMarker - fixes #3799
Before this patch we were failing to URL decode the NextMarker when
url encoding was used for the listing.

The result of this was duplicated listing entries for directories
with >1000 entries where the NextMarker was a file containing a space.
2019-12-12 13:33:30 +00:00
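
A minimal sketch of the decode step, assuming the AWS SDK for Go listing
response and a urlEncodeListings flag (names are illustrative, not the
exact rclone code):

    import "net/url"

    // decodeNextMarker returns the marker to continue the listing from.
    // With EncodingType=url S3 percent-encodes NextMarker, so it must be
    // decoded before it is sent back, otherwise entries after a file name
    // containing a space are listed again under the encoded name.
    func decodeNextMarker(nextMarker string, urlEncodeListings bool) (string, error) {
        if !urlEncodeListings {
            return nextMarker, nil
        }
        return url.QueryUnescape(nextMarker) // "dir/file%20name" -> "dir/file name"
    }
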
Nick Craig-Wood
1ab4985046 vfs: when renaming files in the cache, rename the cache item in memory too 2019-12-12 13:31:10 +00:00
Nick Craig-Wood
6e683b4359 vfs: fix rename of open files when using the VFS cache
Before this change, renaming an open file when using the VFS cache was
delayed until the file was closed.  This meant that the file was not
readable after a rename even though it was in the cache.

After this change we rename the local cache file and the in memory
cache, delaying only the rename of the file in object storage.

See: https://forum.rclone.org/t/xen-orchestra-ebadf-bad-file-descriptor-write/13104
2019-12-12 13:31:10 +00:00
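
A rough sketch of the in-memory half of the fix, using a hypothetical
cut-down cache type (imports os, sync and path/filepath); this is not the
actual VFS cache code:

    // cacheItem stands in for whatever per-file state the cache keeps.
    type cacheItem struct{ opens int }

    type cache struct {
        mu    sync.Mutex
        items map[string]*cacheItem // in-memory entries keyed by remote path
        root  string                // root directory of the on-disk cache
    }

    func (c *cache) toOSPath(remote string) string {
        return filepath.Join(c.root, filepath.FromSlash(remote))
    }

    // rename moves the on-disk cache file and the in-memory entry to the
    // new name immediately, so an open file stays readable after a rename;
    // only the rename of the object in object storage is deferred.
    func (c *cache) rename(oldName, newName string) error {
        c.mu.Lock()
        defer c.mu.Unlock()
        if item, found := c.items[oldName]; found {
            delete(c.items, oldName)
            c.items[newName] = item
        }
        return os.Rename(c.toOSPath(oldName), c.toOSPath(newName))
    }
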
Nick Craig-Wood
241921c786 vfs: don't cache the path in RW file objects to fix renaming 2019-12-12 13:31:10 +00:00
buengese
a186284b23 asyncreader: fix EOF error 2019-12-10 12:12:29 +00:00
Ivan Andreev
41ba1bba2b chunker: reduce length of temporary suffix 2019-12-09 16:56:32 +00:00
Nick Craig-Wood
50bb9b7bdd check: fix --one-way recursing more directories than it needs to
Before this change rclone traversed all directories in the destination.

After this change rclone doesn't traverse directories in the
destination that don't exist in the source if the `--one-way` flag is
set.

See: https://forum.rclone.org/t/check-with-one-way-flag-should-not-traverses-all-destination-directories/13263
2019-12-07 13:26:55 +00:00
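
For example, with the flag set the destination is only walked where the
source has a matching directory:

    rclone check /backup/source remote:dest --one-way
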
Nick Craig-Wood
4537d9b5cf operations: make reopen code error on NoLowLevelRetry errors - fixes #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
684dbe0e9d local: make source file being updated errors be NoLowLevelRetry errors #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
572c1079a5 fserrors: Make a new NoLowLevelRetry error and don't retry them #3777 2019-12-06 10:54:03 +00:00
Nick Craig-Wood
cb97239a60 build: pin actions/checkout to v1 to fix build failure 2019-12-04 13:48:03 +00:00
Nick Craig-Wood
e48145f959 Add David Cole to contributors 2019-12-04 12:14:30 +00:00
Nick Craig-Wood
2150cf7362 Add email for Aleksandar Janković 2019-12-04 12:14:21 +00:00
David Cole
707e51eac7 docs: correct typo in gui docs 2019-12-04 12:08:52 +00:00
Nick Craig-Wood
0d10640aaa s3: add --s3-copy-cutoff for size to switch to multipart copy
Before this change we used the same (relatively low) limits for server
side copy as we did for multipart uploads.  It doesn't make sense to
use the same limits since no data is being downloaded or uploaded for
a server side copy.

This change introduces a new parameter --s3-copy-cutoff to control
when the switch from single to multipart server side copy happens and
defaults it to the maximum 5GB.

This makes server side copies much more efficient.

It also fixes the spurious error when trying to set the modification
time of a file bigger than 5GB.

See #3778
2019-12-03 10:37:55 +00:00
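
For example, to start using multipart server side copy only for objects
larger than 1 GiB (remote and bucket names are placeholders):

    rclone copy s3:bucket/path s3:bucket/archive --s3-copy-cutoff 1G
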
Nick Craig-Wood
f4746f5064 s3: fix multipart copy - fixes #3778
Before this change multipart copies were giving the error

    Range specified is not valid for source object of size

This was due to an off by one error in the range source introduced in
7b1274e29a "s3: support for multipart copy"
2019-12-03 10:37:55 +00:00
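
A worked sketch of the corrected range arithmetic (illustrative, not the
exact rclone code): the range for each copied part must use an inclusive
end byte, i.e. start+length-1.

    import "fmt"

    // copySourceRange builds the CopySourceRange value for one part of a
    // multipart server side copy. A 10 byte object copied in 5 byte parts
    // yields "bytes=0-4" and "bytes=5-9"; without the -1 the last part
    // would ask for byte 10, which is past the end of the source object.
    func copySourceRange(start, partSize, size int64) string {
        end := start + partSize
        if end > size {
            end = size
        }
        return fmt.Sprintf("bytes=%d-%d", start, end-1)
    }
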
Aleksandar Janković
c05bb63f96 s3: fix DisableChecksum condition 2019-12-02 15:15:59 +00:00
Danil Semelenov
e2773b3b4e Fix completion with an encrypted config
Closes #3767.
2019-11-29 14:48:12 +00:00
Nick Craig-Wood
d3b0bed091 drive: make sure invalid auth for teamdrives always reports an error
For some reason Google doesn't return an error if you use a service
account with the wrong permissions to list a team drive.  This gives
the user the false impression that the drive is empty.

This change:
- calls teamdrives get when running rclone about
- calls teamdrives get when a listing of the root returns no entries

These will both detect a team drive which has the incorrect auth and
work around the issue.

Fixes: #3763
See: https://forum.rclone.org/t/rclone-missing-error-code-when-sas-have-no-permission/13086
See: https://forum.rclone.org/t/need-need-bug-verification-rclone-about-doesnt-work-on-teamdrives-empty-output/13105
2019-11-28 10:51:17 +00:00
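
A hedged sketch of the probe, assuming the google.golang.org/api/drive/v3
Go client (not the exact rclone code):

    // checkTeamDriveAccess asks Drive for the team drive itself. Unlike a
    // plain file listing, which silently returns no entries when a service
    // account lacks access, teamdrives get fails with a permission error.
    func checkTeamDriveAccess(svc *drive.Service, teamDriveID string) error {
        _, err := svc.Teamdrives.Get(teamDriveID).Do()
        if err != nil {
            return fmt.Errorf("team drive %q is not accessible: %v", teamDriveID, err)
        }
        return nil
    }
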
Nick Craig-Wood
33c80bbb96 jottacloud: add URL to generate Login Token to config wizard 2019-11-28 10:03:48 +00:00
Nick Craig-Wood
705e4694ed webdav: fix case of "Bearer" in Authorization: header to agree with RFC
Before this change rclone used "Authorization: BEARER token".  However,
according to the RFC this should be "Bearer"

https://tools.ietf.org/html/rfc6750#section-2.1

This changes it to "Authorization: Bearer token"

Fixes #3751 and interop with Salesforce Webdav server
2019-11-27 12:04:31 +00:00
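
The wire-level difference is only the capitalisation of the scheme name:

    Authorization: BEARER <token>   (before, rejected by some servers)
    Authorization: Bearer <token>   (after, per RFC 6750 section 2.1)
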
Nick Craig-Wood
4fbc90d115 webdav: make nextcloud only upload SHA1 checksums
When using nextcloud, before this change we only uploaded one of SHA1
or MD5 checksum in the OC-Checksum header with preference to SHA1 if
both were set.

This made the MD5 checksums read back as empty strings, which made
syncing with checksums less useful than it should be, as all the MD5
checksums were blank.

This change makes it so that we only upload the SHA1 to nextcloud.

The behaviour of owncloud is unchanged as owncloud uses the checksum
as an upload integrity check only and calculates its own checksums.

See: https://forum.rclone.org/t/how-to-specify-hash-method-to-checksum/13055
2019-11-27 11:58:55 +00:00
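
The upload now carries only the SHA1 in the checksum header, for example
(header format assumed from the ownCloud/Nextcloud OC-Checksum convention):

    OC-Checksum: SHA1:2fd4e1c67a2d28fced849ee1bb76e7391b93eb12
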
Nick Craig-Wood
ed39adc65b Add Fernando to contributors 2019-11-27 11:40:44 +00:00
Fernando
162fdfe455 mount: document remotes as network shares on Windows
Provided instructions for mounting remotes as network shares/network drives in a Windows environment
2019-11-27 11:40:24 +00:00
buengese
8f33c932f2 jottacloud: update docs for new auth method 2019-11-26 13:49:49 +00:00
buengese
4195bd7880 jottacloud: use new auth method used by official client 2019-11-26 13:49:49 +00:00
Marco Molteni
d72f3e31c0 docs/install: explain how to workaround macOS Gatekeeper requiring notarization
Fix #3689
2019-11-26 12:33:30 +00:00
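
The usual workaround is to clear the quarantine attribute that Gatekeeper
checks on the downloaded binary (path is an example):

    xattr -d com.apple.quarantine /usr/local/bin/rclone
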
Garry McNulty
11f44cff50 drive: add --drive-use-shared-date to use date file was shared instead of modified date - fixes #3624 2019-11-26 12:19:44 +00:00
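
For example, to show the date a file was shared instead of its modification
date when listing files shared with you (remote name is a placeholder):

    rclone lsl --drive-shared-with-me --drive-use-shared-date gdrive:
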
SezalAgrawal
c3751e9a50 operations: fix dedupe continuing on errors like insufficientFilePermisson - fixes #3470
* Fix dedupe on merge continuing on errors like insufficientFilePermisson
* Sorted the directories to remove recursion logic
2019-11-26 10:58:52 +00:00
Nick Craig-Wood
420ae905b5 vfs: make sure existing files opened for write show correct size
Before this change, if an existing file was opened for write without
truncate, its size would show as 0 rather than the full size of the
file.
2019-11-25 11:31:44 +00:00
Nick Craig-Wood
a7d65bd519 sftp: add --sftp-skip-links to skip symlinks and non regular files - fixes #3716
This also corrects the symlink detection logic to only check symlink
files.  Previously it was checking all directories too, which made it
do more stat calls than necessary.
2019-11-24 16:10:53 +00:00
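
Example usage (remote and path names are placeholders):

    rclone sync sftp-server:/srv/data /local/backup --sftp-skip-links
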
Nick Craig-Wood
1db31d7149 swift: fix parsing of X-Object-Manifest
Before this change we forgot to URL decode the X-Object-Manifest in a dynamic large object.

This problem was introduced by 2fe8285f89 "swift: reserve
segments of dynamic large object when delete objects in container what
was enabled versioning."
2019-11-21 13:25:02 +00:00
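
A minimal sketch of the decode step (illustrative, not the exact rclone code):

    import "net/url"

    // parseManifest decodes the X-Object-Manifest header of a dynamic large
    // object. Swift returns it percent-encoded, so the "container/prefix"
    // value must be URL decoded before the segments can be located.
    func parseManifest(headers map[string]string) (string, error) {
        return url.QueryUnescape(headers["X-Object-Manifest"])
    }
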
Nick Craig-Wood
4641bd5116 Add anuar45 to contributors 2019-11-21 11:16:04 +00:00
anuar45
7e602dbf39 stats: show deletes in stats and hide zero stats
This shows deletes in the stats.  It also doesn't show zero stats
in order not to make the stats block too long.
2019-11-21 11:15:47 +00:00
31 changed files with 1088 additions and 530 deletions

View File

@@ -102,7 +102,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
         with:
           path: ./src/github.com/${{ github.repository }}
@@ -211,7 +211,7 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@master
+        uses: actions/checkout@v1
         with:
           path: ./src/github.com/${{ github.repository }}

View File

@@ -12,11 +12,13 @@ import (
gohash "hash" gohash "hash"
"io" "io"
"io/ioutil" "io/ioutil"
"math/rand"
"path" "path"
"regexp" "regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"sync"
"time" "time"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -34,46 +36,57 @@ import (
// and optional metadata object. If it's present, // and optional metadata object. If it's present,
// meta object is named after the original file. // meta object is named after the original file.
// //
// The only supported metadata format is simplejson atm.
// It supports only per-file meta objects that are rudimentary,
// used mostly for consistency checks (lazily for performance reasons).
// Other formats can be developed that use an external meta store
// free of these limitations, but this needs some support from
// rclone core (eg. metadata store interfaces).
//
// The following types of chunks are supported: // The following types of chunks are supported:
// data and control, active and temporary. // data and control, active and temporary.
// Chunk type is identified by matching chunk file name // Chunk type is identified by matching chunk file name
// based on the chunk name format configured by user. // based on the chunk name format configured by user.
// //
// Both data and control chunks can be either temporary or // Both data and control chunks can be either temporary (aka hidden)
// active (non-temporary). // or active (non-temporary aka normal aka permanent).
// An operation creates temporary chunks while it runs. // An operation creates temporary chunks while it runs.
// By completion it removes temporary and leaves active // By completion it removes temporary and leaves active chunks.
// (aka normal aka permanent) chunks.
// //
// Temporary (aka hidden) chunks have a special hardcoded suffix // Temporary chunks have a special hardcoded suffix in addition
// in addition to the configured name pattern. The suffix comes last // to the configured name pattern.
// to prevent name collisions with non-temporary chunks. // Temporary suffix includes so called transaction identifier
// Temporary suffix includes so called transaction number usually // (abbreviated as `xactID` below), a generic non-negative base-36 "number"
// abbreviated as `xactNo` below, a generic non-negative integer
// used by parallel operations to share a composite object. // used by parallel operations to share a composite object.
// Chunker also accepts the longer decimal temporary suffix (obsolete),
// which is transparently converted to the new format. In its maximum
// length of 13 decimals it makes a 7-digit base-36 number.
// //
// Chunker can tell data chunks from control chunks by the characters // Chunker can tell data chunks from control chunks by the characters
// located in the "hash placeholder" position of configured format. // located in the "hash placeholder" position of configured format.
// Data chunks have decimal digits there. // Data chunks have decimal digits there.
// Control chunks have a short lowercase literal prepended by underscore // Control chunks have in that position a short lowercase alphanumeric
// in that position. // string (starting with a letter) prepended by underscore.
// //
// Metadata format v1 does not define any control chunk types, // Metadata format v1 does not define any control chunk types,
// they are currently ignored aka reserved. // they are currently ignored aka reserved.
// In future they can be used to implement resumable uploads etc. // In future they can be used to implement resumable uploads etc.
// //
const ( const (
ctrlTypeRegStr = `[a-z]{3,9}` ctrlTypeRegStr = `[a-z][a-z0-9]{2,6}`
tempChunkFormat = `%s..tmp_%010d` tempSuffixFormat = `_%04s`
tempChunkRegStr = `\.\.tmp_([0-9]{10,19})` tempSuffixRegStr = `_([0-9a-z]{4,9})`
tempSuffixRegOld = `\.\.tmp_([0-9]{10,13})`
) )
var ( var (
ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`) // regular expressions to validate control type and temporary suffix
ctrlTypeRegexp = regexp.MustCompile(`^` + ctrlTypeRegStr + `$`)
tempSuffixRegexp = regexp.MustCompile(`^` + tempSuffixRegStr + `$`)
) )
// Normally metadata is a small piece of JSON (about 100-300 bytes). // Normally metadata is a small piece of JSON (about 100-300 bytes).
// The size of valid metadata size must never exceed this limit. // The size of valid metadata must never exceed this limit.
// Current maximum provides a reasonable room for future extensions. // Current maximum provides a reasonable room for future extensions.
// //
// Please refrain from increasing it, this can cause old rclone versions // Please refrain from increasing it, this can cause old rclone versions
@@ -101,6 +114,9 @@ const revealHidden = false
// Prevent memory overflow due to specially crafted chunk name // Prevent memory overflow due to specially crafted chunk name
const maxSafeChunkNumber = 10000000 const maxSafeChunkNumber = 10000000
// Number of attempts to find unique transaction identifier
const maxTransactionProbes = 100
// standard chunker errors // standard chunker errors
var ( var (
ErrChunkOverflow = errors.New("chunk number overflow") ErrChunkOverflow = errors.New("chunk number overflow")
@@ -113,13 +129,6 @@ const (
delFailed = 2 // move, then delete and try again if failed delFailed = 2 // move, then delete and try again if failed
) )
// Note: metadata logic is tightly coupled with chunker code in many
// places, eg. in checks whether a file should have meta object or is
// eligible for chunking.
// If more metadata formats (or versions of a format) are added in future,
// it may be advisable to factor it into a "metadata strategy" interface
// similar to chunkingReader or linearReader below.
// Register with Fs // Register with Fs
func init() { func init() {
fs.Register(&fs.RegInfo{ fs.Register(&fs.RegInfo{
@@ -261,7 +270,7 @@ func NewFs(name, rpath string, m configmap.Mapper) (fs.Fs, error) {
// detects a composite file because it finds the first chunk! // detects a composite file because it finds the first chunk!
// (yet can't satisfy fstest.CheckListing, will ignore) // (yet can't satisfy fstest.CheckListing, will ignore)
if err == nil && !f.useMeta && strings.Contains(rpath, "/") { if err == nil && !f.useMeta && strings.Contains(rpath, "/") {
firstChunkPath := f.makeChunkName(remotePath, 0, "", -1) firstChunkPath := f.makeChunkName(remotePath, 0, "", "")
_, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig) _, testErr := baseInfo.NewFs(baseName, firstChunkPath, baseConfig)
if testErr == fs.ErrorIsFile { if testErr == fs.ErrorIsFile {
err = testErr err = testErr
@@ -310,12 +319,16 @@ type Fs struct {
dataNameFmt string // name format of data chunks dataNameFmt string // name format of data chunks
ctrlNameFmt string // name format of control chunks ctrlNameFmt string // name format of control chunks
nameRegexp *regexp.Regexp // regular expression to match chunk names nameRegexp *regexp.Regexp // regular expression to match chunk names
xactIDRand *rand.Rand // generator of random transaction identifiers
xactIDMutex sync.Mutex // mutex for the source of randomness
opt Options // copy of Options opt Options // copy of Options
features *fs.Features // optional features features *fs.Features // optional features
dirSort bool // reserved for future, ignored dirSort bool // reserved for future, ignored
} }
// configure must be called only from NewFs or by unit tests // configure sets up chunker for given name format, meta format and hash type.
// It also seeds the source of random transaction identifiers.
// configure must be called only from NewFs or by unit tests.
func (f *Fs) configure(nameFormat, metaFormat, hashType string) error { func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setChunkNameFormat(nameFormat); err != nil { if err := f.setChunkNameFormat(nameFormat); err != nil {
return errors.Wrapf(err, "invalid name format '%s'", nameFormat) return errors.Wrapf(err, "invalid name format '%s'", nameFormat)
@@ -326,6 +339,10 @@ func (f *Fs) configure(nameFormat, metaFormat, hashType string) error {
if err := f.setHashType(hashType); err != nil { if err := f.setHashType(hashType); err != nil {
return err return err
} }
randomSeed := time.Now().UnixNano()
f.xactIDRand = rand.New(rand.NewSource(randomSeed))
return nil return nil
} }
@@ -414,13 +431,13 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
} }
reDataOrCtrl := fmt.Sprintf("(?:(%s)|_(%s))", reDigits, ctrlTypeRegStr) reDataOrCtrl := fmt.Sprintf("(?:(%s)|_(%s))", reDigits, ctrlTypeRegStr)
// this must be non-greedy or else it can eat up temporary suffix // this must be non-greedy or else it could eat up temporary suffix
const mainNameRegStr = "(.+?)" const mainNameRegStr = "(.+?)"
strRegex := regexp.QuoteMeta(pattern) strRegex := regexp.QuoteMeta(pattern)
strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl) strRegex = reHashes.ReplaceAllLiteralString(strRegex, reDataOrCtrl)
strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1) strRegex = strings.Replace(strRegex, "\\*", mainNameRegStr, -1)
strRegex = fmt.Sprintf("^%s(?:%s)?$", strRegex, tempChunkRegStr) strRegex = fmt.Sprintf("^%s(?:%s|%s)?$", strRegex, tempSuffixRegStr, tempSuffixRegOld)
f.nameRegexp = regexp.MustCompile(strRegex) f.nameRegexp = regexp.MustCompile(strRegex)
// craft printf formats for active data/control chunks // craft printf formats for active data/control chunks
@@ -435,34 +452,36 @@ func (f *Fs) setChunkNameFormat(pattern string) error {
return nil return nil
} }
// makeChunkName produces chunk name (or path) for given file. // makeChunkName produces chunk name (or path) for a given file.
// //
// mainPath can be name, relative or absolute path of main file. // filePath can be name, relative or absolute path of main file.
// //
// chunkNo must be a zero based index of data chunk. // chunkNo must be a zero based index of data chunk.
// Negative chunkNo eg. -1 indicates a control chunk. // Negative chunkNo eg. -1 indicates a control chunk.
// ctrlType is type of control chunk (must be valid). // ctrlType is type of control chunk (must be valid).
// ctrlType must be "" for data chunks. // ctrlType must be "" for data chunks.
// //
// xactNo is a transaction number. // xactID is a transaction identifier. Empty xactID denotes active chunk,
// Negative xactNo eg. -1 indicates an active chunk, // otherwise temporary chunk name is produced.
// otherwise produce temporary chunk name.
// //
func (f *Fs) makeChunkName(mainPath string, chunkNo int, ctrlType string, xactNo int64) string { func (f *Fs) makeChunkName(filePath string, chunkNo int, ctrlType, xactID string) string {
dir, mainName := path.Split(mainPath) dir, parentName := path.Split(filePath)
var name string var name, tempSuffix string
switch { switch {
case chunkNo >= 0 && ctrlType == "": case chunkNo >= 0 && ctrlType == "":
name = fmt.Sprintf(f.dataNameFmt, mainName, chunkNo+f.opt.StartFrom) name = fmt.Sprintf(f.dataNameFmt, parentName, chunkNo+f.opt.StartFrom)
case chunkNo < 0 && ctrlTypeRegexp.MatchString(ctrlType): case chunkNo < 0 && ctrlTypeRegexp.MatchString(ctrlType):
name = fmt.Sprintf(f.ctrlNameFmt, mainName, ctrlType) name = fmt.Sprintf(f.ctrlNameFmt, parentName, ctrlType)
default: default:
panic("makeChunkName: invalid argument") // must not produce something we can't consume panic("makeChunkName: invalid argument") // must not produce something we can't consume
} }
if xactNo >= 0 { if xactID != "" {
name = fmt.Sprintf(tempChunkFormat, name, xactNo) tempSuffix = fmt.Sprintf(tempSuffixFormat, xactID)
if !tempSuffixRegexp.MatchString(tempSuffix) {
panic("makeChunkName: invalid argument")
}
} }
return dir + name return dir + name + tempSuffix
} }
// parseChunkName checks whether given file path belongs to // parseChunkName checks whether given file path belongs to
@@ -470,20 +489,21 @@ func (f *Fs) makeChunkName(mainPath string, chunkNo int, ctrlType string, xactNo
// //
// filePath can be name, relative or absolute path of a file. // filePath can be name, relative or absolute path of a file.
// //
// Returned mainPath is a non-empty string if valid chunk name // Returned parentPath is path of the composite file owning the chunk.
// is detected or "" if it's not a chunk. // It's a non-empty string if valid chunk name is detected
// or "" if it's not a chunk.
// Other returned values depend on detected chunk type: // Other returned values depend on detected chunk type:
// data or control, active or temporary: // data or control, active or temporary:
// //
// data chunk - the returned chunkNo is non-negative and ctrlType is "" // data chunk - the returned chunkNo is non-negative and ctrlType is ""
// control chunk - the chunkNo is -1 and ctrlType is non-empty string // control chunk - the chunkNo is -1 and ctrlType is a non-empty string
// active chunk - the returned xactNo is -1 // active chunk - the returned xactID is ""
// temporary chunk - the xactNo is non-negative integer // temporary chunk - the xactID is a non-empty string
func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrlType string, xactNo int64) { func (f *Fs) parseChunkName(filePath string) (parentPath string, chunkNo int, ctrlType, xactID string) {
dir, name := path.Split(filePath) dir, name := path.Split(filePath)
match := f.nameRegexp.FindStringSubmatch(name) match := f.nameRegexp.FindStringSubmatch(name)
if match == nil || match[1] == "" { if match == nil || match[1] == "" {
return "", -1, "", -1 return "", -1, "", ""
} }
var err error var err error
@@ -494,19 +514,26 @@ func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrl
} }
if chunkNo -= f.opt.StartFrom; chunkNo < 0 { if chunkNo -= f.opt.StartFrom; chunkNo < 0 {
fs.Infof(f, "invalid data chunk number in file %q", name) fs.Infof(f, "invalid data chunk number in file %q", name)
return "", -1, "", -1 return "", -1, "", ""
} }
} }
xactNo = -1
if match[4] != "" { if match[4] != "" {
if xactNo, err = strconv.ParseInt(match[4], 10, 64); err != nil || xactNo < 0 { xactID = match[4]
fs.Infof(f, "invalid transaction number in file %q", name) }
return "", -1, "", -1 if match[5] != "" {
// old-style temporary suffix
number, err := strconv.ParseInt(match[5], 10, 64)
if err != nil || number < 0 {
fs.Infof(f, "invalid old-style transaction number in file %q", name)
return "", -1, "", ""
} }
// convert old-style transaction number to base-36 transaction ID
xactID = fmt.Sprintf(tempSuffixFormat, strconv.FormatInt(number, 36))
xactID = xactID[1:] // strip leading underscore
} }
mainPath = dir + match[1] parentPath = dir + match[1]
ctrlType = match[3] ctrlType = match[3]
return return
} }
@@ -514,17 +541,74 @@ func (f *Fs) parseChunkName(filePath string) (mainPath string, chunkNo int, ctrl
// forbidChunk prints error message or raises error if file is chunk. // forbidChunk prints error message or raises error if file is chunk.
// First argument sets log prefix, use `false` to suppress message. // First argument sets log prefix, use `false` to suppress message.
func (f *Fs) forbidChunk(o interface{}, filePath string) error { func (f *Fs) forbidChunk(o interface{}, filePath string) error {
if mainPath, _, _, _ := f.parseChunkName(filePath); mainPath != "" { if parentPath, _, _, _ := f.parseChunkName(filePath); parentPath != "" {
if f.opt.FailHard { if f.opt.FailHard {
return fmt.Errorf("chunk overlap with %q", mainPath) return fmt.Errorf("chunk overlap with %q", parentPath)
} }
if boolVal, isBool := o.(bool); !isBool || boolVal { if boolVal, isBool := o.(bool); !isBool || boolVal {
fs.Errorf(o, "chunk overlap with %q", mainPath) fs.Errorf(o, "chunk overlap with %q", parentPath)
} }
} }
return nil return nil
} }
// newXactID produces a sufficiently random transaction identifier.
//
// The temporary suffix mask allows identifiers consisting of 4-9
// base-36 digits (ie. digits 0-9 or lowercase letters a-z).
// The identifiers must be unique between transactions running on
// the single file in parallel.
//
// Currently the function produces 6-character identifiers.
// Together with underscore this makes a 7-character temporary suffix.
//
// The first 4 characters isolate groups of transactions by time intervals.
// The maximum length of interval is base-36 "zzzz" ie. 1,679,615 seconds.
// The function rather takes a maximum prime closest to this number
// (see https://primes.utm.edu) as the interval length to better safeguard
// against repeating pseudo-random sequences in cases when rclone is
// invoked from a periodic scheduler like unix cron.
// Thus, the interval is slightly more than 19 days 10 hours 33 minutes.
//
// The remaining 2 base-36 digits (in the range from 0 to 1295 inclusive)
// are taken from the local random source.
// This provides about 0.1% collision probability for two parallel
// operations started at the same second and working on the same file.
//
// Non-empty filePath argument enables probing for existing temporary chunk
// to further eliminate collisions.
func (f *Fs) newXactID(ctx context.Context, filePath string) (xactID string, err error) {
const closestPrimeZzzzSeconds = 1679609
const maxTwoBase36Digits = 1295
unixSec := time.Now().Unix()
if unixSec < 0 {
unixSec = -unixSec // unlikely but the number must be positive
}
circleSec := unixSec % closestPrimeZzzzSeconds
first4chars := strconv.FormatInt(circleSec, 36)
for tries := 0; tries < maxTransactionProbes; tries++ {
f.xactIDMutex.Lock()
randomness := f.xactIDRand.Int63n(maxTwoBase36Digits + 1)
f.xactIDMutex.Unlock()
last2chars := strconv.FormatInt(randomness, 36)
xactID = fmt.Sprintf("%04s%02s", first4chars, last2chars)
if filePath == "" {
return
}
probeChunk := f.makeChunkName(filePath, 0, "", xactID)
_, probeErr := f.base.NewObject(ctx, probeChunk)
if probeErr != nil {
return
}
}
return "", fmt.Errorf("can't setup transaction for %s", filePath)
}
// List the objects and directories in dir into entries. // List the objects and directories in dir into entries.
// The entries can be returned in any order but should be // The entries can be returned in any order but should be
// for a complete directory. // for a complete directory.
@@ -602,8 +686,8 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
switch entry := dirOrObject.(type) { switch entry := dirOrObject.(type) {
case fs.Object: case fs.Object:
remote := entry.Remote() remote := entry.Remote()
if mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(remote); mainRemote != "" { if mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(remote); mainRemote != "" {
if xactNo != -1 { if xactID != "" {
if revealHidden { if revealHidden {
fs.Infof(f, "ignore temporary chunk %q", remote) fs.Infof(f, "ignore temporary chunk %q", remote)
} }
@@ -686,7 +770,7 @@ func (f *Fs) processEntries(ctx context.Context, origEntries fs.DirEntries, dirP
// //
// Please note that every NewObject invocation will scan the whole directory. // Please note that every NewObject invocation will scan the whole directory.
// Using here something like fs.DirCache might improve performance // Using here something like fs.DirCache might improve performance
// (but will make logic more complex, though). // (yet making the logic more complex).
// //
// Note that chunker prefers analyzing file names rather than reading // Note that chunker prefers analyzing file names rather than reading
// the content of meta object assuming that directory scans are fast // the content of meta object assuming that directory scans are fast
@@ -752,8 +836,8 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if !strings.Contains(entryRemote, remote) { if !strings.Contains(entryRemote, remote) {
continue // bypass regexp to save cpu continue // bypass regexp to save cpu
} }
mainRemote, chunkNo, ctrlType, xactNo := f.parseChunkName(entryRemote) mainRemote, chunkNo, ctrlType, xactID := f.parseChunkName(entryRemote)
if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactNo != -1 { if mainRemote == "" || mainRemote != remote || ctrlType != "" || xactID != "" {
continue // skip non-conforming, temporary and control chunks continue // skip non-conforming, temporary and control chunks
} }
//fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo) //fs.Debugf(f, "%q belongs to %q as chunk %d", entryRemote, mainRemote, chunkNo)
@@ -786,7 +870,7 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This is either a composite object with metadata or a non-chunked // This is either a composite object with metadata or a non-chunked
// file without metadata. Validate it and update the total data size. // file without metadata. Validate it and update the total data size.
// As an optimization, skip metadata reading here - we will call // As an optimization, skip metadata reading here - we will call
// readMetadata lazily when needed. // readMetadata lazily when needed (reading can be expensive).
if err := o.validate(); err != nil { if err := o.validate(); err != nil {
return nil, err return nil, err
} }
@@ -843,14 +927,11 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
} }
}() }()
// Use system timer as a trivial source of transaction numbers,
// don't try hard to safeguard against chunk collisions between
// parallel transactions.
xactNo := time.Now().Unix()
if xactNo < 0 {
xactNo = -xactNo // unlikely but transaction number must be positive
}
baseRemote := remote baseRemote := remote
xactID, errXact := f.newXactID(ctx, baseRemote)
if errXact != nil {
return nil, errXact
}
// Transfer chunks data // Transfer chunks data
for c.chunkNo = 0; !c.done; c.chunkNo++ { for c.chunkNo = 0; !c.done; c.chunkNo++ {
@@ -858,7 +939,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
return nil, ErrChunkOverflow return nil, ErrChunkOverflow
} }
tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactNo) tempRemote := f.makeChunkName(baseRemote, c.chunkNo, "", xactID)
size := c.sizeLeft size := c.sizeLeft
if size > c.chunkSize { if size > c.chunkSize {
size = c.chunkSize size = c.chunkSize
@@ -962,7 +1043,7 @@ func (f *Fs) put(ctx context.Context, in io.Reader, src fs.ObjectInfo, remote st
// Rename data chunks from temporary to final names // Rename data chunks from temporary to final names
for chunkNo, chunk := range c.chunks { for chunkNo, chunk := range c.chunks {
chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", -1) chunkRemote := f.makeChunkName(baseRemote, chunkNo, "", "")
chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed) chunkMoved, errMove := f.baseMove(ctx, chunk, chunkRemote, delFailed)
if errMove != nil { if errMove != nil {
return nil, errMove return nil, errMove
@@ -1221,11 +1302,6 @@ func (f *Fs) PutUnchecked(ctx context.Context, in io.Reader, src fs.ObjectInfo,
return f.newObject("", o, nil), nil return f.newObject("", o, nil), nil
} }
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
// Chunker advertises a hash type if and only if it can be calculated // Chunker advertises a hash type if and only if it can be calculated
// for files of any size, non-chunked or composite. // for files of any size, non-chunked or composite.
@@ -1613,8 +1689,8 @@ func (f *Fs) ChangeNotify(ctx context.Context, notifyFunc func(string, fs.EntryT
wrappedNotifyFunc := func(path string, entryType fs.EntryType) { wrappedNotifyFunc := func(path string, entryType fs.EntryType) {
//fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType) //fs.Debugf(f, "ChangeNotify: path %q entryType %d", path, entryType)
if entryType == fs.EntryObject { if entryType == fs.EntryObject {
mainPath, _, _, xactNo := f.parseChunkName(path) mainPath, _, _, xactID := f.parseChunkName(path)
if mainPath != "" && xactNo == -1 { if mainPath != "" && xactID == "" {
path = mainPath path = mainPath
} }
} }
@@ -2063,7 +2139,7 @@ type metaSimpleJSON struct {
// Current implementation creates metadata in three cases: // Current implementation creates metadata in three cases:
// - for files larger than chunk size // - for files larger than chunk size
// - if file contents can be mistaken as meta object // - if file contents can be mistaken as meta object
// - if consistent hashing is on but wrapped remote can't provide given hash // - if consistent hashing is On but wrapped remote can't provide given hash
// //
func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) { func marshalSimpleJSON(ctx context.Context, size int64, nChunks int, md5, sha1 string) ([]byte, error) {
version := metadataVersion version := metadataVersion
@@ -2177,6 +2253,11 @@ func (f *Fs) String() string {
return fmt.Sprintf("Chunked '%s:%s'", f.name, f.root) return fmt.Sprintf("Chunked '%s:%s'", f.name, f.root)
} }
// Precision returns the precision of this Fs
func (f *Fs) Precision() time.Duration {
return f.base.Precision()
}
// Check the interfaces are satisfied // Check the interfaces are satisfied
var ( var (
_ fs.Fs = (*Fs)(nil) _ fs.Fs = (*Fs)(nil)

View File

@@ -64,35 +64,40 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
assert.Error(t, err) assert.Error(t, err)
} }
assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType string, xactNo int64) { assertMakeName := func(wantChunkName, mainName string, chunkNo int, ctrlType, xactID string) {
gotChunkName := f.makeChunkName(mainName, chunkNo, ctrlType, xactNo) gotChunkName := ""
assert.Equal(t, wantChunkName, gotChunkName) assert.NotPanics(t, func() {
gotChunkName = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%q) must not panic", mainName, chunkNo, ctrlType, xactID)
if gotChunkName != "" {
assert.Equal(t, wantChunkName, gotChunkName)
}
} }
assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType string, xactNo int64) { assertMakeNamePanics := func(mainName string, chunkNo int, ctrlType, xactID string) {
assert.Panics(t, func() { assert.Panics(t, func() {
_ = f.makeChunkName(mainName, chunkNo, ctrlType, xactNo) _ = f.makeChunkName(mainName, chunkNo, ctrlType, xactID)
}, "makeChunkName(%q,%d,%q,%d) should panic", mainName, chunkNo, ctrlType, xactNo) }, "makeChunkName(%q,%d,%q,%q) should panic", mainName, chunkNo, ctrlType, xactID)
} }
assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType string, wantXactNo int64) { assertParseName := func(fileName, wantMainName string, wantChunkNo int, wantCtrlType, wantXactID string) {
gotMainName, gotChunkNo, gotCtrlType, gotXactNo := f.parseChunkName(fileName) gotMainName, gotChunkNo, gotCtrlType, gotXactID := f.parseChunkName(fileName)
assert.Equal(t, wantMainName, gotMainName) assert.Equal(t, wantMainName, gotMainName)
assert.Equal(t, wantChunkNo, gotChunkNo) assert.Equal(t, wantChunkNo, gotChunkNo)
assert.Equal(t, wantCtrlType, gotCtrlType) assert.Equal(t, wantCtrlType, gotCtrlType)
assert.Equal(t, wantXactNo, gotXactNo) assert.Equal(t, wantXactID, gotXactID)
} }
const newFormatSupported = false // support for patterns not starting with base name (*) const newFormatSupported = false // support for patterns not starting with base name (*)
// valid formats // valid formats
assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`*.rclone_chunk.###`, `%s.rclone_chunk.%03d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`*.rclone_chunk.#`, `%s.rclone_chunk.%d`, `%s.rclone_chunk._%s`, `^(.+?)\.rclone_chunk\.(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`*_chunk_#####`, `%s_chunk_%05d`, `%s_chunk__%s`, `^(.+?)_chunk_(?:([0-9]{5,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`*-chunk-#`, `%s-chunk-%d`, `%s-chunk-_%s`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z]{3,9}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`*-chunk-#-%^$()[]{}.+-!?:\`, `%s-chunk-%d-%%^$()[]{}.+-!?:\`, `%s-chunk-_%s-%%^$()[]{}.+-!?:\`, `^(.+?)-chunk-(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))-%\^\$\(\)\[\]\{\}\.\+-!\?:\\(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
if newFormatSupported { if newFormatSupported {
assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z]{3,9})),(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`_*-chunk-##,`, `_%s-chunk-%02d,`, `_%s-chunk-_%s,`, `^_(.+?)-chunk-(?:([0-9]{2,})|_([a-z][a-z0-9]{2,6})),(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
} }
// invalid formats // invalid formats
@@ -111,142 +116,223 @@ func testChunkNameFormat(t *testing.T, f *Fs) {
// quick tests // quick tests
if newFormatSupported { if newFormatSupported {
assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`part_*_#`, `part_%s_%d`, `part_%s__%s`, `^part_(.+?)_(?:([0-9]+)|_([a-z][a-z0-9]{2,6}))(?:_([0-9][0-9a-z]{3,8})\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 1 f.opt.StartFrom = 1
assertMakeName(`part_fish_1`, "fish", 0, "", -1) assertMakeName(`part_fish_1`, "fish", 0, "", "")
assertParseName(`part_fish_43`, "fish", 42, "", -1) assertParseName(`part_fish_43`, "fish", 42, "", "")
assertMakeName(`part_fish_3..tmp_0000000004`, "fish", 2, "", 4) assertMakeName(`part_fish__locks`, "fish", -2, "locks", "")
assertParseName(`part_fish_4..tmp_0000000005`, "fish", 3, "", 5) assertParseName(`part_fish__locks`, "fish", -1, "locks", "")
assertMakeName(`part_fish__locks`, "fish", -2, "locks", -3) assertMakeName(`part_fish__x2y`, "fish", -2, "x2y", "")
assertParseName(`part_fish__locks`, "fish", -1, "locks", -1) assertParseName(`part_fish__x2y`, "fish", -1, "x2y", "")
assertMakeName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -3, "blockinfo", 1234567890123456789) assertMakeName(`part_fish_3_0004`, "fish", 2, "", "4")
assertParseName(`part_fish__blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789) assertParseName(`part_fish_4_0005`, "fish", 3, "", "0005")
assertMakeName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -3, "blkinfo", "jj5fvo3wr")
assertParseName(`part_fish__blkinfo_zz9fvo3wr`, "fish", -1, "blkinfo", "zz9fvo3wr")
// old-style temporary suffix (parse only)
assertParseName(`part_fish_4..tmp_0000000011`, "fish", 3, "", "000b")
assertParseName(`part_fish__blkinfo_jj5fvo3wr`, "fish", -1, "blkinfo", "jj5fvo3wr")
} }
// prepare format for long tests // prepare format for long tests
assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z]{3,9}))(?:\.\.tmp_([0-9]{10,19}))?$`) assertFormat(`*.chunk.###`, `%s.chunk.%03d`, `%s.chunk._%s`, `^(.+?)\.chunk\.(?:([0-9]{3,})|_([a-z][a-z0-9]{2,6}))(?:_([0-9a-z]{4,9})|\.\.tmp_([0-9]{10,13}))?$`)
f.opt.StartFrom = 2 f.opt.StartFrom = 2
// valid data chunks // valid data chunks
assertMakeName(`fish.chunk.003`, "fish", 1, "", -1) assertMakeName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.011..tmp_0000054321`, "fish", 9, "", 54321) assertParseName(`fish.chunk.003`, "fish", 1, "", "")
assertMakeName(`fish.chunk.011..tmp_1234567890`, "fish", 9, "", 1234567890) assertMakeName(`fish.chunk.021`, "fish", 19, "", "")
assertMakeName(`fish.chunk.1916..tmp_123456789012345`, "fish", 1914, "", 123456789012345) assertParseName(`fish.chunk.021`, "fish", 19, "", "")
assertParseName(`fish.chunk.003`, "fish", 1, "", -1) // valid temporary data chunks
assertParseName(`fish.chunk.004..tmp_0000000021`, "fish", 2, "", 21) assertMakeName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.021`, "fish", 19, "", -1) assertParseName(`fish.chunk.011_4321`, "fish", 9, "", "4321")
assertParseName(`fish.chunk.323..tmp_1234567890123456789`, "fish", 321, "", 1234567890123456789) assertMakeName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertParseName(`fish.chunk.011_00bc`, "fish", 9, "", "00bc")
assertMakeName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertParseName(`fish.chunk.1916_5jjfvo3wr`, "fish", 1914, "", "5jjfvo3wr")
assertMakeName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
assertParseName(`fish.chunk.1917_zz9fvo3wr`, "fish", 1915, "", "zz9fvo3wr")
// valid temporary data chunks (old temporary suffix, only parse)
assertParseName(`fish.chunk.004..tmp_0000000047`, "fish", 2, "", "001b")
assertParseName(`fish.chunk.323..tmp_9994567890123`, "fish", 321, "", "3jjfvo3wr")
// parsing invalid data chunk names // parsing invalid data chunk names
assertParseName(`fish.chunk.3`, "", -1, "", -1) assertParseName(`fish.chunk.3`, "", -1, "", "")
assertParseName(`fish.chunk.001`, "", -1, "", -1) assertParseName(`fish.chunk.001`, "", -1, "", "")
assertParseName(`fish.chunk.21`, "", -1, "", -1) assertParseName(`fish.chunk.21`, "", -1, "", "")
assertParseName(`fish.chunk.-21`, "", -1, "", -1) assertParseName(`fish.chunk.-21`, "", -1, "", "")
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", -1) assertParseName(`fish.chunk.004abcd`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", -1) assertParseName(`fish.chunk.004__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", -1) assertParseName(`fish.chunk.004_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", -1) assertParseName(`fish.chunk.004_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk.004_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk.004_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk.004_12.3`, "", -1, "", "") // punctuation not allowed
// parsing invalid data chunk names (old temporary suffix)
assertParseName(`fish.chunk.004.tmp_0000000021`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_012345678901234567890123456789`, "", -1, "", "")
assertParseName(`fish.chunk.323..tmp_12345678901234`, "", -1, "", "")
assertParseName(`fish.chunk.003..tmp_-1`, "", -1, "", "")
// valid control chunks // valid control chunks
assertMakeName(`fish.chunk._info`, "fish", -1, "info", -1) assertMakeName(`fish.chunk._info`, "fish", -1, "info", "")
assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", -1) assertMakeName(`fish.chunk._locks`, "fish", -2, "locks", "")
assertMakeName(`fish.chunk._blockinfo`, "fish", -3, "blockinfo", -1) assertMakeName(`fish.chunk._blkinfo`, "fish", -3, "blkinfo", "")
assertMakeName(`fish.chunk._x2y`, "fish", -4, "x2y", "")
assertParseName(`fish.chunk._info`, "fish", -1, "info", -1) assertParseName(`fish.chunk._info`, "fish", -1, "info", "")
assertParseName(`fish.chunk._locks`, "fish", -1, "locks", -1) assertParseName(`fish.chunk._locks`, "fish", -1, "locks", "")
assertParseName(`fish.chunk._blockinfo`, "fish", -1, "blockinfo", -1) assertParseName(`fish.chunk._blkinfo`, "fish", -1, "blkinfo", "")
assertParseName(`fish.chunk._x2y`, "fish", -1, "x2y", "")
// valid temporary control chunks // valid temporary control chunks
assertMakeName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21) assertMakeName(`fish.chunk._info_0001`, "fish", -1, "info", "1")
assertMakeName(`fish.chunk._locks..tmp_0000054321`, "fish", -2, "locks", 54321) assertMakeName(`fish.chunk._locks_4321`, "fish", -2, "locks", "4321")
assertMakeName(`fish.chunk._uploads..tmp_0000000000`, "fish", -3, "uploads", 0) assertMakeName(`fish.chunk._uploads_abcd`, "fish", -3, "uploads", "abcd")
assertMakeName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -4, "blockinfo", 1234567890123456789) assertMakeName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -4, "blkinfo", "xyzabcdef")
assertMakeName(`fish.chunk._x2y_1aaa`, "fish", -5, "x2y", "1aaa")
assertParseName(`fish.chunk._info..tmp_0000000021`, "fish", -1, "info", 21) assertParseName(`fish.chunk._info_0001`, "fish", -1, "info", "0001")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", 54321) assertParseName(`fish.chunk._locks_4321`, "fish", -1, "locks", "4321")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", 0) assertParseName(`fish.chunk._uploads_9abc`, "fish", -1, "uploads", "9abc")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789`, "fish", -1, "blockinfo", 1234567890123456789) assertParseName(`fish.chunk._blkinfo_xyzabcdef`, "fish", -1, "blkinfo", "xyzabcdef")
assertParseName(`fish.chunk._x2y_1aaa`, "fish", -1, "x2y", "1aaa")
// valid temporary control chunks (old temporary suffix, parse only)
assertParseName(`fish.chunk._info..tmp_0000000047`, "fish", -1, "info", "001b")
assertParseName(`fish.chunk._locks..tmp_0000054321`, "fish", -1, "locks", "15wx")
assertParseName(`fish.chunk._uploads..tmp_0000000000`, "fish", -1, "uploads", "0000")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123`, "fish", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._x2y..tmp_0000000000`, "fish", -1, "x2y", "0000")
// parsing invalid control chunk names // parsing invalid control chunk names
assertParseName(`fish.chunk.info`, "", -1, "", -1) assertParseName(`fish.chunk.metadata`, "", -1, "", "") // must be prepended by underscore
assertParseName(`fish.chunk.locks`, "", -1, "", -1) assertParseName(`fish.chunk.info`, "", -1, "", "")
assertParseName(`fish.chunk.uploads`, "", -1, "", -1) assertParseName(`fish.chunk.locks`, "", -1, "", "")
assertParseName(`fish.chunk.blockinfo`, "", -1, "", -1) assertParseName(`fish.chunk.uploads`, "", -1, "", "")
assertParseName(`fish.chunk._os`, "", -1, "", -1) assertParseName(`fish.chunk._os`, "", -1, "", "") // too short
assertParseName(`fish.chunk._futuredata`, "", -1, "", -1) assertParseName(`fish.chunk._metadata`, "", -1, "", "") // too long
assertParseName(`fish.chunk._me_ta`, "", -1, "", -1) assertParseName(`fish.chunk._blockinfo`, "", -1, "", "") // way too long
assertParseName(`fish.chunk._in-fo`, "", -1, "", -1) assertParseName(`fish.chunk._4me`, "", -1, "", "") // cannot start with digit
assertParseName(`fish.chunk._.bin`, "", -1, "", -1) assertParseName(`fish.chunk._567`, "", -1, "", "") // cannot be all digits
assertParseName(`fish.chunk._me_ta`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._in-fo`, "", -1, "", "")
assertParseName(`fish.chunk._.bin`, "", -1, "", "")
assertParseName(`fish.chunk._.2xy`, "", -1, "", "")
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", -1) // parsing invalid temporary control chunks
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", -1) assertParseName(`fish.chunk._blkinfo1234`, "", -1, "", "") // missing underscore delimiter
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", -1) assertParseName(`fish.chunk._info__1234`, "", -1, "", "") // extra underscore delimiter
assertParseName(`fish.chunk._info_123`, "", -1, "", "") // too short temporary suffix
assertParseName(`fish.chunk._info_1234567890`, "", -1, "", "") // too long temporary suffix
assertParseName(`fish.chunk._info_-1234`, "", -1, "", "") // temporary suffix must be positive
assertParseName(`fish.chunk._info_123E`, "", -1, "", "") // uppercase not allowed
assertParseName(`fish.chunk._info_12.3`, "", -1, "", "") // punctuation not allowed
assertParseName(`fish.chunk._locks..tmp_123456789`, "", -1, "", "")
assertParseName(`fish.chunk._meta..tmp_-1`, "", -1, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_012345678901234567890123456789`, "", -1, "", "")
// short control chunk names: 3 letters ok, 1-2 letters not allowed // short control chunk names: 3 letters ok, 1-2 letters not allowed
assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", -1) assertMakeName(`fish.chunk._ext`, "fish", -1, "ext", "")
assertMakeName(`fish.chunk._ext..tmp_0000000021`, "fish", -1, "ext", 21) assertParseName(`fish.chunk._int`, "fish", -1, "int", "")
assertParseName(`fish.chunk._int`, "fish", -1, "int", -1)
assertParseName(`fish.chunk._int..tmp_0000000021`, "fish", -1, "int", 21) assertMakeNamePanics("fish", -1, "in", "")
assertMakeNamePanics("fish", -1, "in", -1) assertMakeNamePanics("fish", -1, "up", "4")
assertMakeNamePanics("fish", -1, "up", 4) assertMakeNamePanics("fish", -1, "x", "")
assertMakeNamePanics("fish", -1, "x", -1) assertMakeNamePanics("fish", -1, "c", "1z")
assertMakeNamePanics("fish", -1, "c", 4)
assertMakeName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0")
assertMakeName(`fish.chunk._ext_0026`, "fish", -1, "ext", "26")
assertMakeName(`fish.chunk._int_0abc`, "fish", -1, "int", "abc")
assertMakeName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertMakeName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._ext_0000`, "fish", -1, "ext", "0000")
assertParseName(`fish.chunk._ext_0026`, "fish", -1, "ext", "0026")
assertParseName(`fish.chunk._int_0abc`, "fish", -1, "int", "0abc")
assertParseName(`fish.chunk._int_9xyz`, "fish", -1, "int", "9xyz")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
assertParseName(`fish.chunk._out_jj5fvo3wr`, "fish", -1, "out", "jj5fvo3wr")
// base file name can sometimes look like a valid chunk name // base file name can sometimes look like a valid chunk name
assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", -1) assertParseName(`fish.chunk.003.chunk.004`, "fish.chunk.003", 2, "", "")
assertParseName(`fish.chunk.003.chunk.005..tmp_0000000021`, "fish.chunk.003", 3, "", 21) assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", "")
assertParseName(`fish.chunk.003.chunk._info`, "fish.chunk.003", -1, "info", -1) assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.003.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.003", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.003.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", -1) assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000021`, "fish.chunk.004..tmp_0000000021", 3, "", 21) assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", -1) assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk.004..tmp_0000000021", -1, "blockinfo", 1234567890123456789)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", -1)
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._info.chunk.004`, "fish.chunk._info", 2, "", -1) // base file name looking like a valid chunk name (old temporary suffix)
assertParseName(`fish.chunk._info.chunk.005..tmp_0000000021`, "fish.chunk._info", 3, "", 21) assertParseName(`fish.chunk.003.chunk.005..tmp_0000000022`, "fish.chunk.003", 3, "", "000m")
assertParseName(`fish.chunk._info.chunk._info`, "fish.chunk._info", -1, "info", -1) assertParseName(`fish.chunk.003.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._info", -1, "blockinfo", 1234567890123456789) assertParseName(`fish.chunk._info.chunk.005..tmp_0000000023`, "fish.chunk._info", 3, "", "000n")
assertParseName(`fish.chunk._info.chunk._info.chunk._Meta`, "", -1, "", -1) assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._info.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blockinfo..tmp_1234567890123456789", 2, "", -1) assertParseName(`fish.chunk.003.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.003", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk.005..tmp_0000000021`, "fish.chunk._blockinfo..tmp_1234567890123456789", 3, "", 21) assertParseName(`fish.chunk._info.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._info", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "info", -1)
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._blockinfo..tmp_1234567890123456789`, "fish.chunk._blockinfo..tmp_1234567890123456789", -1, "blockinfo", 1234567890123456789) assertParseName(`fish.chunk.004..tmp_0000000021.chunk.004`, "fish.chunk.004..tmp_0000000021", 2, "", "")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", -1) assertParseName(`fish.chunk.004..tmp_0000000021.chunk.005..tmp_0000000025`, "fish.chunk.004..tmp_0000000021", 3, "", "000p")
assertParseName(`fish.chunk._blockinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", -1) assertParseName(`fish.chunk.004..tmp_0000000021.chunk._info`, "fish.chunk.004..tmp_0000000021", -1, "info", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._blkinfo..tmp_9994567890123`, "fish.chunk.004..tmp_0000000021", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk.004..tmp_0000000021.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.004`, "fish.chunk._blkinfo..tmp_9994567890123", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk.005..tmp_0000000026`, "fish.chunk._blkinfo..tmp_9994567890123", 3, "", "000q")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_9994567890123", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_9994567890123.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.004`, "fish.chunk._blkinfo..tmp_1234567890123456789", 2, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk.005..tmp_0000000022`, "fish.chunk._blkinfo..tmp_1234567890123456789", 3, "", "000m")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "info", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._blkinfo..tmp_9994567890123`, "fish.chunk._blkinfo..tmp_1234567890123456789", -1, "blkinfo", "3jjfvo3wr")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._Meta`, "", -1, "", "")
assertParseName(`fish.chunk._blkinfo..tmp_1234567890123456789.chunk._info.chunk._x..tmp_0000054321`, "", -1, "", "")
// attempts to make invalid chunk names
assertMakeNamePanics("fish", -1, "", -1) // neither data nor control
assertMakeNamePanics("fish", -1, "", "") // neither data nor control
assertMakeNamePanics("fish", 0, "info", -1) // both data and control
assertMakeNamePanics("fish", 0, "info", "") // both data and control
assertMakeNamePanics("fish", -1, "futuredata", -1) // control type too long
assertMakeNamePanics("fish", -1, "metadata", "") // control type too long
assertMakeNamePanics("fish", -1, "123", -1) // digits not allowed
assertMakeNamePanics("fish", -1, "blockinfo", "") // control type way too long
assertMakeNamePanics("fish", -1, "Meta", -1) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "2xy", "") // first digit not allowed
assertMakeNamePanics("fish", -1, "in-fo", -1) // punctuation not allowed
assertMakeNamePanics("fish", -1, "123", "") // all digits not allowed
assertMakeNamePanics("fish", -1, "_info", -1)
assertMakeNamePanics("fish", -1, "Meta", "") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "info_", -1)
assertMakeNamePanics("fish", -1, "in-fo", "") // punctuation not allowed
assertMakeNamePanics("fish", -2, ".bind", -3)
assertMakeNamePanics("fish", -1, "_info", "")
assertMakeNamePanics("fish", -2, "bind.", -3)
assertMakeNamePanics("fish", -1, "info_", "")
assertMakeNamePanics("fish", -2, ".bind", "")
assertMakeNamePanics("fish", -2, "bind.", "")
assertMakeNamePanics("fish", -1, "", 1) // neither data nor control
assertMakeNamePanics("fish", -1, "", "1") // neither data nor control
assertMakeNamePanics("fish", 0, "info", 12) // both data and control
assertMakeNamePanics("fish", 0, "info", "23") // both data and control
assertMakeNamePanics("fish", -1, "futuredata", 45) // control type too long
assertMakeNamePanics("fish", -1, "metadata", "45") // control type too long
assertMakeNamePanics("fish", -1, "123", 123) // digits not allowed
assertMakeNamePanics("fish", -1, "blockinfo", "7") // control type way too long
assertMakeNamePanics("fish", -1, "Meta", 456) // only lower case letters allowed
assertMakeNamePanics("fish", -1, "2xy", "abc") // first digit not allowed
assertMakeNamePanics("fish", -1, "in-fo", 321) // punctuation not allowed
assertMakeNamePanics("fish", -1, "123", "def") // all digits not allowed
assertMakeNamePanics("fish", -1, "_info", 15678)
assertMakeNamePanics("fish", -1, "Meta", "mnk") // only lower case letters allowed
assertMakeNamePanics("fish", -1, "info_", 999)
assertMakeNamePanics("fish", -1, "in-fo", "xyz") // punctuation not allowed
assertMakeNamePanics("fish", -2, ".bind", 0)
assertMakeNamePanics("fish", -1, "_info", "5678")
assertMakeNamePanics("fish", -2, "bind.", 0)
assertMakeNamePanics("fish", -1, "info_", "999")
assertMakeNamePanics("fish", -2, ".bind", "0")
assertMakeNamePanics("fish", -2, "bind.", "0")
assertMakeNamePanics("fish", 0, "", "1234567890") // temporary suffix too long
assertMakeNamePanics("fish", 0, "", "123F4") // uppercase not allowed
assertMakeNamePanics("fish", 0, "", "123.") // punctuation not allowed
assertMakeNamePanics("fish", 0, "", "_123")
}

func testSmallFileInternals(t *testing.T, f *Fs) {
@@ -383,7 +469,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
billyObj := newFile("billy")
billyChunkName := func(chunkNo int) string {
return f.makeChunkName(billyObj.Remote(), chunkNo, "", -1)
return f.makeChunkName(billyObj.Remote(), chunkNo, "", "")
}
err := f.Mkdir(ctx, billyChunkName(1))
@@ -433,7 +519,7 @@ func testPreventCorruption(t *testing.T, f *Fs) {
// recreate billy in case it was anyhow corrupted
willyObj := newFile("willy")
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", -1)
willyChunkName := f.makeChunkName(willyObj.Remote(), 1, "", "")
f.opt.FailHard = false
willyChunk, err := f.NewObject(ctx, willyChunkName)
f.opt.FailHard = true
@@ -484,7 +570,7 @@ func testChunkNumberOverflow(t *testing.T, f *Fs) {
f.opt.FailHard = false
file, fileName := newFile(f, "wreaker")
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", -1))
wreak, _ := newFile(f.base, f.makeChunkName("wreaker", wreakNumber, "", ""))
f.opt.FailHard = false
fstest.CheckListingWithRoot(t, f, dir, nil, nil, f.Precision())
@@ -532,7 +618,7 @@ func testMetadataInput(t *testing.T, f *Fs) {
filename := path.Join(dir, name)
require.True(t, len(contents) > 2 && len(contents) < minChunkForTest, description+" test data is correct")
part := putFile(f.base, f.makeChunkName(filename, 0, "", -1), "oops", "", true)
part := putFile(f.base, f.makeChunkName(filename, 0, "", ""), "oops", "", true)
_ = putFile(f, filename, contents, "upload "+description, false)
obj, err := f.NewObject(ctx, filename)


@@ -326,6 +326,17 @@ Photos folder" option in your google drive settings. You can then copy
or move the photos locally and use the date the image was taken or move the photos locally and use the date the image was taken
(created) set as the modification date.`, (created) set as the modification date.`,
Advanced: true, Advanced: true,
}, {
Name: "use_shared_date",
Default: false,
Help: `Use date file was shared instead of modified date.
Note that, as with "--drive-use-created-date", this flag may have
unexpected consequences when uploading/downloading files.
If both this flag and "--drive-use-created-date" are set, the created
date is used.`,
Advanced: true,
}, { }, {
Name: "list_chunk", Name: "list_chunk",
Default: 1000, Default: 1000,
@@ -463,6 +474,7 @@ type Options struct {
ImportExtensions string `config:"import_formats"` ImportExtensions string `config:"import_formats"`
AllowImportNameChange bool `config:"allow_import_name_change"` AllowImportNameChange bool `config:"allow_import_name_change"`
UseCreatedDate bool `config:"use_created_date"` UseCreatedDate bool `config:"use_created_date"`
UseSharedDate bool `config:"use_shared_date"`
ListChunk int64 `config:"list_chunk"` ListChunk int64 `config:"list_chunk"`
Impersonate string `config:"impersonate"` Impersonate string `config:"impersonate"`
AlternateExport bool `config:"alternate_export"` AlternateExport bool `config:"alternate_export"`
@@ -694,6 +706,9 @@ func (f *Fs) list(ctx context.Context, dirIDs []string, title string, directorie
if f.opt.AuthOwnerOnly { if f.opt.AuthOwnerOnly {
fields += ",owners" fields += ",owners"
} }
if f.opt.UseSharedDate {
fields += ",sharedWithMeTime"
}
if f.opt.SkipChecksumGphotos { if f.opt.SkipChecksumGphotos {
fields += ",spaces" fields += ",spaces"
} }
@@ -1095,6 +1110,8 @@ func (f *Fs) newBaseObject(remote string, info *drive.File) baseObject {
modifiedDate := info.ModifiedTime modifiedDate := info.ModifiedTime
if f.opt.UseCreatedDate { if f.opt.UseCreatedDate {
modifiedDate = info.CreatedTime modifiedDate = info.CreatedTime
} else if f.opt.UseSharedDate && info.SharedWithMeTime != "" {
modifiedDate = info.SharedWithMeTime
} }
size := info.Size size := info.Size
if f.opt.SizeAsQuota { if f.opt.SizeAsQuota {
@@ -1463,6 +1480,14 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if iErr != nil { if iErr != nil {
return nil, iErr return nil, iErr
} }
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && len(entries) == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
}
return entries, nil return entries, nil
} }
@@ -1600,6 +1625,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
out := make(chan error, fs.Config.Checkers) out := make(chan error, fs.Config.Checkers)
list := walk.NewListRHelper(callback) list := walk.NewListRHelper(callback)
overflow := []listREntry{} overflow := []listREntry{}
listed := 0
cb := func(entry fs.DirEntry) error { cb := func(entry fs.DirEntry) error {
mu.Lock() mu.Lock()
@@ -1612,6 +1638,7 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
overflow = append(overflow, listREntry{d.ID(), d.Remote()}) overflow = append(overflow, listREntry{d.ID(), d.Remote()})
} }
} }
listed++
return list.Add(entry) return list.Add(entry)
} }
@@ -1668,7 +1695,21 @@ func (f *Fs) ListR(ctx context.Context, dir string, callback fs.ListRCallback) (
return err
}
return list.Flush()
err = list.Flush()
if err != nil {
return err
}
// If listing the root of a teamdrive and got no entries,
// double check we have access
if f.isTeamDrive && listed == 0 && f.root == "" && dir == "" {
err = f.teamDriveOK(ctx)
if err != nil {
return err
}
}
return nil
} }
// itemToDirEntry converts a drive.File to a fs.DirEntry. // itemToDirEntry converts a drive.File to a fs.DirEntry.
@@ -2041,9 +2082,30 @@ func (f *Fs) CleanUp(ctx context.Context) error {
return nil return nil
} }
// teamDriveOK checks to see if we can access the team drive
func (f *Fs) teamDriveOK(ctx context.Context) (err error) {
if !f.isTeamDrive {
return nil
}
var td *drive.Drive
err = f.pacer.Call(func() (bool, error) {
td, err = f.svc.Drives.Get(f.opt.TeamDriveID).Fields("name,id,capabilities,createdTime,restrictions").Context(ctx).Do()
return shouldRetry(err)
})
if err != nil {
return errors.Wrap(err, "failed to get Team/Shared Drive info")
}
fs.Debugf(f, "read info from team drive %q", td.Name)
return err
}
// About gets quota information // About gets quota information
func (f *Fs) About(ctx context.Context) (*fs.Usage, error) { func (f *Fs) About(ctx context.Context) (*fs.Usage, error) {
if f.isTeamDrive { if f.isTeamDrive {
err := f.teamDriveOK(ctx)
if err != nil {
return nil, err
}
// Teamdrives don't appear to have a usage API so just return empty // Teamdrives don't appear to have a usage API so just return empty
return &fs.Usage{}, nil return &fs.Usage{}, nil
} }


@@ -46,13 +46,26 @@ func (t Time) String() string { return time.Time(t).Format(timeFormat) }
// APIString returns Time string in Jottacloud API format // APIString returns Time string in Jottacloud API format
func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) } func (t Time) APIString() string { return time.Time(t).Format(apiTimeFormat) }
// LoginToken is struct representing the login token generated in the WebUI
type LoginToken struct {
Username string `json:"username"`
Realm string `json:"realm"`
WellKnownLink string `json:"well_known_link"`
AuthToken string `json:"auth_token"`
}
// TokenJSON is the struct representing the HTTP response from OAuth2 // TokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form. // providers returning a token in JSON form.
type TokenJSON struct { type TokenJSON struct {
AccessToken string `json:"access_token"`
TokenType string `json:"token_type"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshToken string `json:"refresh_token"`
RefreshExpiresIn int32 `json:"refresh_expires_in"`
ExpiresIn int32 `json:"expires_in"` // at least PayPal returns string, while most return number
RefreshToken string `json:"refresh_token"`
TokenType string `json:"token_type"`
IDToken string `json:"id_token"`
NotBeforePolicy int32 `json:"not-before-policy"`
SessionState string `json:"session_state"`
Scope string `json:"scope"`
} }
// JSON structures returned by new API // JSON structures returned by new API


@@ -4,12 +4,13 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/md5" "crypto/md5"
"encoding/base64"
"encoding/hex" "encoding/hex"
"encoding/json"
"fmt" "fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"log" "log"
"math/rand"
"net/http" "net/http"
"net/url" "net/url"
"os" "os"
@@ -25,7 +26,6 @@ import (
"github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/config"
"github.com/rclone/rclone/fs/config/configmap" "github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/config/configstruct" "github.com/rclone/rclone/fs/config/configstruct"
"github.com/rclone/rclone/fs/config/obscure"
"github.com/rclone/rclone/fs/encodings" "github.com/rclone/rclone/fs/encodings"
"github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fserrors"
"github.com/rclone/rclone/fs/fshttp" "github.com/rclone/rclone/fs/fshttp"
@@ -41,29 +41,25 @@ const enc = encodings.JottaCloud
// Globals // Globals
const ( const (
minSleep = 10 * time.Millisecond minSleep = 10 * time.Millisecond
maxSleep = 2 * time.Second maxSleep = 2 * time.Second
decayConstant = 2 // bigger for slower decay, exponential decayConstant = 2 // bigger for slower decay, exponential
defaultDevice = "Jotta" defaultDevice = "Jotta"
defaultMountpoint = "Archive" defaultMountpoint = "Archive"
rootURL = "https://www.jottacloud.com/jfs/" rootURL = "https://www.jottacloud.com/jfs/"
apiURL = "https://api.jottacloud.com/" apiURL = "https://api.jottacloud.com/"
baseURL = "https://www.jottacloud.com/" baseURL = "https://www.jottacloud.com/"
tokenURL = "https://api.jottacloud.com/auth/v1/token"
tokenURL = "https://id.jottacloud.com/auth/realms/jottacloud/protocol/openid-connect/token"
registerURL = "https://api.jottacloud.com/auth/v1/register"
cachePrefix = "rclone-jcmd5-"
cachePrefix = "rclone-jcmd5-"
configDevice = "device"
rcloneClientID = "nibfk8biu12ju7hpqomr8b1e40"
configMountpoint = "mountpoint"
rcloneEncryptedClientSecret = "Vp8eAv7eVElMnQwN-kgU9cbhgApNDaMqWdlDi5qFydlQoji4JBxrGMF2"
configVersion = 1
configClientID = "client_id"
configClientSecret = "client_secret"
configDevice = "device"
configMountpoint = "mountpoint"
charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
) )
var ( var (
// Description of how to auth for this app for a personal account // Description of how to auth for this app for a personal account
oauthConfig = &oauth2.Config{ oauthConfig = &oauth2.Config{
ClientID: "jottacli",
Endpoint: oauth2.Endpoint{ Endpoint: oauth2.Endpoint{
AuthURL: tokenURL, AuthURL: tokenURL,
TokenURL: tokenURL, TokenURL: tokenURL,
@@ -81,43 +77,39 @@ func init() {
NewFs: NewFs, NewFs: NewFs,
Config: func(name string, m configmap.Mapper) { Config: func(name string, m configmap.Mapper) {
ctx := context.TODO() ctx := context.TODO()
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
srv := rest.NewClient(fshttp.NewClient(fs.Config)) refresh := false
fmt.Printf("\nDo you want to create a machine specific API key?\n\nRclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.\n\n") if version, ok := m.Get("configVersion"); ok {
if config.Confirm(false) { ver, err := strconv.Atoi(version)
deviceRegistration, err := registerDevice(ctx, srv)
if err != nil { if err != nil {
log.Fatalf("Failed to register device: %v", err) log.Fatalf("Failed to parse config version - corrupted config")
} }
refresh = ver != configVersion
m.Set(configClientID, deviceRegistration.ClientID) } else {
m.Set(configClientSecret, obscure.MustObscure(deviceRegistration.ClientSecret)) refresh = true
fs.Debugf(nil, "Got clientID '%s' and clientSecret '%s'", deviceRegistration.ClientID, deviceRegistration.ClientSecret)
} }
clientID, ok := m.Get(configClientID) if refresh {
if !ok { fmt.Printf("Config outdated - refreshing\n")
clientID = rcloneClientID } else {
tokenString, ok := m.Get("token")
if ok && tokenString != "" {
fmt.Printf("Already have a token - refresh?\n")
if !config.Confirm(false) {
return
}
}
} }
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
fmt.Printf("Username> ") clientConfig := *fs.Config
username := config.ReadLine() clientConfig.UserAgent = "JottaCli 0.6.18626 windows-amd64"
password := config.GetPassword("Your Jottacloud password is only required during setup and will not be stored.") srv := rest.NewClient(fshttp.NewClient(&clientConfig))
token, err := doAuth(ctx, srv, username, password) fmt.Printf("Generate a personal login token here: https://www.jottacloud.com/web/secure\n")
fmt.Printf("Login Token> ")
loginToken := config.ReadLine()
token, err := doAuth(ctx, srv, loginToken)
if err != nil { if err != nil {
log.Fatalf("Failed to get oauth token: %s", err) log.Fatalf("Failed to get oauth token: %s", err)
} }
@@ -143,6 +135,8 @@ func init() {
m.Set(configDevice, device) m.Set(configDevice, device)
m.Set(configMountpoint, mountpoint) m.Set(configMountpoint, mountpoint)
} }
m.Set("configVersion", strconv.Itoa(configVersion))
}, },
Options: []fs.Option{{ Options: []fs.Option{{
Name: "md5_memory_limit", Name: "md5_memory_limit",
@@ -249,67 +243,51 @@ func shouldRetry(resp *http.Response, err error) (bool, error) {
return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err return fserrors.ShouldRetry(err) || fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
} }
// registerDevice register a new device for use with the jottacloud API
func registerDevice(ctx context.Context, srv *rest.Client) (reg *api.DeviceRegistrationResponse, err error) {
// random generator to generate random device names
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
randonDeviceNamePartLength := 21
randomDeviceNamePart := make([]byte, randonDeviceNamePartLength)
for i := range randomDeviceNamePart {
randomDeviceNamePart[i] = charset[seededRand.Intn(len(charset))]
}
randomDeviceName := "rclone-" + string(randomDeviceNamePart)
fs.Debugf(nil, "Trying to register device '%s'", randomDeviceName)
values := url.Values{}
values.Set("device_id", randomDeviceName)
opts := rest.Opts{
Method: "POST",
RootURL: registerURL,
ContentType: "application/x-www-form-urlencoded",
ExtraHeaders: map[string]string{"Authorization": "Bearer c2xrZmpoYWRsZmFramhkc2xma2phaHNkbGZramhhc2xkZmtqaGFzZGxrZmpobGtq"},
Parameters: values,
}
var deviceRegistration *api.DeviceRegistrationResponse
_, err = srv.CallJSON(ctx, &opts, nil, &deviceRegistration)
return deviceRegistration, err
}
// doAuth runs the actual token request
func doAuth(ctx context.Context, srv *rest.Client, username, password string) (token oauth2.Token, err error) {
func doAuth(ctx context.Context, srv *rest.Client, loginTokenBase64 string) (token oauth2.Token, err error) {
loginTokenBytes, err := base64.StdEncoding.DecodeString(loginTokenBase64)
if err != nil {
return token, err
}
var loginToken api.LoginToken
decoder := json.NewDecoder(bytes.NewReader(loginTokenBytes))
err = decoder.Decode(&loginToken)
if err != nil {
return token, err
}
// we don't seem to need any data from this link but the API is not happy if skip it
opts := rest.Opts{
Method: "GET",
RootURL: loginToken.WellKnownLink,
NoResponse: true,
}
_, err = srv.Call(ctx, &opts)
if err != nil {
return token, err
}
// prepare out token request with username and password // prepare out token request with username and password
values := url.Values{} values := url.Values{}
values.Set("grant_type", "PASSWORD") values.Set("client_id", "jottacli")
values.Set("password", password) values.Set("grant_type", "password")
values.Set("username", username) values.Set("password", loginToken.AuthToken)
values.Set("client_id", oauthConfig.ClientID) values.Set("scope", "offline_access+openid")
values.Set("client_secret", oauthConfig.ClientSecret) values.Set("username", loginToken.Username)
opts := rest.Opts{ values.Encode()
opts = rest.Opts{
Method: "POST", Method: "POST",
RootURL: oauthConfig.Endpoint.AuthURL, RootURL: oauthConfig.Endpoint.AuthURL,
ContentType: "application/x-www-form-urlencoded", ContentType: "application/x-www-form-urlencoded",
Parameters: values, Body: strings.NewReader(values.Encode()),
} }
// do the first request // do the first request
var jsonToken api.TokenJSON var jsonToken api.TokenJSON
resp, err := srv.CallJSON(ctx, &opts, nil, &jsonToken) _, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
if err != nil { if err != nil {
// if 2fa is enabled the first request is expected to fail. We will do another request with the 2fa code as an additional http header return token, err
if resp != nil {
if resp.Header.Get("X-JottaCloud-OTP") == "required; SMS" {
fmt.Printf("This account uses 2 factor authentication you will receive a verification code via SMS.\n")
fmt.Printf("Enter verification code> ")
authCode := config.ReadLine()
authCode = strings.Replace(authCode, "-", "", -1) // remove any "-" contained in the code so we have a 6 digit number
opts.ExtraHeaders = make(map[string]string)
opts.ExtraHeaders["X-Jottacloud-Otp"] = authCode
resp, err = srv.CallJSON(ctx, &opts, nil, &jsonToken)
}
}
} }
token.AccessToken = jsonToken.AccessToken token.AccessToken = jsonToken.AccessToken
@@ -471,29 +449,6 @@ func (f *Fs) filePath(file string) string {
return urlPathEscape(f.filePathRaw(file)) return urlPathEscape(f.filePathRaw(file))
} }
// Jottacloud requires the grant_type 'refresh_token' string
// to be uppercase and throws a 400 Bad Request if we use the
// lower case used by the oauth2 module
//
// This filter catches all refresh requests, reads the body,
// changes the case and then sends it on
func grantTypeFilter(req *http.Request) {
if tokenURL == req.URL.String() {
// read the entire body
refreshBody, err := ioutil.ReadAll(req.Body)
if err != nil {
return
}
_ = req.Body.Close()
// make the refresh token upper case
refreshBody = []byte(strings.Replace(string(refreshBody), "grant_type=refresh_token", "grant_type=REFRESH_TOKEN", 1))
// set the new ReadCloser (with a dummy Close())
req.Body = ioutil.NopCloser(bytes.NewReader(refreshBody))
}
}
// NewFs constructs an Fs from the path, container:path // NewFs constructs an Fs from the path, container:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) { func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
ctx := context.TODO() ctx := context.TODO()
@@ -504,30 +459,23 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
return nil, err return nil, err
} }
var ok bool
var version string
if version, ok = m.Get("configVersion"); ok {
ver, err := strconv.Atoi(version)
if err != nil {
return nil, errors.New("Failed to parse config version")
}
ok = ver == configVersion
}
if !ok {
return nil, errors.New("Outdated config - please reconfigure this backend")
}
rootIsDir := strings.HasSuffix(root, "/") rootIsDir := strings.HasSuffix(root, "/")
root = parsePath(root) root = parsePath(root)
clientID, ok := m.Get(configClientID)
if !ok {
clientID = rcloneClientID
}
clientSecret, ok := m.Get(configClientSecret)
if !ok {
clientSecret = rcloneEncryptedClientSecret
}
oauthConfig.ClientID = clientID
oauthConfig.ClientSecret = obscure.MustReveal(clientSecret)
// the oauth client for the api servers needs
// a filter to fix the grant_type issues (see above)
baseClient := fshttp.NewClient(fs.Config) baseClient := fshttp.NewClient(fs.Config)
if do, ok := baseClient.Transport.(interface {
SetRequestFilter(f func(req *http.Request))
}); ok {
do.SetRequestFilter(grantTypeFilter)
} else {
fs.Debugf(name+":", "Couldn't add request filter - uploads will fail")
}
oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient) oAuthClient, ts, err := oauthutil.NewClientWithBaseClient(name, m, oauthConfig, baseClient)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client") return nil, errors.Wrap(err, "Failed to configure Jottacloud oauth client")


@@ -820,10 +820,10 @@ func (file *localOpenFile) Read(p []byte) (n int, err error) {
return 0, errors.Wrap(err, "can't read status of source file while transferring") return 0, errors.Wrap(err, "can't read status of source file while transferring")
} }
if file.o.size != fi.Size() {
return 0, errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size())
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (size changed from %d to %d)", file.o.size, fi.Size()))
}
if !file.o.modTime.Equal(fi.ModTime()) {
return 0, errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime())
return 0, fserrors.NoLowLevelRetryError(errors.Errorf("can't copy - source file is being updated (mod time changed from %v to %v)", file.o.modTime, fi.ModTime()))
}
}


@@ -715,6 +715,16 @@ file you can stream upload is 48GB. If you wish to stream upload
larger files then you will need to increase chunk_size.`, larger files then you will need to increase chunk_size.`,
Default: minChunkSize, Default: minChunkSize,
Advanced: true, Advanced: true,
}, {
Name: "copy_cutoff",
Help: `Cutoff for switching to multipart copy
Any files larger than this that need to be server side copied will be
copied in chunks of this size.
The minimum is 0 and the maximum is 5GB.`,
Default: fs.SizeSuffix(maxSizeForCopy),
Advanced: true,
}, { }, {
Name: "disable_checksum", Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata", Help: "Don't store MD5 checksum with object metadata",
@@ -809,6 +819,7 @@ type Options struct {
SSEKMSKeyID string `config:"sse_kms_key_id"` SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"` StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"` UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
CopyCutoff fs.SizeSuffix `config:"copy_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"` ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"` DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"` SessionToken string `config:"session_token"`
@@ -1387,6 +1398,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
} else { } else {
marker = resp.NextMarker marker = resp.NextMarker
} }
if urlEncodeListings {
*marker, err = url.QueryUnescape(*marker)
if err != nil {
return errors.Wrapf(err, "failed to URL decode NextMarker %q", *marker)
}
}
} }
return nil return nil
} }
@@ -1653,7 +1670,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.StorageClass = &f.opt.StorageClass req.StorageClass = &f.opt.StorageClass
} }
if srcSize >= int64(f.opt.UploadCutoff) {
if srcSize >= int64(f.opt.CopyCutoff) {
return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize) return f.copyMultipart(ctx, req, dstBucket, dstPath, srcBucket, srcPath, srcSize)
} }
return f.pacer.Call(func() (bool, error) { return f.pacer.Call(func() (bool, error) {
@@ -1666,8 +1683,8 @@ func calculateRange(partSize, partIndex, numParts, totalSize int64) string {
start := partIndex * partSize start := partIndex * partSize
var ends string var ends string
if partIndex == numParts-1 { if partIndex == numParts-1 {
if totalSize >= 0 {
if totalSize >= 1 {
ends = strconv.FormatInt(totalSize, 10)
ends = strconv.FormatInt(totalSize-1, 10)
}
} else { } else {
ends = strconv.FormatInt(start+partSize-1, 10) ends = strconv.FormatInt(start+partSize-1, 10)
@@ -1704,7 +1721,7 @@ func (f *Fs) copyMultipart(ctx context.Context, req *s3.CopyObjectInput, dstBuck
} }
}() }()
partSize := int64(f.opt.ChunkSize)
partSize := int64(f.opt.CopyCutoff)
numParts := (srcSize-1)/partSize + 1 numParts := (srcSize-1)/partSize + 1
var parts []*s3.CompletedPart var parts []*s3.CompletedPart
@@ -1932,11 +1949,6 @@ func (o *Object) SetModTime(ctx context.Context, modTime time.Time) error {
} }
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime)) o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Can't update metadata here, so return this error to force a recopy // Can't update metadata here, so return this error to force a recopy
if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" { if o.storageClass == "GLACIER" || o.storageClass == "DEEP_ARCHIVE" {
return fs.ErrorCantSetModTime return fs.ErrorCantSetModTime
@@ -2040,7 +2052,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// read the md5sum if available for non multpart and if // read the md5sum if available for non multpart and if
// disable checksum isn't present. // disable checksum isn't present.
var md5sum string var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
if !multipart && !o.fs.opt.DisableChecksum {
hash, err := src.Hash(ctx, hash.MD5) hash, err := src.Hash(ctx, hash.MD5)
if err == nil && matchMd5.MatchString(hash) { if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash) hashBytes, err := hex.DecodeString(hash)


@@ -156,6 +156,11 @@ Home directory can be found in a shared folder called "home"
Default: "", Default: "",
Help: "The command used to read sha1 hashes. Leave blank for autodetect.", Help: "The command used to read sha1 hashes. Leave blank for autodetect.",
Advanced: true, Advanced: true,
}, {
Name: "skip_links",
Default: false,
Help: "Set to skip any symlinks and any other non regular files.",
Advanced: true,
}}, }},
} }
fs.Register(fsi) fs.Register(fsi)
@@ -177,6 +182,7 @@ type Options struct {
SetModTime bool `config:"set_modtime"` SetModTime bool `config:"set_modtime"`
Md5sumCommand string `config:"md5sum_command"` Md5sumCommand string `config:"md5sum_command"`
Sha1sumCommand string `config:"sha1sum_command"` Sha1sumCommand string `config:"sha1sum_command"`
SkipLinks bool `config:"skip_links"`
} }
// Fs stores the interface to the remote SFTP files // Fs stores the interface to the remote SFTP files
@@ -600,12 +606,16 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
remote := path.Join(dir, info.Name()) remote := path.Join(dir, info.Name())
// If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to // If file is a symlink (not a regular file is the best cross platform test we can do), do a stat to
// pick up the size and type of the destination, instead of the size and type of the symlink. // pick up the size and type of the destination, instead of the size and type of the symlink.
if !info.Mode().IsRegular() {
if !info.Mode().IsRegular() && !info.IsDir() {
if f.opt.SkipLinks {
// skip non regular file if SkipLinks is set
continue
}
oldInfo := info oldInfo := info
info, err = f.stat(remote) info, err = f.stat(remote)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
fs.Errorf(remote, "stat of non-regular file/dir failed: %v", err)
fs.Errorf(remote, "stat of non-regular file failed: %v", err)
} }
info = oldInfo info = oldInfo
} }


@@ -7,6 +7,7 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"net/url"
"path" "path"
"strconv" "strconv"
"strings" "strings"
@@ -952,8 +953,8 @@ func (o *Object) isStaticLargeObject() (bool, error) {
return o.hasHeader("X-Static-Large-Object") return o.hasHeader("X-Static-Large-Object")
} }
func (o *Object) isInContainerVersioning() (bool, error) {
func (o *Object) isInContainerVersioning(container string) (bool, error) {
_, headers, err := o.fs.c.Container(o.fs.root)
_, headers, err := o.fs.c.Container(container)
if err != nil { if err != nil {
return false, err return false, err
} }
@@ -1130,6 +1131,10 @@ func (o *Object) getSegmentsDlo() (segmentsContainer string, prefix string, err
return return
} }
dirManifest := o.headers["X-Object-Manifest"] dirManifest := o.headers["X-Object-Manifest"]
dirManifest, err = url.PathUnescape(dirManifest)
if err != nil {
return
}
delimiter := strings.Index(dirManifest, "/") delimiter := strings.Index(dirManifest, "/")
if len(dirManifest) == 0 || delimiter < 0 { if len(dirManifest) == 0 || delimiter < 0 {
err = errors.New("Missing or wrong structure of manifest of Dynamic large object") err = errors.New("Missing or wrong structure of manifest of Dynamic large object")
@@ -1341,7 +1346,7 @@ func (o *Object) Remove(ctx context.Context) (err error) {
} }
// ...then segments if required // ...then segments if required
if isDynamicLargeObject { if isDynamicLargeObject {
isInContainerVersioning, err := o.isInContainerVersioning()
isInContainerVersioning, err := o.isInContainerVersioning(container)
if err != nil { if err != nil {
return err return err
} }


@@ -113,7 +113,8 @@ type Fs struct {
canStream bool // set if can stream canStream bool // set if can stream
useOCMtime bool // set if can use X-OC-Mtime useOCMtime bool // set if can use X-OC-Mtime
retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default) retryWithZeroDepth bool // some vendors (sharepoint) won't list files when Depth is 1 (our default)
hasChecksums bool // set if can use owncloud style checksums
hasMD5 bool // set if can use owncloud style checksums for MD5
hasSHA1 bool // set if can use owncloud style checksums for SHA1
} }
// Object describes a webdav object // Object describes a webdav object
@@ -215,7 +216,7 @@ func (f *Fs) readMetaDataForPath(ctx context.Context, path string, depth string)
}, },
NoRedirect: true, NoRedirect: true,
} }
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps) opts.Body = bytes.NewBuffer(owncloudProps)
} }
var result api.Multistatus var result api.Multistatus
@@ -383,7 +384,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// sets the BearerToken up // sets the BearerToken up
func (f *Fs) setBearerToken(token string) { func (f *Fs) setBearerToken(token string) {
f.opt.BearerToken = token f.opt.BearerToken = token
f.srv.SetHeader("Authorization", "BEARER "+token)
f.srv.SetHeader("Authorization", "Bearer "+token)
} }
// fetch the bearer token using the command // fetch the bearer token using the command
@@ -430,11 +431,12 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
f.canStream = true f.canStream = true
f.precision = time.Second f.precision = time.Second
f.useOCMtime = true f.useOCMtime = true
f.hasChecksums = true
f.hasMD5 = true
f.hasSHA1 = true
case "nextcloud": case "nextcloud":
f.precision = time.Second f.precision = time.Second
f.useOCMtime = true f.useOCMtime = true
f.hasChecksums = true
f.hasSHA1 = true
case "sharepoint": case "sharepoint":
// To mount sharepoint, two Cookies are required // To mount sharepoint, two Cookies are required
// They have to be set instead of BasicAuth // They have to be set instead of BasicAuth
@@ -536,7 +538,7 @@ func (f *Fs) listAll(ctx context.Context, dir string, directoriesOnly bool, file
"Depth": depth, "Depth": depth,
}, },
} }
if f.hasChecksums {
if f.hasMD5 || f.hasSHA1 {
opts.Body = bytes.NewBuffer(owncloudProps) opts.Body = bytes.NewBuffer(owncloudProps)
} }
var result api.Multistatus var result api.Multistatus
@@ -945,10 +947,14 @@ func (f *Fs) DirMove(ctx context.Context, src fs.Fs, srcRemote, dstRemote string
// Hashes returns the supported hash sets. // Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set { func (f *Fs) Hashes() hash.Set {
if f.hasChecksums { hashes := hash.Set(hash.None)
return hash.NewHashSet(hash.MD5, hash.SHA1) if f.hasMD5 {
hashes.Add(hash.MD5)
} }
return hash.Set(hash.None) if f.hasSHA1 {
hashes.Add(hash.SHA1)
}
return hashes
} }
// About gets quota information // About gets quota information
@@ -1015,13 +1021,11 @@ func (o *Object) Remote() string {
// Hash returns the SHA1 or MD5 of an object returning a lowercase hex string // Hash returns the SHA1 or MD5 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) { func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.hasChecksums { if t == hash.MD5 && o.fs.hasMD5 {
switch t { return o.md5, nil
case hash.SHA1: }
return o.sha1, nil if t == hash.SHA1 && o.fs.hasSHA1 {
case hash.MD5: return o.sha1, nil
return o.md5, nil
}
} }
return "", hash.ErrUnsupported return "", hash.ErrUnsupported
} }
@@ -1042,10 +1046,14 @@ func (o *Object) setMetaData(info *api.Prop) (err error) {
o.hasMetaData = true o.hasMetaData = true
o.size = info.Size o.size = info.Size
o.modTime = time.Time(info.Modified) o.modTime = time.Time(info.Modified)
if o.fs.hasChecksums {
if o.fs.hasMD5 || o.fs.hasSHA1 {
hashes := info.Hashes() hashes := info.Hashes()
o.sha1 = hashes[hash.SHA1] if o.fs.hasSHA1 {
o.md5 = hashes[hash.MD5] o.sha1 = hashes[hash.SHA1]
}
if o.fs.hasMD5 {
o.md5 = hashes[hash.MD5]
}
} }
return nil return nil
} }
@@ -1126,19 +1134,21 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365 ContentLength: &size, // FIXME this isn't necessary with owncloud - See https://github.com/nextcloud/nextcloud-snap/issues/365
ContentType: fs.MimeType(ctx, src), ContentType: fs.MimeType(ctx, src),
} }
if o.fs.useOCMtime || o.fs.hasChecksums {
if o.fs.useOCMtime || o.fs.hasMD5 || o.fs.hasSHA1 {
opts.ExtraHeaders = map[string]string{} opts.ExtraHeaders = map[string]string{}
if o.fs.useOCMtime { if o.fs.useOCMtime {
opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9) opts.ExtraHeaders["X-OC-Mtime"] = fmt.Sprintf("%f", float64(src.ModTime(ctx).UnixNano())/1e9)
} }
if o.fs.hasChecksums { // Set one upload checksum
// Set an upload checksum - prefer SHA1 // Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5
// // Nextcloud stores the checksum you supply (SHA1 or MD5) but only stores one
// This is used as an upload integrity test. If we set if o.fs.hasSHA1 {
// only SHA1 here, owncloud will calculate the MD5 too.
if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" { if sha1, _ := src.Hash(ctx, hash.SHA1); sha1 != "" {
opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1 opts.ExtraHeaders["OC-Checksum"] = "SHA1:" + sha1
} else if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" { }
}
if o.fs.hasMD5 && opts.ExtraHeaders["OC-Checksum"] == "" {
if md5, _ := src.Hash(ctx, hash.MD5); md5 != "" {
opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5 opts.ExtraHeaders["OC-Checksum"] = "MD5:" + md5
} }
} }


@@ -46,10 +46,11 @@ __rclone_custom_func() {
else else
__rclone_init_completion -n : || return __rclone_init_completion -n : || return
fi fi
local rclone=(command rclone --ask-password=false)
if [[ $cur != *:* ]]; then if [[ $cur != *:* ]]; then
local ifs=$IFS local ifs=$IFS
IFS=$'\n' IFS=$'\n'
local remotes=($(command rclone listremotes))
local remotes=($("${rclone[@]}" listremotes 2> /dev/null))
IFS=$ifs IFS=$ifs
local remote local remote
for remote in "${remotes[@]}"; do for remote in "${remotes[@]}"; do
@@ -68,7 +69,7 @@ __rclone_custom_func() {
fi fi
local ifs=$IFS local ifs=$IFS
IFS=$'\n' IFS=$'\n'
local lines=($(rclone lsf "${cur%%:*}:$prefix" 2>/dev/null))
local lines=($("${rclone[@]}" lsf "${cur%%:*}:$prefix" 2> /dev/null))
IFS=$ifs IFS=$ifs
local line local line
for line in "${lines[@]}"; do for line in "${lines[@]}"; do


@@ -263,7 +263,7 @@ Contributors
* garry415 <garry.415@gmail.com> * garry415 <garry.415@gmail.com>
* forgems <forgems@gmail.com> * forgems <forgems@gmail.com>
* Florian Apolloner <florian@apolloner.eu> * Florian Apolloner <florian@apolloner.eu>
* Aleksandar Jankovic <office@ajankovic.com>
* Aleksandar Janković <office@ajankovic.com> <ajankovic@users.noreply.github.com>
* Maran <maran@protonmail.com> * Maran <maran@protonmail.com>
* nguyenhuuluan434 <nguyenhuuluan434@gmail.com> * nguyenhuuluan434 <nguyenhuuluan434@gmail.com>
* Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev> * Laura Hausmann <zotan@zotan.pw> <laura@hausmann.dev>
@@ -313,3 +313,6 @@ Contributors
* Marco Molteni <marco.molteni@mailbox.org> * Marco Molteni <marco.molteni@mailbox.org>
* Ankur Gupta <ankur0493@gmail.com> * Ankur Gupta <ankur0493@gmail.com>
* Maciej Zimnoch <maciej@scylladb.com> * Maciej Zimnoch <maciej@scylladb.com>
* anuar45 <serdaliyev.anuar@gmail.com>
* Fernando <ferferga@users.noreply.github.com>
* David Cole <david.cole@sohonet.com>


@@ -130,10 +130,10 @@ error message in such cases.
#### Chunk names

The default chunk name format is `*.rclone-chunk.###`, hence by default
chunk names are `BIG_FILE_NAME.rclone-chunk.001`,
`BIG_FILE_NAME.rclone-chunk.002` etc. You can configure a different name
format using the `--chunker-name-format` option. The format uses asterisk
The default chunk name format is `*.rclone_chunk.###`, hence by default
chunk names are `BIG_FILE_NAME.rclone_chunk.001`,
`BIG_FILE_NAME.rclone_chunk.002` etc. You can configure another name format
using the `name_format` configuration file option. The format uses asterisk
`*` as a placeholder for the base file name and one or more consecutive
hash characters `#` as a placeholder for sequential chunk number.
There must be one and only one asterisk. The number of consecutive hash
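For illustration only: with a custom format such as `*.rcc##` (the shorter format
suggested further down in this document) and numbering assumed to start at 1,
a file named `BIG_FILE_NAME` would be stored roughly as:

    BIG_FILE_NAME.rcc01
    BIG_FILE_NAME.rcc02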
@@ -211,6 +211,9 @@ file hashing, configure chunker with `md5all` or `sha1all`. These two modes
guarantee given hash for all files. If wrapped remote doesn't support it,
chunker will then add metadata to all files, even small. However, this can
double the amount of small files in storage and incur additional service charges.
You can even use chunker to force md5/sha1 support in any other remote
at expence of sidecar meta objects by setting eg. `chunk_type=sha1all`
to force hashsums and `chunk_size=1P` to effectively disable chunking.
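A minimal configuration sketch of that trick; the remote names below are placeholders,
and `hash_type` is assumed to be the chunker option behind the `md5all`/`sha1all`
modes mentioned above:

    [hashsums]
    type = chunker
    remote = mybase:bucket
    hash_type = sha1all
    chunk_size = 1P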
Normally, when a file is copied to chunker controlled remote, chunker
will ask the file source for compatible file hash and revert to on-the-fly
@@ -274,6 +277,14 @@ Chunker requires wrapped remote to support server side `move` (or `copy` +
This is because it internally renames temporary chunk files to their final
names when an operation completes successfully.
Chunker encodes chunk number in file name, so with default `name_format`
setting it adds 17 characters. Also chunker adds 7 characters of temporary
suffix during operations. Many file systems limit base file name without path
by 255 characters. Using rclone's crypt remote as a base file system limits
file name by 143 characters. Thus, maximum name length is 231 for most files
and 119 for chunker-over-crypt. A user in need can change name format to
eg. `*.rcc##` and save 10 characters (provided at most 99 chunks per file).
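The limits quoted above follow from simple subtraction (a sketch of the arithmetic):

    255 - 17 - 7 = 231   (typical base file system)
    143 - 17 - 7 = 119   (chunker over crypt)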
Note that a move implemented using the copy-and-delete method may incur
double charging with some cloud storage providers.


@@ -65,6 +65,28 @@ infrastructure](https://github.com/billziss-gh/winfsp/wiki/WinFsp-Service-Archit
which creates drives accessible for everyone on the system or
alternatively using [the nssm service manager](https://nssm.cc/usage).
#### Mount as a network drive
By default, rclone will mount the remote as a normal drive. However, you can also mount it as a **Network Drive**
(or **Network Share**, as mentioned in some places)
Unlike other systems, Windows provides a different filesystem type for network drives.
Windows and other programs treat the network drives and fixed/removable drives differently:
In network drives, many I/O operations are optimized, as the high latency and low reliability
(compared to a normal drive) of a network is expected.
Although many people prefer network shares to be mounted as normal system drives, this might cause
some issues, such as programs not working as expected or freezes and errors while operating with the
mounted remote in Windows Explorer. If you experience any of those, consider mounting rclone remotes as network shares,
as Windows expects normal drives to be fast and reliable, while cloud storage is far from that.
See also [Limitations](#limitations) section below for more info
Add `--fuse-flag --VolumePrefix=\server\share` to your `mount` command, **replacing `share` with any other
name of your choice if you are mounting more than one remote**. Otherwise, the mountpoints will conflict and
your mounted filesystems will overlap.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
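For illustration, a complete network-drive mount command could look like the
following; the remote name, target drive letter and share name are placeholders,
not values taken from this page:

    rclone mount remote:path/to/files X: --fuse-flag --VolumePrefix=\server\share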
### Limitations

Without the use of "--vfs-cache-mode" this can only write files


@@ -99,7 +99,7 @@ Or instead of htpassword if you just want a single user and password:
The GUI is being developed in the: [rclone/rclone-webui-react respository](https://github.com/rclone/rclone-webui-react).
Bug reports and contributions very welcome welcome :-)
Bug reports and contributions are very welcome :-)
If you have questions then please ask them on the [rclone forum](https://forum.rclone.org/).


@@ -56,7 +56,14 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config

## macOS installation from precompiled binary ##
## macOS installation with brew ##
brew install rclone
## macOS installation from precompiled binary, using curl ##
To avoid problems with macOS gatekeeper enforcing the binary to be signed and
notarized it is enough to download with `curl`.
Download the latest version of rclone.
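As a sketch of that step (the URL is an assumption based on rclone's usual
download location, not quoted from this page):

    curl -O https://downloads.rclone.org/rclone-current-osx-amd64.zip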
@@ -81,6 +88,19 @@ Run `rclone config` to setup. See [rclone config docs](/docs/) for more details.
rclone config
## macOS installation from precompiled binary, using a web browser ##
When downloading a binary with a web browser, the browser will set the macOS
gatekeeper quarantine attribute. Starting from Catalina, when attempting to run
`rclone`, a pop-up will appear saying:
“rclone” cannot be opened because the developer cannot be verified.
macOS cannot verify that this app is free from malware.
The simplest fix is to run
xattr -d com.apple.quarantine rclone
## Install with docker ##

The rclone maintains a [docker image for rclone](https://hub.docker.com/r/rclone/rclone).


@@ -11,7 +11,7 @@ Paths are specified as `remote:path`
Paths may be as deep as required, eg `remote:directory/subdirectory`.
To configure Jottacloud you will need to enter your username and password and select a mountpoint.
To configure Jottacloud you will need to generate a personal security token in the Jottacloud web inteface. You will the option to do in your [account security settings](https://www.jottacloud.com/web/secure). Note that the web inteface may refer to this token as a JottaCli token.
Here is an example of how to make a remote called `remote`. First run:
@@ -42,16 +42,8 @@ n) No
y/n> n
Remote config
Do you want to create a machine specific API key?
Generate a personal login token here: https://www.jottacloud.com/web/secure
Login Token> <your token here>
Rclone has it's own Jottacloud API KEY which works fine as long as one only uses rclone on a single machine. When you want to use rclone with this account on more than one machine it's recommended to create a machine specific API key. These keys can NOT be shared between machines.
y) Yes
n) No
y/n> y
Username> 0xC4KE@gmail.com
Your Jottacloud password is only required during setup and will not be stored.
password:
Do you want to use a non standard device/mountpoint e.g. for accessing files uploaded using the official Jottacloud client?
@@ -74,11 +66,10 @@ Mountpoints> 1
[jotta]
type = jottacloud
user = 0xC4KE@gmail.com
client_id = .....
client_secret = ........
token = {........}
device = Jotta
mountpoint = Archive
configVersion = 1
--------------------
y) Yes this is OK
e) Edit this remote
@@ -102,7 +93,7 @@ To copy a local directory to an Jottacloud directory called backup
### Devices and Mountpoints ###

The official Jottacloud client registers a device for each computer you install it on and then creates a mountpoint for each folder you select for Backup.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mounpoint however if you want to access files uploaded by the official rclone provides the option to select other devices and mountpoints during config.
The web interface uses a special device called Jotta for the Archive, Sync and Shared mountpoints. In most cases you'll want to use the Jotta/Archive device/mounpoint however if you want to access files uploaded by any of the official clients rclone provides the option to select other devices and mountpoints during config.

### --fast-list ###


@@ -292,7 +292,7 @@ func (s *StatsInfo) String() string {
} }
} }
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
dateString, dateString,
fs.SizeSuffix(s.bytes), fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"), fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -313,16 +313,23 @@ func (s *StatsInfo) String() string {
errorDetails = " (no need to retry)" errorDetails = " (no need to retry)"
} }
_, _ = fmt.Fprintf(buf, `
Errors: %10d%s
Checks: %10d / %d, %s
Transferred: %10d / %d, %s
Elapsed time: %10v
`,
s.errors, errorDetails,
s.checks, totalChecks, percent(s.checks, totalChecks),
s.transfers, totalTransfer, percent(s.transfers, totalTransfer),
dtRounded)
// Add only non zero stats
if s.errors != 0 {
_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
s.errors, errorDetails)
}
if s.checks != 0 || totalChecks != 0 {
_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
s.checks, totalChecks, percent(s.checks, totalChecks))
}
if s.deletes != 0 {
_, _ = fmt.Fprintf(buf, "Deleted: %10d\n", s.deletes)
}
if s.transfers != 0 || totalTransfer != 0 {
_, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
}
_, _ = fmt.Fprintf(buf, "Elapsed time: %10v\n", dtRounded)
} }
// checking and transferring have their own locking so unlock // checking and transferring have their own locking so unlock


@@ -174,6 +174,9 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
n = 0 n = 0
for { for {
err = a.fill() err = a.fill()
if err == io.EOF {
return n, nil
}
if err != nil { if err != nil {
return n, err return n, err
} }
@@ -183,6 +186,10 @@ func (a *AsyncReader) WriteTo(w io.Writer) (n int64, err error) {
if err != nil { if err != nil {
return n, err return n, err
} }
if a.cur.err == io.EOF {
a.err = a.cur.err
return n, err
}
if a.cur.err != nil { if a.cur.err != nil {
a.err = a.cur.err a.err = a.cur.err
return n, a.cur.err return n, a.cur.err


@@ -60,12 +60,12 @@ func TestAsyncWriteTo(t *testing.T) {
var dst = &bytes.Buffer{} var dst = &bytes.Buffer{}
n, err := io.Copy(dst, ar) n, err := io.Copy(dst, ar)
assert.Equal(t, io.EOF, err)
require.NoError(t, err)
assert.Equal(t, int64(10), n)
// Should still return EOF
// Should still not return any errors
n, err = io.Copy(dst, ar)
assert.Equal(t, io.EOF, err)
require.NoError(t, err)
assert.Equal(t, int64(0), n)
err = ar.Close() err = ar.Close()


@@ -178,6 +178,53 @@ func IsNoRetryError(err error) (isNoRetry bool) {
return return
} }
// NoLowLevelRetrier is an optional interface for error as to whether
// the operation should not be retried at a low level.
//
// NoLowLevelRetry errors won't be retried by low level retry loops.
type NoLowLevelRetrier interface {
error
NoLowLevelRetry() bool
}
// wrappedNoLowLevelRetryError is an error wrapped so it will satisfy the
// NoLowLevelRetrier interface and return true
type wrappedNoLowLevelRetryError struct {
error
}
// NoLowLevelRetry interface
func (err wrappedNoLowLevelRetryError) NoLowLevelRetry() bool {
return true
}
// Check interface
var _ NoLowLevelRetrier = wrappedNoLowLevelRetryError{error(nil)}
// NoLowLevelRetryError makes an error which indicates the sync
// shouldn't be low level retried.
func NoLowLevelRetryError(err error) error {
return wrappedNoLowLevelRetryError{err}
}
// Cause returns the underlying error
func (err wrappedNoLowLevelRetryError) Cause() error {
return err.error
}
// IsNoLowLevelRetryError returns true if err conforms to the NoLowLevelRetry
// interface and calling the NoLowLevelRetry method returns true.
func IsNoLowLevelRetryError(err error) (isNoLowLevelRetry bool) {
errors.Walk(err, func(err error) bool {
if r, ok := err.(NoLowLevelRetrier); ok {
isNoLowLevelRetry = r.NoLowLevelRetry()
return true
}
return false
})
return
}
// RetryAfter is an optional interface for error as to whether the // RetryAfter is an optional interface for error as to whether the
// operation should be retried after a given delay // operation should be retried after a given delay
// //
@@ -345,6 +392,11 @@ func ShouldRetry(err error) bool {
return false return false
} }
// If error has been marked to NoLowLevelRetry then don't retry
if IsNoLowLevelRetryError(err) {
return false
}
// Find root cause if available // Find root cause if available
retriable, err := Cause(err) retriable, err := Cause(err)
if retriable { if retriable {


@@ -211,12 +211,18 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) ([][]fs.Directory, er
if err != nil { if err != nil {
return nil, errors.Wrap(err, "find duplicate dirs") return nil, errors.Wrap(err, "find duplicate dirs")
} }
duplicateDirs := [][]fs.Directory{}
// make sure parents are before children
for _, ds := range dirs {
duplicateNames := []string{}
for name, ds := range dirs {
if len(ds) > 1 {
duplicateDirs = append(duplicateDirs, ds)
duplicateNames = append(duplicateNames, name)
}
}
sort.Strings(duplicateNames)
duplicateDirs := [][]fs.Directory{}
for _, name := range duplicateNames {
duplicateDirs = append(duplicateDirs, dirs[name])
}
return duplicateDirs, nil return duplicateDirs, nil
} }
@@ -235,7 +241,8 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
 		fs.Infof(dirs[0], "Merging contents of duplicate directories")
 		err := mergeDirs(ctx, dirs)
 		if err != nil {
-			return errors.Wrap(err, "merge duplicate dirs")
+			err = fs.CountError(err)
+			fs.Errorf(nil, "merge duplicate dirs: %v", err)
 		}
 	} else {
 		fs.Infof(dirs[0], "NOT Merging contents of duplicate directories as --dry-run")
@@ -251,23 +258,16 @@ func dedupeMergeDuplicateDirs(ctx context.Context, f fs.Fs, duplicateDirs [][]fs
 func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
 	fs.Infof(f, "Looking for duplicates using %v mode.", mode)
 
-	// Find duplicate directories first and fix them - repeat
-	// until all fixed
-	for {
-		duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
-		if err != nil {
-			return err
-		}
-		if len(duplicateDirs) == 0 {
-			break
-		}
+	// Find duplicate directories first and fix them
+	duplicateDirs, err := dedupeFindDuplicateDirs(ctx, f)
+	if err != nil {
+		return err
+	}
+	if len(duplicateDirs) != 0 {
 		err = dedupeMergeDuplicateDirs(ctx, f, duplicateDirs)
 		if err != nil {
 			return err
 		}
-		if fs.Config.DryRun {
-			break
-		}
 	}
 
 	// find a hash to use
@@ -275,7 +275,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode) error {
 	// Now find duplicate files
 	files := map[string][]fs.Object{}
-	err := walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
+	err = walk.ListR(ctx, f, "", true, fs.Config.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
 		entries.ForObject(func(o fs.Object) {
 			remote := o.Remote()
 			files[remote] = append(files[remote], o)
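A side note on the sort in dedupeFindDuplicateDirs above: because remote paths are "/"-separated and a parent's path is a strict prefix of its children's, a plain lexical sort is enough to put parents before children. A tiny standalone illustration (the paths are invented):

package main

import (
	"fmt"
	"sort"
)

func main() {
	// a parent directory always sorts before anything nested inside it
	names := []string{"photos/2019/dups", "photos", "photos/2019"}
	sort.Strings(names)
	fmt.Println(names) // [photos photos/2019 photos/2019/dups]
}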

View File

@@ -721,6 +721,9 @@ func (c *checkMarch) DstOnly(dst fs.DirEntry) (recurse bool) {
 		atomic.AddInt32(&c.srcFilesMissing, 1)
 	case fs.Directory:
 		// Do the same thing to the entire contents of the directory
+		if c.oneway {
+			return false
+		}
 		return true
 	default:
 		panic("Bad object in DirEntries")

View File

@@ -7,6 +7,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/fserrors"
) )
// reOpen is a wrapper for an object reader which reopens the stream on error // reOpen is a wrapper for an object reader which reopens the stream on error
@@ -104,7 +105,7 @@ func (h *reOpen) Read(p []byte) (n int, err error) {
 		h.err = err
 	}
 	h.read += int64(n)
-	if err != nil && err != io.EOF {
+	if err != nil && err != io.EOF && !fserrors.IsNoLowLevelRetryError(err) {
 		// close underlying stream
 		h.opened = false
 		_ = h.rc.Close()

View File

@@ -750,10 +750,7 @@ func (m *Mega) addFSNode(itm FSNode) (*Node, error) {
 	}
 	// Shared file
 	default:
-		k, ok := m.FS.skmap[args[0]]
-		if !ok {
-			return nil, errors.New("couldn't find decryption key for shared file")
-		}
+		k := m.FS.skmap[args[0]]
 		b, err := base64urldecode(k)
 		if err != nil {
 			return nil, err
@@ -924,8 +921,7 @@ func (m *Mega) getFileSystem() error {
 	for _, itm := range res[0].F {
 		_, err = m.addFSNode(itm)
 		if err != nil {
-			m.debugf("couldn't decode FSNode %#v: %v ", itm, err)
-			continue
+			return err
 		}
 	}

View File

@@ -309,6 +309,13 @@ func (c *cache) rename(name string, newName string) (err error) {
 	if err = os.Rename(osOldPath, osNewPath); err != nil {
 		return errors.Wrapf(err, "Failed to rename in cache: %s to %s", osOldPath, osNewPath)
 	}
+	// Rename the cache item
+	c.itemMu.Lock()
+	if oldItem, ok := c.item[name]; ok {
+		c.item[newName] = oldItem
+		delete(c.item, name)
+	}
+	c.itemMu.Unlock()
 	fs.Infof(name, "Renamed in cache")
 	return nil
 }
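The hunk above fixes the case where only the file on disk was renamed while the in-memory item that tracks open counts kept its old key. A condensed sketch of the pattern (cacheItem and simpleCache are invented names, not rclone's types): the on-disk rename and the map re-key happen together, with the map update done under the lock that guards it.

package example

import (
	"os"
	"sync"
)

type cacheItem struct{ opens int }

type simpleCache struct {
	mu   sync.Mutex
	item map[string]*cacheItem
}

// rename moves the cached file on disk and moves its bookkeeping entry to
// the new key so open/close accounting is not lost.
func (c *simpleCache) rename(osOldPath, osNewPath, name, newName string) error {
	if err := os.Rename(osOldPath, osNewPath); err != nil {
		return err
	}
	c.mu.Lock()
	if old, ok := c.item[name]; ok {
		c.item[newName] = old
		delete(c.item, name)
	}
	c.mu.Unlock()
	return nil
}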

View File

@@ -37,13 +37,19 @@ type File struct {
 }
 
 // newFile creates a new File
+//
+// o may be nil
 func newFile(d *Dir, o fs.Object, leaf string) *File {
-	return &File{
+	f := &File{
 		d:     d,
 		o:     o,
 		leaf:  leaf,
 		inode: newInode(),
 	}
+	if o != nil {
+		f.size = o.Size()
+	}
+	return f
 }
// String converts it to printable // String converts it to printable
@@ -89,6 +95,11 @@ func (f *File) Path() string {
 	return path.Join(f.d.path, f.leaf)
 }
 
+// osPath returns the full path of the file in the cache in OS format
+func (f *File) osPath() string {
+	return f.d.vfs.cache.toOSPath(f.Path())
+}
+
 // Sys returns underlying data source (can be nil) - satisfies Node interface
 func (f *File) Sys() interface{} {
 	return nil
@@ -126,29 +137,42 @@ func (f *File) applyPendingRename() {
 func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
 	f.mu.RLock()
 	d := f.d
+	o := f.o
+	oldPendingRenameFun := f.pendingRenameFun
 	f.mu.RUnlock()
 
 	if features := d.f.Features(); features.Move == nil && features.Copy == nil {
 		err := errors.Errorf("Fs %q can't rename files (no server side Move or Copy)", d.f)
 		fs.Errorf(f.Path(), "Dir.Rename error: %v", err)
 		return err
 	}
 
+	newPath := path.Join(destDir.path, newName)
+
 	renameCall := func(ctx context.Context) error {
-		newPath := path.Join(destDir.path, newName)
+		f.mu.RLock()
+		o := f.o
+		f.mu.RUnlock()
+		if o == nil {
+			return errors.New("Cannot rename: file object is not available")
+		}
+
+		// chain rename calls if any
+		if oldPendingRenameFun != nil {
+			err := oldPendingRenameFun(ctx)
+			if err != nil {
+				return err
+			}
+		}
+
+		// do the move of the remote object
 		dstOverwritten, _ := d.f.NewObject(ctx, newPath)
-		newObject, err := operations.Move(ctx, d.f, dstOverwritten, newPath, f.o)
+		newObject, err := operations.Move(ctx, d.f, dstOverwritten, newPath, o)
 		if err != nil {
 			fs.Errorf(f.Path(), "File.Rename error: %v", err)
 			return err
 		}
+
+		// Rename in the cache too if it exists
+		if f.d.vfs.Opt.CacheMode >= CacheModeWrites && f.d.vfs.cache.exists(f.Path()) {
+			if err := f.d.vfs.cache.rename(f.Path(), newPath); err != nil {
+				fs.Infof(f.Path(), "File.Rename failed in Cache: %v", err)
+			}
+		}
+
 		// newObject can be nil here for example if --dry-run
 		if newObject == nil {
 			err = errors.New("rename failed: nil object returned")
@@ -156,25 +180,32 @@ func (f *File) rename(ctx context.Context, destDir *Dir, newName string) error {
 			return err
 		}
 
 		// Update the node with the new details
-		fs.Debugf(f.o, "Updating file with %v %p", newObject, f)
+		fs.Debugf(o, "Updating file with %v %p", newObject, f)
 		// f.rename(destDir, newObject)
 		f.mu.Lock()
 		f.o = newObject
-		f.d = destDir
-		f.leaf = path.Base(newObject.Remote())
 		f.pendingRenameFun = nil
 		f.mu.Unlock()
 		return nil
 	}
 
-	f.mu.RLock()
+	// Rename in the cache if it exists
+	if f.d.vfs.Opt.CacheMode != CacheModeOff && f.d.vfs.cache.exists(f.Path()) {
+		if err := f.d.vfs.cache.rename(f.Path(), newPath); err != nil {
+			fs.Infof(f.Path(), "File.Rename failed in Cache: %v", err)
+		}
+	}
+
+	// rename the file object
+	f.mu.Lock()
+	f.d = destDir
+	f.leaf = newName
 	writing := f._writingInProgress()
-	f.mu.RUnlock()
+	f.mu.Unlock()
+
 	if writing {
-		fs.Debugf(f.o, "File is currently open, delaying rename %p", f)
+		fs.Debugf(o, "File is currently open, delaying rename %p", f)
 		f.mu.Lock()
-		f.d = destDir
-		f.leaf = newName
 		f.pendingRenameFun = renameCall
 		f.mu.Unlock()
 		return nil
@@ -467,7 +498,7 @@ func (f *File) openRW(flags int) (fh *RWFileHandle, err error) {
 	}
 	// fs.Debugf(o, "File.openRW")
-	fh, err = newRWFileHandle(d, f, f.Path(), flags)
+	fh, err = newRWFileHandle(d, f, flags)
 	if err != nil {
 		fs.Errorf(f, "File.openRW failed: %v", err)
 		return nil, err
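Behind the File.rename changes above is the idea of a pending rename: if the file is open for writing, the remote-side move is captured as a closure and queued, and a later rename chains the previously queued one so both replay in order when the last writer closes. A stripped-down sketch of that chaining (the types and names are invented, not the actual VFS structures):

package example

import "context"

type pendingFile struct {
	writers          int
	pendingRenameFun func(ctx context.Context) error
}

// queueRename runs the rename now if nothing is writing, otherwise queues it,
// chaining any rename that was already pending so they replay in order.
func (f *pendingFile) queueRename(do func(ctx context.Context) error) error {
	old := f.pendingRenameFun
	call := func(ctx context.Context) error {
		if old != nil {
			if err := old(ctx); err != nil {
				return err
			}
		}
		return do(ctx)
	}
	if f.writers > 0 {
		f.pendingRenameFun = call // deferred until the last writer closes
		return nil
	}
	return call(context.Background())
}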

View File

@@ -6,6 +6,7 @@ import (
"os" "os"
"testing" "testing"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest"
"github.com/rclone/rclone/fstest/mockfs" "github.com/rclone/rclone/fstest/mockfs"
"github.com/rclone/rclone/fstest/mockobject" "github.com/rclone/rclone/fstest/mockobject"
@@ -13,8 +14,10 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func fileCreate(t *testing.T, r *fstest.Run) (*VFS, *File, fstest.Item) { func fileCreate(t *testing.T, r *fstest.Run, mode CacheMode) (*VFS, *File, fstest.Item) {
vfs := New(r.Fremote, nil) opt := DefaultOpt
opt.CacheMode = mode
vfs := New(r.Fremote, &opt)
file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1) file1 := r.WriteObject(context.Background(), "dir/file1", "file1 contents", t1)
fstest.CheckItems(t, r.Fremote, file1) fstest.CheckItems(t, r.Fremote, file1)
@@ -29,7 +32,7 @@ func fileCreate(t *testing.T, r *fstest.Run) (*VFS, *File, fstest.Item) {
 func TestFileMethods(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r)
+	vfs, file, _ := fileCreate(t, r, CacheModeOff)
 
 	// String
 	assert.Equal(t, "dir/file1", file.String())
@@ -84,7 +87,7 @@ func TestFileSetModTime(t *testing.T) {
 		return
 	}
 	defer r.Finalise()
-	vfs, file, file1 := fileCreate(t, r)
+	vfs, file, file1 := fileCreate(t, r, CacheModeOff)
 
 	err := file.SetModTime(t2)
 	require.NoError(t, err)
@@ -97,12 +100,8 @@ func TestFileSetModTime(t *testing.T) {
 	assert.Equal(t, EROFS, err)
 }
 
-func TestFileOpenRead(t *testing.T) {
-	r := fstest.NewRun(t)
-	defer r.Finalise()
-	_, file, _ := fileCreate(t, r)
-
-	fd, err := file.openRead()
+func fileCheckContents(t *testing.T, file *File) {
+	fd, err := file.Open(os.O_RDONLY)
 	require.NoError(t, err)
 
 	contents, err := ioutil.ReadAll(fd)
@@ -112,6 +111,14 @@ func TestFileOpenRead(t *testing.T) {
 	require.NoError(t, fd.Close())
 }
 
+func TestFileOpenRead(t *testing.T) {
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	_, file, _ := fileCreate(t, r, CacheModeOff)
+
+	fileCheckContents(t, file)
+}
+
 func TestFileOpenReadUnknownSize(t *testing.T) {
 	var (
 		contents = []byte("file contents")
@@ -160,7 +167,7 @@ func TestFileOpenReadUnknownSize(t *testing.T) {
 func TestFileOpenWrite(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r)
+	vfs, file, _ := fileCreate(t, r, CacheModeOff)
 
 	fd, err := file.openWrite(os.O_WRONLY | os.O_TRUNC)
 	require.NoError(t, err)
@@ -181,7 +188,7 @@ func TestFileOpenWrite(t *testing.T) {
 func TestFileRemove(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r)
+	vfs, file, _ := fileCreate(t, r, CacheModeOff)
 
 	err := file.Remove()
 	require.NoError(t, err)
@@ -196,7 +203,7 @@ func TestFileRemove(t *testing.T) {
 func TestFileRemoveAll(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	vfs, file, _ := fileCreate(t, r)
+	vfs, file, _ := fileCreate(t, r, CacheModeOff)
 
 	err := file.RemoveAll()
 	require.NoError(t, err)
@@ -211,7 +218,7 @@ func TestFileRemoveAll(t *testing.T) {
 func TestFileOpen(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()
-	_, file, _ := fileCreate(t, r)
+	_, file, _ := fileCreate(t, r, CacheModeOff)
 
 	fd, err := file.Open(os.O_RDONLY)
 	require.NoError(t, err)
@@ -233,3 +240,90 @@ func TestFileOpen(t *testing.T) {
 	fd, err = file.Open(3)
 	assert.Equal(t, EPERM, err)
 }
func testFileRename(t *testing.T, mode CacheMode) {
r := fstest.NewRun(t)
defer r.Finalise()
vfs, file, item := fileCreate(t, r, mode)
rootDir, err := vfs.Root()
require.NoError(t, err)
// check file in cache
if mode != CacheModeOff {
// read contents to get file in cache
fileCheckContents(t, file)
assert.True(t, vfs.cache.exists(item.Path))
}
dir := file.Dir()
// start with "dir/file1"
fstest.CheckItems(t, r.Fremote, item)
// rename file to "newLeaf"
err = dir.Rename("file1", "newLeaf", rootDir)
require.NoError(t, err)
item.Path = "newLeaf"
fstest.CheckItems(t, r.Fremote, item)
// check file in cache
if mode != CacheModeOff {
assert.True(t, vfs.cache.exists(item.Path))
}
// check file exists in the vfs layer at its new name
_, err = vfs.Stat("newLeaf")
require.NoError(t, err)
// rename it back to "dir/file1"
err = rootDir.Rename("newLeaf", "file1", dir)
require.NoError(t, err)
item.Path = "dir/file1"
fstest.CheckItems(t, r.Fremote, item)
// check file in cache
if mode != CacheModeOff {
assert.True(t, vfs.cache.exists(item.Path))
}
// now try renaming it with the file open
// first open it and write to it but dont close it
fd, err := file.Open(os.O_WRONLY | os.O_TRUNC)
require.NoError(t, err)
newContents := []byte("this is some new contents")
_, err = fd.Write(newContents)
require.NoError(t, err)
// rename file to "newLeaf"
err = dir.Rename("file1", "newLeaf", rootDir)
require.NoError(t, err)
newItem := fstest.NewItem("newLeaf", string(newContents), item.ModTime)
// check file has been renamed immediately in the cache
if mode != CacheModeOff {
assert.True(t, vfs.cache.exists("newLeaf"))
}
// check file exists in the vfs layer at its new name
_, err = vfs.Stat("newLeaf")
require.NoError(t, err)
// Close the file
require.NoError(t, fd.Close())
// Check file has now been renamed on the remote
item.Path = "newLeaf"
fstest.CheckListingWithPrecision(t, r.Fremote, []fstest.Item{newItem}, nil, fs.ModTimeNotSupported)
}
func TestFileRename(t *testing.T) {
t.Run("CacheModeOff", func(t *testing.T) {
testFileRename(t, CacheModeOff)
})
t.Run("CacheModeFull", func(t *testing.T) {
testFileRename(t, CacheModeFull)
})
}

View File

@@ -24,14 +24,12 @@ type RWFileHandle struct {
 	*os.File
 	mu          sync.Mutex
 	closed      bool // set if handle has been closed
-	remote      string
 	file        *File
 	d           *Dir
 	opened      bool
 	flags       int  // open flags
-	osPath      string // path to the file in the cache
 	writeCalled bool // if any Write() methods have been called
 	changed     bool // file contents was changed in any other way
 }
// Check interfaces // Check interfaces
@@ -44,26 +42,25 @@ var (
 	_ io.Closer = (*RWFileHandle)(nil)
 )
 
-func newRWFileHandle(d *Dir, f *File, remote string, flags int) (fh *RWFileHandle, err error) {
+func newRWFileHandle(d *Dir, f *File, flags int) (fh *RWFileHandle, err error) {
 	// if O_CREATE and O_EXCL are set and if path already exists, then return EEXIST
 	if flags&(os.O_CREATE|os.O_EXCL) == os.O_CREATE|os.O_EXCL && f.exists() {
 		return nil, EEXIST
 	}
 	fh = &RWFileHandle{
 		file:  f,
 		d:     d,
-		remote: remote,
 		flags: flags,
 	}
 
 	// mark the file as open in the cache - must be done before the mkdir
-	fh.d.vfs.cache.open(fh.remote)
+	fh.d.vfs.cache.open(fh.file.Path())
 
 	// Make a place for the file
-	fh.osPath, err = d.vfs.cache.mkdir(remote)
+	_, err = d.vfs.cache.mkdir(fh.file.Path())
 	if err != nil {
-		fh.d.vfs.cache.close(fh.remote)
+		fh.d.vfs.cache.close(fh.file.Path())
 		return nil, errors.Wrap(err, "open RW handle failed to make cache directory")
 	}
@@ -113,9 +110,9 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 	// If the remote object exists AND its cached file exists locally AND there are no
 	// other RW handles with it open, then attempt to update it.
 	if o != nil && fh.file.rwOpens() == 0 {
-		cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.remote)
+		cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.file.Path())
 		if err == nil && cacheObj != nil {
-			_, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.remote, o)
+			_, err = copyObj(fh.d.vfs.cache.f, cacheObj, fh.file.Path(), o)
 			if err != nil {
 				return errors.Wrap(err, "open RW handle failed to update cached file")
 			}
@@ -123,12 +120,12 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 	}
 
 	// try to open a exising cache file
-	fd, err = file.OpenFile(fh.osPath, cacheFileOpenFlags&^os.O_CREATE, 0600)
+	fd, err = file.OpenFile(fh.file.osPath(), cacheFileOpenFlags&^os.O_CREATE, 0600)
 	if os.IsNotExist(err) {
 		// cache file does not exist, so need to fetch it if we have an object to fetch
 		// it from
 		if o != nil {
-			_, err = copyObj(fh.d.vfs.cache.f, nil, fh.remote, o)
+			_, err = copyObj(fh.d.vfs.cache.f, nil, fh.file.Path(), o)
 			if err != nil {
 				cause := errors.Cause(err)
 				if cause != fs.ErrorObjectNotFound && cause != fs.ErrorDirNotFound {
@@ -162,7 +159,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 		fh.changed = true
 		if fh.flags&os.O_CREATE == 0 && fh.file.exists() {
 			// create an empty file if it exists on the source
-			err = ioutil.WriteFile(fh.osPath, []byte{}, 0600)
+			err = ioutil.WriteFile(fh.file.osPath(), []byte{}, 0600)
 			if err != nil {
 				return errors.Wrap(err, "cache open failed to create zero length file")
 			}
@@ -172,9 +169,9 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 		// exists in these cases.
 		if runtime.GOOS == "windows" && fh.flags&os.O_APPEND != 0 {
 			cacheFileOpenFlags &^= os.O_TRUNC
-			_, err = os.Stat(fh.osPath)
+			_, err = os.Stat(fh.file.osPath())
 			if err == nil {
-				err = os.Truncate(fh.osPath, 0)
+				err = os.Truncate(fh.file.osPath(), 0)
 				if err != nil {
 					return errors.Wrap(err, "cache open failed to truncate")
 				}
@@ -184,7 +181,7 @@ func (fh *RWFileHandle) openPending(truncate bool) (err error) {
 	if fd == nil {
 		fs.Debugf(fh.logPrefix(), "Opening cached copy with flags=%s", decodeOpenFlags(fh.flags))
-		fd, err = file.OpenFile(fh.osPath, cacheFileOpenFlags, 0600)
+		fd, err = file.OpenFile(fh.file.osPath(), cacheFileOpenFlags, 0600)
 		if err != nil {
 			return errors.Wrap(err, "cache open file failed")
 		}
@@ -280,14 +277,14 @@ func (fh *RWFileHandle) flushWrites(closeFile bool) error {
 	if isCopied {
 		// Transfer the temp file to the remote
-		cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.remote)
+		cacheObj, err := fh.d.vfs.cache.f.NewObject(context.TODO(), fh.file.Path())
 		if err != nil {
 			err = errors.Wrap(err, "failed to find cache file")
 			fs.Errorf(fh.logPrefix(), "%v", err)
 			return err
 		}
 
-		o, err := copyObj(fh.d.vfs.f, fh.file.getObject(), fh.remote, cacheObj)
+		o, err := copyObj(fh.d.vfs.f, fh.file.getObject(), fh.file.Path(), cacheObj)
 		if err != nil {
 			err = errors.Wrap(err, "failed to transfer file from cache to remote")
 			fs.Errorf(fh.logPrefix(), "%v", err)
@@ -320,7 +317,7 @@ func (fh *RWFileHandle) close() (err error) {
 		if fh.opened {
 			fh.file.delRWOpen()
 		}
-		fh.d.vfs.cache.close(fh.remote)
+		fh.d.vfs.cache.close(fh.file.Path())
 	}()
 
 	return fh.flushWrites(true)
@@ -549,5 +546,5 @@ func (fh *RWFileHandle) Sync() error {
 }
 
 func (fh *RWFileHandle) logPrefix() string {
-	return fmt.Sprintf("%s(%p)", fh.remote, fh)
+	return fmt.Sprintf("%s(%p)", fh.file.Path(), fh)
 }

View File

@@ -21,16 +21,18 @@ func cleanup(t *testing.T, r *fstest.Run, vfs *VFS) {
 	r.Finalise()
 }
 
-// Open a file for write
-func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
+// Create a file and open it with the flags passed in
+func rwHandleCreateFlags(t *testing.T, r *fstest.Run, create bool, filename string, flags int) (*VFS, *RWFileHandle) {
 	opt := DefaultOpt
 	opt.CacheMode = CacheModeFull
 	vfs := New(r.Fremote, &opt)
 
-	file1 := r.WriteObject(context.Background(), "dir/file1", "0123456789abcdef", t1)
-	fstest.CheckItems(t, r.Fremote, file1)
+	if create {
+		file1 := r.WriteObject(context.Background(), filename, "0123456789abcdef", t1)
+		fstest.CheckItems(t, r.Fremote, file1)
+	}
 
-	h, err := vfs.OpenFile("dir/file1", os.O_RDONLY, 0777)
+	h, err := vfs.OpenFile(filename, flags, 0777)
 	require.NoError(t, err)
 	fh, ok := h.(*RWFileHandle)
 	require.True(t, ok)
@@ -38,18 +40,14 @@ func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
 	return vfs, fh
 }
 
+// Open a file for read
+func rwHandleCreateReadOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
+	return rwHandleCreateFlags(t, r, true, "dir/file1", os.O_RDONLY)
+}
+
 // Open a file for write
 func rwHandleCreateWriteOnly(t *testing.T, r *fstest.Run) (*VFS, *RWFileHandle) {
-	opt := DefaultOpt
-	opt.CacheMode = CacheModeFull
-	vfs := New(r.Fremote, &opt)
-
-	h, err := vfs.OpenFile("file1", os.O_WRONLY|os.O_CREATE, 0777)
-	require.NoError(t, err)
-	fh, ok := h.(*RWFileHandle)
-	require.True(t, ok)
-
-	return vfs, fh
+	return rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
 }
// read data from the string // read data from the string
@@ -494,6 +492,96 @@ func TestRWFileHandleReleaseWrite(t *testing.T) {
 	assert.True(t, fh.closed)
 }
// check the size of the file through the open file (if not nil) and via stat
func assertSize(t *testing.T, vfs *VFS, fh *RWFileHandle, filepath string, size int64) {
if fh != nil {
assert.Equal(t, size, fh.Size())
}
fi, err := vfs.Stat(filepath)
require.NoError(t, err)
assert.Equal(t, size, fi.Size())
}
func TestRWFileHandleSizeTruncateExisting(t *testing.T) {
r := fstest.NewRun(t)
vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_TRUNC)
defer cleanup(t, r, vfs)
// check initial size after opening
assertSize(t, vfs, fh, "dir/file1", 0)
// write some bytes
n, err := fh.Write([]byte("hello"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
// check size after writing
assertSize(t, vfs, fh, "dir/file1", 5)
// close
assert.NoError(t, fh.Close())
// check size after close
assertSize(t, vfs, nil, "dir/file1", 5)
}
func TestRWFileHandleSizeCreateExisting(t *testing.T) {
r := fstest.NewRun(t)
vfs, fh := rwHandleCreateFlags(t, r, true, "dir/file1", os.O_WRONLY|os.O_CREATE)
defer cleanup(t, r, vfs)
// check initial size after opening
assertSize(t, vfs, fh, "dir/file1", 16)
// write some bytes
n, err := fh.Write([]byte("hello"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
// check size after writing
assertSize(t, vfs, fh, "dir/file1", 16)
// write some more bytes
n, err = fh.Write([]byte("helloHELLOhello"))
assert.NoError(t, err)
assert.Equal(t, 15, n)
// check size after writing
assertSize(t, vfs, fh, "dir/file1", 20)
// close
assert.NoError(t, fh.Close())
// check size after close
assertSize(t, vfs, nil, "dir/file1", 20)
}
func TestRWFileHandleSizeCreateNew(t *testing.T) {
r := fstest.NewRun(t)
vfs, fh := rwHandleCreateFlags(t, r, false, "file1", os.O_WRONLY|os.O_CREATE)
defer cleanup(t, r, vfs)
// check initial size after opening
assertSize(t, vfs, fh, "file1", 0)
// write some bytes
n, err := fh.Write([]byte("hello"))
assert.NoError(t, err)
assert.Equal(t, 5, n)
// check size after writing
assertSize(t, vfs, fh, "file1", 5)
// check size after writing
assertSize(t, vfs, fh, "file1", 5)
// close
assert.NoError(t, fh.Close())
// check size after close
assertSize(t, vfs, nil, "file1", 5)
}
func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) { func testRWFileHandleOpenTest(t *testing.T, vfs *VFS, test *openTest) {
fileName := "open-test-file" fileName := "open-test-file"
@@ -610,7 +698,7 @@ func TestRWFileModTimeWithOpenWriters(t *testing.T) {
 	}
 }
 
-func TestCacheRename(t *testing.T) {
+func TestRWCacheRename(t *testing.T) {
 	r := fstest.NewRun(t)
 	defer r.Finalise()