mirror of https://github.com/rclone/rclone.git synced 2026-01-21 20:03:22 +00:00

Compare commits


16 Commits

Author SHA1 Message Date
Nick Craig-Wood
f8436468ae ftp: Update ftp library to support PRET for drftpd
See: https://forum.rclone.org/t/ftp-client-error/18983
2020-09-13 11:13:58 +01:00
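
drftpd is a distributed FTP server which requires a PRET (pre-transfer) command before PASV so it can pick the slave that will serve the data connection. A minimal sketch of that exchange over a raw control connection, assuming a hypothetical host and path (this is the protocol idea, not the ncw/ftp implementation):

```go
package main

import (
	"fmt"
	"net/textproto"
)

func main() {
	// Hypothetical drftpd server; PRET only matters for distributed servers.
	conn, err := textproto.Dial("tcp", "ftp.example.com:21")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if _, _, err := conn.ReadResponse(220); err != nil { // read the greeting
		panic(err)
	}
	// PRET announces the command the next PASV will serve, so the server
	// can route the data connection to the slave holding the file.
	id, err := conn.Cmd("PRET RETR /path/to/file")
	if err != nil {
		panic(err)
	}
	conn.StartResponse(id)
	code, msg, err := conn.ReadResponse(200)
	conn.EndResponse(id)
	fmt.Println(code, msg, err) // a 200-series reply means PASV may proceed
}
```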
Nick Craig-Wood
a2a94344f4 drive: don't stop server side copy if couldn't read description
Before this change we would error out of a server side copy if we
couldn't read the description for a google doc.

This patch just logs an ERROR message and carries on.

See: https://forum.rclone.org/t/rclone-google-drive-to-google-drive-migration-for-multiple-users/19024/3
2020-09-11 17:22:40 +01:00
Nick Craig-Wood
6a56ac1032 vfs,local: Log an ERROR if we fail to set the file to be sparse
See: https://forum.rclone.org/t/rclone-1-53-release/18880/73
2020-09-11 15:36:47 +01:00
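
The pattern from this commit, sketched below using rclone's lib/file.SetSparse helper (signature assumed): failure to mark a cache file sparse is surfaced as an ERROR instead of a DEBUG line, but it no longer affects the write.

```go
package main

import (
	"os"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/file"
)

func main() {
	out, err := os.Create("cache-file.bin")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	// Sparse files matter on Windows so unwritten ranges in the VFS cache
	// don't consume real disk space; log loudly but carry on if unsupported.
	if err := file.SetSparse(out); err != nil {
		fs.Errorf("cache-file.bin", "Failed to set sparse: %v", err)
	}
}
```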
Nick Craig-Wood
96299629b4 Add wjielai to contributors 2020-09-11 15:36:38 +01:00
Nick Craig-Wood
75de30cfa8 sync: fix --cutoff-mode soft & cautious so it doesn't end the transfer early
Before this fix --cutoff-mode soft and cautious would emit a Fatal
error which stopped the sync immediately.

This fix introduces a new error which is checked in the sync error
processing which stops the sync gracefully.

Fixes #4576
2020-09-09 12:53:21 +01:00
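
A sketch of the mechanism with simplified names (the real code uses accounting.ErrorMaxTransferLimitReachedGraceful and a second "march" context, as the diffs below show): a sentinel error tells the error processor to cancel the input pipeline instead of aborting the whole sync.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Sentinel returned when --max-transfer is hit in soft/cautious mode.
var errMaxTransferGraceful = errors.New("max transfer limit reached (graceful stop)")

// processError distinguishes "stop feeding new work" from a fatal abort.
func processError(inCancel context.CancelFunc, err error) {
	if errors.Is(err, errMaxTransferGraceful) {
		fmt.Println("stopping transfers gracefully")
		inCancel() // stop the march; in-flight transfers finish normally
		return
	}
	// other errors are recorded and may terminate the sync
}

func main() {
	inCtx, inCancel := context.WithCancel(context.Background())
	processError(inCancel, errMaxTransferGraceful)
	<-inCtx.Done()
	fmt.Println("march stopped:", inCtx.Err())
}
```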
buengese
233bed6a73 dropbox: implement IDer - fixes #2928 2020-09-08 19:04:32 +02:00
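
For context, fs.IDer is a small optional interface; a backend opts in by recording the provider's file ID and exposing it via ID(). A sketch, with the interface shape inferred from the diff below:

```go
package dropboxsketch

// IDer mirrors the shape of rclone's optional fs.IDer interface.
type IDer interface {
	ID() string
}

// Object sketches the backend object with its new id field, populated
// when metadata is read (see setMetadataFromEntry in the diff below).
type Object struct {
	id string
}

// ID returns the Dropbox file id, or "" if it was never set.
func (o *Object) ID() string { return o.id }

// Compile-time check, as the diff adds with `_ fs.IDer = (*Object)(nil)`.
var _ IDer = (*Object)(nil)
```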
buengese
b3964efe4d docs/dropbox: update docs with information regarding the new flags to access shared files and folders 2020-09-08 19:02:35 +02:00
buengese
575f061629 dropbox: add support for viewing shared files and folders 2020-09-08 19:02:35 +02:00
Evan Harris
640d7d3b4e opendrive: Do not retry 400 errors
This type of error is unlikely to be resolved by a retry, and is triggered
in #2296 by files with a timestamp before the Unix epoch.
2020-09-08 17:15:35 +01:00
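
The underlying idea: HTTP status codes are classified into retryable and permanent, and 400 is dropped from the retryable set. A sketch under assumed names (the real list lives in the opendrive backend, shown in the diff below):

```go
package main

import "fmt"

// Assumed retryable set after this change: 400 is gone, transient codes stay.
var retryErrorCodes = []int{401, 408, 423, 429, 500, 502, 503, 504}

func shouldRetryHTTP(status int) bool {
	for _, code := range retryErrorCodes {
		if status == code {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(shouldRetryHTTP(400)) // false - a pre-epoch timestamp won't fix itself
	fmt.Println(shouldRetryHTTP(423)) // true - locked folders may unlock
}
```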
Evan Harris
e92294b482 docs: Updated mount command to reflect that it requires Go 1.13 or newer 2020-09-08 16:40:43 +01:00
wjielai
22937e8982 docs: add Tencent COS to s3 provider list - fixes #4468
* add Tencent COS to s3 provider list.

Co-authored-by: wjielai <wjielai@tencent.com>
2020-09-08 16:34:25 +01:00
edwardxml
c3d1474eb9 docs: Add full stops for consistency in rclone --help
closes #4560 closes #4561 closes #4562 closes #4563 closes #4564
2020-09-08 16:26:09 +01:00
Nick Craig-Wood
e2426ea87b docs: note --log-file does append 2020-09-08 16:13:33 +01:00
Nick Craig-Wood
e58a61175f build: fix architecture name in ARMv7 build - fixes #4571
After introducing the arm-v7 build we are accidentally making debs
and rpms with the architecture arm-v7.

This fixes the problem by stripping the version off.
2020-09-08 16:10:52 +01:00
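
The fix passes the architecture through a helper that strips the version suffix before naming the .deb/.rpm. A plausible sketch of such a stripVersion (the real implementation may differ):

```go
package main

import (
	"fmt"
	"strings"
)

// stripVersion drops a version suffix from a GOARCH name,
// e.g. "arm-v7" -> "arm", leaving plain names untouched.
func stripVersion(goarch string) string {
	if i := strings.Index(goarch, "-"); i >= 0 {
		return goarch[:i]
	}
	return goarch
}

func main() {
	fmt.Println(stripVersion("arm-v7")) // arm
	fmt.Println(stripVersion("amd64")) // amd64
}
```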
Nick Craig-Wood
05bea46c3e accounting: remove new line from end of --stats-one-line display 2020-09-08 16:10:52 +01:00
Chaitanya Bankanhal
c8a719ae0d webui: Prompt user for updating webui if an update is available 2020-09-07 16:45:00 +01:00
29 changed files with 661 additions and 64 deletions

View File

@@ -64,6 +64,7 @@ Rclone *("rsync for cloud storage")* is a command line program to sync files and
* StackPath [:page_facing_up:](https://rclone.org/s3/#stackpath)
* SugarSync [:page_facing_up:](https://rclone.org/sugarsync/)
* Tardigrade [:page_facing_up:](https://rclone.org/tardigrade/)
* Tencent Cloud Object Storage (COS) [:page_facing_up:](https://rclone.org/s3/#tencent-cos)
* Wasabi [:page_facing_up:](https://rclone.org/s3/#wasabi)
* WebDAV [:page_facing_up:](https://rclone.org/webdav/)
* Yandex Disk [:page_facing_up:](https://rclone.org/yandex/)

View File

@@ -2302,9 +2302,10 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (fs.Object,
// preserve the description on copy for docs
info, err := f.getFile(actualID(srcObj.id), "description")
if err != nil {
-return nil, errors.Wrap(err, "failed to read description for Google Doc")
+fs.Errorf(srcObj, "Failed to read description for Google Doc: %v", err)
+} else {
+createInfo.Description = info.Description
}
-createInfo.Description = info.Description
} else {
// don't overwrite the description on copy for files
// this should work for docs but it doesn't - it is probably a bug in Google Drive

View File

@@ -142,6 +142,31 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
Help: "Impersonate this user when using a business account.",
Default: "",
Advanced: true,
}, {
Name: "shared_files",
Help: `Instructs rclone to work on individual shared files.
In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported.
All other operations will be disabled.`,
Default: false,
Advanced: true,
}, {
Name: "shared_folders",
Help: `Instructs rclone to work on shared folders.
When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of a shared folder. Rclone will then try to mount
this shared folder to the root namespace. On success rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.
Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders flag can be omitted after the first use of a particular
shared folder.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -161,9 +186,11 @@ memory. It can be set smaller if you are tight on memory.`, maxChunkSize),
// Options defines the configuration for this backend
type Options struct {
-ChunkSize fs.SizeSuffix `config:"chunk_size"`
-Impersonate string `config:"impersonate"`
-Enc encoder.MultiEncoder `config:"encoding"`
+ChunkSize fs.SizeSuffix `config:"chunk_size"`
+Impersonate string `config:"impersonate"`
+SharedFiles bool `config:"shared_files"`
+SharedFolders bool `config:"shared_folders"`
+Enc encoder.MultiEncoder `config:"encoding"`
}
// Fs represents a remote dropbox server
@@ -186,7 +213,9 @@ type Fs struct {
//
// Dropbox Objects always have full metadata
type Object struct {
-fs *Fs // what this object is part of
+fs *Fs // what this object is part of
+id string
+url string
remote string // The remote path
bytes int64 // size of the object
modTime time.Time // time it was last modified
@@ -332,8 +361,60 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
CaseInsensitive: true,
ReadMimeType: true,
CanHaveEmptyDirectories: true,
-}).Fill(f)
-f.setRoot(root)
+})
// do not fill features yet
if f.opt.SharedFiles {
f.setRoot(root)
if f.root == "" {
return f, nil
}
_, err := f.findSharedFile(f.root)
f.root = ""
if err == nil {
return f, fs.ErrorIsFile
}
return f, nil
}
if f.opt.SharedFolders {
f.setRoot(root)
if f.root == "" {
return f, nil // our root is empty so we probably want to list shared folders
}
dir := path.Dir(f.root)
if dir == "." {
dir = f.root
}
// root is not empty so we have to find the right shared folder if it exists
id, err := f.findSharedFolder(dir)
if err != nil {
// if we didn't find the specified shared folder we have to bail out here
return nil, err
}
// we found the specified shared folder so let's mount it
// this will add it to the user's normal root namespace and allow us
// to actually perform operations on it using the normal api endpoints.
err = f.mountSharedFolder(id)
if err != nil {
switch e := err.(type) {
case sharing.MountFolderAPIError:
if e.EndpointError == nil || (e.EndpointError != nil && e.EndpointError.Tag != sharing.MountFolderErrorAlreadyMounted) {
return nil, err
}
default:
return nil, err
}
// if the mount failed we have to abort here
}
// if the mount succeeded it's now a normal folder in the user's root namespace
// so we disable shared folder mode and proceed normally
f.opt.SharedFolders = false
}
f.features.Fill(f)
// If root starts with / then use the actual root
if strings.HasPrefix(root, "/") {
@@ -355,6 +436,7 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
}
fs.Debugf(f, "Using root namespace %q", f.ns)
}
f.setRoot(root)
// See if the root is actually an object
_, err = f.getFileMetadata(f.slashRoot)
@@ -465,9 +547,150 @@ func (f *Fs) newObjectWithInfo(remote string, info *files.FileMetadata) (fs.Obje
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
if f.opt.SharedFiles {
return f.findSharedFile(remote)
}
return f.newObjectWithInfo(remote, nil)
}
// listSharedFolders lists all available shared folders, mounted and not mounted
// we'll need the id later so we have to return them in their original format
func (f *Fs) listSharedFolders() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFoldersResult
for {
if !started {
arg := sharing.ListFoldersArgs{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFolders(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFoldersContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListFoldersContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
leaf := f.opt.Enc.ToStandardName(entry.Name)
d := fs.NewDir(leaf, time.Now()).SetID(entry.SharedFolderId)
entries = append(entries, d)
if err != nil {
return nil, err
}
}
if res.Cursor == "" {
break
}
}
return entries, nil
}
// findSharedFolder finds the id for a given shared folder name
// somewhat annoyingly there is no endpoint to query a shared folder by its name
// so our only option is to iterate over all shared folders
func (f *Fs) findSharedFolder(name string) (id string, err error) {
entries, err := f.listSharedFolders()
if err != nil {
return "", err
}
for _, entry := range entries {
if entry.(*fs.Dir).Remote() == name {
return entry.(*fs.Dir).ID(), nil
}
}
return "", fs.ErrorDirNotFound
}
// mountSharedFolder mounts a shared folder to the root namespace
func (f *Fs) mountSharedFolder(id string) error {
arg := sharing.MountFolderArg{
SharedFolderId: id,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.sharing.MountFolder(&arg)
return shouldRetry(err)
})
return err
}
// listReceivedFiles lists shared files the user has access to (note this means
// individual files, not files contained in shared folders)
func (f *Fs) listReceivedFiles() (entries fs.DirEntries, err error) {
started := false
var res *sharing.ListFilesResult
for {
if !started {
arg := sharing.ListFilesArg{
Limit: 100,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFiles(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
started = true
} else {
arg := sharing.ListFilesContinueArg{
Cursor: res.Cursor,
}
err := f.pacer.Call(func() (bool, error) {
res, err = f.sharing.ListReceivedFilesContinue(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, errors.Wrap(err, "list continue")
}
}
for _, entry := range res.Entries {
entryPath := entry.Name
o := &Object{
fs: f,
url: entry.PreviewUrl,
remote: entryPath,
modTime: entry.TimeInvited,
}
if err != nil {
return nil, err
}
entries = append(entries, o)
}
if res.Cursor == "" {
break
}
}
return entries, nil
}
func (f *Fs) findSharedFile(name string) (o *Object, err error) {
files, err := f.listReceivedFiles()
if err != nil {
return nil, err
}
for _, entry := range files {
if entry.(*Object).remote == name {
return entry.(*Object), nil
}
}
return nil, fs.ErrorObjectNotFound
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
@@ -478,6 +701,13 @@ func (f *Fs) NewObject(ctx context.Context, remote string) (fs.Object, error) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
if f.opt.SharedFiles {
return f.listReceivedFiles()
}
if f.opt.SharedFolders {
return f.listSharedFolders()
}
root := f.slashRoot
if dir != "" {
root += "/" + dir
@@ -541,7 +771,7 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
leaf := f.opt.Enc.ToStandardName(path.Base(entryPath))
remote := path.Join(dir, leaf)
if folderInfo != nil {
-d := fs.NewDir(remote, time.Now())
+d := fs.NewDir(remote, time.Now()).SetID(folderInfo.Id)
entries = append(entries, d)
} else if fileInfo != nil {
o, err := f.newObjectWithInfo(remote, fileInfo)
@@ -564,6 +794,9 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
//
// The new object may have been created if an error is returned
func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
if f.opt.SharedFiles || f.opt.SharedFolders {
return nil, fserrors.NoRetryError(errors.New("not supported in shared files mode"))
}
// Temporary Object under construction
o := &Object{
fs: f,
@@ -579,6 +812,9 @@ func (f *Fs) PutStream(ctx context.Context, in io.Reader, src fs.ObjectInfo, opt
// Mkdir creates the container if it doesn't exist
func (f *Fs) Mkdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not supported in shared files mode"))
}
root := path.Join(f.slashRoot, dir)
// can't create or run metadata on root
@@ -656,6 +892,9 @@ func (f *Fs) purgeCheck(ctx context.Context, dir string, check bool) (err error)
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(ctx context.Context, dir string) error {
if f.opt.SharedFiles || f.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not supported in shared files mode"))
}
return f.purgeCheck(ctx, dir, true)
}
@@ -927,8 +1166,16 @@ func (o *Object) Remote() string {
return o.remote
}
// ID returns the object id
func (o *Object) ID() string {
return o.id
}
// Hash returns the dropbox special hash
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return "", fserrors.NoRetryError(errors.New("not support in shared files mode"))
}
if t != DbHashType {
return "", hash.ErrUnsupported
}
@@ -948,6 +1195,7 @@ func (o *Object) Size() int64 {
//
// This isn't a complete set of metadata and has an inaccurate date
func (o *Object) setMetadataFromEntry(info *files.FileMetadata) error {
o.id = info.Id
o.bytes = int64(info.Size)
o.modTime = info.ClientModified
o.hash = info.ContentHash
@@ -1016,10 +1264,27 @@ func (o *Object) Storable() bool {
// Open an object for read
func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.ReadCloser, err error) {
if o.fs.opt.SharedFiles {
if len(options) != 0 {
return nil, errors.New("OpenOptions not supported for shared files")
}
arg := sharing.GetSharedLinkMetadataArg{
Url: o.url,
}
err = o.fs.pacer.Call(func() (bool, error) {
_, in, err = o.fs.sharing.GetSharedLinkFile(&arg)
return shouldRetry(err)
})
if err != nil {
return nil, err
}
return
}
fs.FixRangeOption(options, o.bytes)
headers := fs.OpenOptionHeaders(options)
arg := files.DownloadArg{
-Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
+Path: o.id,
ExtraHeaders: headers,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1153,6 +1418,9 @@ func (o *Object) uploadChunked(in0 io.Reader, commitInfo *files.CommitInfo, size
//
// The new object may have been created if an error is returned
func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not supported in shared files mode"))
}
remote := o.remotePath()
if ignoredFiles.MatchString(remote) {
return fserrors.NoRetryError(errors.Errorf("file name %q is disallowed - not uploading", path.Base(remote)))
@@ -1181,6 +1449,9 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
// Remove an object
func (o *Object) Remove(ctx context.Context) (err error) {
if o.fs.opt.SharedFiles || o.fs.opt.SharedFolders {
return fserrors.NoRetryError(errors.New("not supported in shared files mode"))
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err = o.fs.srv.DeleteV2(&files.DeleteArg{
Path: o.fs.opt.Enc.FromStandardPath(o.remotePath()),
@@ -1201,4 +1472,5 @@ var (
_ fs.DirMover = (*Fs)(nil)
_ fs.Abouter = (*Fs)(nil)
_ fs.Object = (*Object)(nil)
_ fs.IDer = (*Object)(nil)
)

View File

@@ -1213,7 +1213,7 @@ func (f *Fs) OpenWriterAt(ctx context.Context, remote string, size int64) (fs.Wr
// Set the file to be a sparse file (important on Windows)
err = file.SetSparse(out)
if err != nil {
fs.Debugf(o, "Failed to set sparse: %v", err)
fs.Errorf(o, "Failed to set sparse: %v", err)
}
}

View File

@@ -646,7 +646,6 @@ func (f *Fs) Put(ctx context.Context, in io.Reader, src fs.ObjectInfo, options .
// retryErrorCodes is a slice of error codes that we will retry
var retryErrorCodes = []int{
-400, // Bad request (seen in "Next token is expired")
401, // Unauthorized (seen in "Token has expired")
408, // Request Timeout
423, // Locked - get this on folders sometimes

View File

@@ -58,7 +58,7 @@ import (
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)",
NewFs: NewFs,
CommandHelp: commandHelp,
Options: []fs.Option{{
@@ -94,6 +94,9 @@ func init() {
}, {
Value: "StackPath",
Help: "StackPath Object Storage",
}, {
Value: "TencentCOS",
Help: "Tencent Cloud Object Storage (COS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
@@ -185,7 +188,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,Scaleway",
Provider: "!AWS,Alibaba,Scaleway,TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
@@ -476,10 +479,73 @@ func init() {
Value: "s3.eu-central-1.stackpathstorage.com",
Help: "EU Endpoint",
}},
}, {
// cos endpoints: https://intl.cloud.tencent.com/document/product/436/6224
Name: "endpoint",
Help: "Endpoint for Tencent COS API.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "cos.ap-beijing.myqcloud.com",
Help: "Beijing Region.",
}, {
Value: "cos.ap-nanjing.myqcloud.com",
Help: "Nanjing Region.",
}, {
Value: "cos.ap-shanghai.myqcloud.com",
Help: "Shanghai Region.",
}, {
Value: "cos.ap-guangzhou.myqcloud.com",
Help: "Guangzhou Region.",
}, {
Value: "cos.ap-chengdu.myqcloud.com",
Help: "Chengdu Region.",
}, {
Value: "cos.ap-chongqing.myqcloud.com",
Help: "Chongqing Region.",
}, {
Value: "cos.ap-hongkong.myqcloud.com",
Help: "Hong Kong (China) Region.",
}, {
Value: "cos.ap-singapore.myqcloud.com",
Help: "Singapore Region.",
}, {
Value: "cos.ap-mumbai.myqcloud.com",
Help: "Mumbai Region.",
}, {
Value: "cos.ap-seoul.myqcloud.com",
Help: "Seoul Region.",
}, {
Value: "cos.ap-bangkok.myqcloud.com",
Help: "Bangkok Region.",
}, {
Value: "cos.ap-tokyo.myqcloud.com",
Help: "Tokyo Region.",
}, {
Value: "cos.na-siliconvalley.myqcloud.com",
Help: "Silicon Valley Region.",
}, {
Value: "cos.na-ashburn.myqcloud.com",
Help: "Virginia Region.",
}, {
Value: "cos.na-toronto.myqcloud.com",
Help: "Toronto Region.",
}, {
Value: "cos.eu-frankfurt.myqcloud.com",
Help: "Frankfurt Region.",
}, {
Value: "cos.eu-moscow.myqcloud.com",
Help: "Moscow Region.",
}, {
Value: "cos.accelerate.myqcloud.com",
Help: "Use Tencent COS Accelerate Endpoint.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,TencentCOS,Alibaba,Scaleway,StackPath",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -666,7 +732,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath",
Provider: "!AWS,IBMCOS,Alibaba,Scaleway,StackPath,TencentCOS",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -678,9 +744,13 @@ For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "default",
Help: "Owner gets Full_CONTROL. No one else has access rights (default).",
Provider: "TencentCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
Provider: "!IBMCOS,TencentCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
@@ -842,6 +912,24 @@ isn't set then "acl" is used instead.`,
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://intl.cloud.tencent.com/document/product/436/30925
Name: "storage_class",
Help: "The storage class to use when storing new objects in Tencent COS.",
Provider: "TencentCOS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "ARCHIVE",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
// Mapping from here: https://www.scaleway.com/en/docs/object-storage-glacier/#-Scaleway-Storage-Classes
Name: "storage_class",
@@ -975,7 +1063,7 @@ if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
-Some providers (eg AWS, Aliyun OSS or Netease COS) require this set to
+Some providers (eg AWS, Aliyun OSS, Netease COS or Tencent COS) require this set to
false - rclone will do this automatically based on the provider
setting.`,
Default: true,
@@ -1305,7 +1393,7 @@ func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.UseAccelerateEndpoint {
if opt.Provider == "AWS" || opt.Provider == "Alibaba" || opt.Provider == "Netease" || opt.Provider == "Scaleway" || opt.Provider == "TencentCOS" || opt.UseAccelerateEndpoint {
opt.ForcePathStyle = false
}
if opt.Provider == "Scaleway" && opt.MaxUploadParts > 1000 {
@@ -1587,7 +1675,7 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
//
// So we enable only on providers we know supports it properly, all others can retry when a
// XML Syntax error is detected.
-var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio")
+var urlEncodeListings = (f.opt.Provider == "AWS" || f.opt.Provider == "Wasabi" || f.opt.Provider == "Alibaba" || f.opt.Provider == "Minio" || f.opt.Provider == "TencentCOS")
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{

View File

@@ -324,7 +324,7 @@ func compileArch(version, goos, goarch, dir string) bool {
artifacts := []string{buildZip(dir)}
// build a .deb and .rpm if appropriate
if goos == "linux" {
-artifacts = append(artifacts, buildDebAndRpm(dir, version, goarch)...)
+artifacts = append(artifacts, buildDebAndRpm(dir, version, stripVersion(goarch))...)
}
if *copyAs != "" {
for _, artifact := range artifacts {

View File

@@ -14,7 +14,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
-Short: `Clean up the remote if possible`,
+Short: `Clean up the remote if possible.`,
Long: `
Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.

View File

@@ -22,7 +22,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copy source:path dest:path",
-Short: `Copy files from source to dest, skipping already copied`,
+Short: `Copy files from source to dest, skipping already copied.`,
Long: `
Copy the source to the destination. Doesn't transfer
unchanged files, testing by size and modification time or

View File

@@ -15,7 +15,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "copyto source:path dest:path",
-Short: `Copy files from source to dest, skipping already copied`,
+Short: `Copy files from source to dest, skipping already copied.`,
Long: `
If source:path is a file or directory then it copies it to a file or
directory named dest:path.

View File

@@ -44,7 +44,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "lsf remote:path",
-Short: `List directories and objects in remote:path formatted for parsing`,
+Short: `List directories and objects in remote:path formatted for parsing.`,
Long: `
List the contents of the source path (directories and objects) to
standard output in a form which is easy to parse by scripts. By

View File

@@ -192,6 +192,9 @@ Stopping the mount manually:
# OS X
umount /path/to/local/mount
**Note**: As of ` + "`rclone` 1.52.2, `rclone mount`" + ` now requires Go version 1.13
or newer on some platforms depending on the underlying FUSE library in use.
### Installing on Windows
To run rclone ` + commandName + ` on Windows, you will need to

View File

@@ -17,7 +17,7 @@ func init() {
var commandDefinition = &cobra.Command{
Use: "obscure password",
-Short: `Obscure password for use in the rclone config file`,
+Short: `Obscure password for use in the rclone config file.`,
Long: `In the rclone config file, human readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these

View File

@@ -148,6 +148,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="SugarSync" home="https://sugarsync.com/" config="/sugarsync/" >}}
{{< provider name="Tardigrade" home="https://tardigrade.io/" config="/tardigrade/" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" >}}
{{< provider name="WebDAV" home="https://en.wikipedia.org/wiki/WebDAV" config="/webdav/" >}}
{{< provider name="Yandex Disk" home="https://disk.yandex.com/" config="/yandex/" >}}

View File

@@ -409,3 +409,4 @@ put them back in again.` >}}
* Lucas Kanashiro <lucas.kanashiro@canonical.com>
* WarpedPixel <WarpedPixel@users.noreply.github.com>
* Sam Edwards <sam@samedwards.ca>
* wjielai <gouki0123@gmail.com>

View File

@@ -757,6 +757,8 @@ This can be useful for tracking down problems with syncs in
combination with the `-v` flag. See the [Logging section](#logging)
for more info.
If FILE exists then rclone will append to it.
Note that if you are using the `logrotate` program to manage rclone's
logs, then you should use the `copytruncate` option as rclone doesn't
have a signal to rotate logs.
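For example, a logrotate stanza along these lines would work; the log path and schedule here are assumptions:
```
# Hypothetical /etc/logrotate.d/rclone
/var/log/rclone.log {
    weekly
    rotate 4
    compress
    # truncate in place so rclone keeps appending to the same file handle
    copytruncate
}
```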

View File

@@ -202,6 +202,39 @@ Impersonate this user when using a business account.
- Type: string
- Default: ""
#### --dropbox-shared-files
Instructs rclone to work on individual shared files.
In this mode rclone's features are extremely limited - only list (ls, lsl, etc.)
operations and read operations (e.g. downloading) are supported.
All other operations will be disabled.
- Config: shared_files
- Env Var: RCLONE_DROPBOX_SHARED_FILES
- Type: bool
- Default: false
#### --dropbox-shared-folders
Instructs rclone to work on shared folders.
When this flag is used with no path only the List operation is supported and
all available shared folders will be listed. If you specify a path the first part
will be interpreted as the name of a shared folder. Rclone will then try to mount
this shared folder to the root namespace. On success rclone proceeds normally.
The shared folder is now pretty much a normal folder and all normal operations
are supported.
Note that we don't unmount the shared folder afterwards so the
--dropbox-shared-folders flag can be omitted after the first use of a particular
shared folder.
- Config: shared_folders
- Env Var: RCLONE_DROPBOX_SHARED_FOLDERS
- Type: bool
- Default: false
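Hypothetical usage, assuming a remote configured as `remote:`:
```
rclone lsl --dropbox-shared-files remote:    # list individual files shared with you
rclone lsd --dropbox-shared-folders remote:  # list all available shared folders
rclone copy --dropbox-shared-folders remote:folder /tmp/folder
```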
#### --dropbox-encoding
This sets the encoding for the backend.

View File

@@ -18,6 +18,7 @@ The S3 backend can be used with a number of different providers:
{{< provider name="Minio" home="https://www.minio.io/" config="/s3/#minio" >}}
{{< provider name="Scaleway" home="https://www.scaleway.com/en/object-storage/" config="/s3/#scaleway" >}}
{{< provider name="StackPath" home="https://www.stackpath.com/products/object-storage/" config="/s3/#stackpath" >}}
{{< provider name="Tencent Cloud Object Storage (COS)" home="https://intl.cloud.tencent.com/product/cos" config="/s3/#tencent-cos" >}}
{{< provider name="Wasabi" home="https://wasabi.com/" config="/s3/#wasabi" end="true" >}}
{{< /provider_list >}}
@@ -488,6 +489,8 @@ Choose your S3 provider.
- StackPath Object Storage
- "Wasabi"
- Wasabi Object Storage
- "TencentCOS"
- Tencent Cloud Object Storage (COS)
- "Other"
- Any other S3 compatible provider
@@ -1122,7 +1125,7 @@ The storage class to use when storing new objects in S3.
### Advanced Options
-Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)).
+Here are the advanced options specific to s3 (Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)).
#### --s3-bucket-acl
@@ -2212,6 +2215,138 @@ d) Delete this remote
y/e/d> y
```
### Tencent COS {#tencent-cos}
[Tencent Cloud Object Storage (COS)](https://intl.cloud.tencent.com/product/cos) is a distributed storage service offered by Tencent Cloud for unstructured data. It is secure, stable, scalable, convenient, low-latency and low-cost.
To configure access to Tencent COS, follow the steps below:
1. Run `rclone config` and select `n` for a new remote.
```
rclone config
No remotes found - make a new one
n) New remote
s) Set configuration password
q) Quit config
n/s/q> n
```
2. Give the name of the configuration. For example, name it 'cos'.
```
name> cos
```
3. Select `s3` storage.
```
Choose a number from below, or type in your own value
1 / 1Fichier
\ "fichier"
2 / Alias for an existing remote
\ "alias"
3 / Amazon Drive
\ "amazon cloud drive"
4 / Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, Tencent COS, etc)
\ "s3"
[snip]
Storage> s3
```
4. Select `TencentCOS` provider.
```
Choose a number from below, or type in your own value
1 / Amazon Web Services (AWS) S3
\ "AWS"
[snip]
11 / Tencent Cloud Object Storage (COS)
\ "TencentCOS"
[snip]
provider> TencentCOS
```
5. Enter your SecretId and SecretKey of Tencent Cloud.
```
Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).
Only applies if access_key_id and secret_access_key is blank.
Enter a boolean value (true or false). Press Enter for the default ("false").
Choose a number from below, or type in your own value
1 / Enter AWS credentials in the next step
\ "false"
2 / Get AWS credentials from the environment (env vars or IAM)
\ "true"
env_auth> 1
AWS Access Key ID.
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
access_key_id> AKIDxxxxxxxxxx
AWS Secret Access Key (password)
Leave blank for anonymous access or runtime credentials.
Enter a string value. Press Enter for the default ("").
secret_access_key> xxxxxxxxxxx
```
6. Select an endpoint for Tencent COS. This is the standard endpoint for the region you chose.
```
1 / Beijing Region.
\ "cos.ap-beijing.myqcloud.com"
2 / Nanjing Region.
\ "cos.ap-nanjing.myqcloud.com"
3 / Shanghai Region.
\ "cos.ap-shanghai.myqcloud.com"
4 / Guangzhou Region.
\ "cos.ap-guangzhou.myqcloud.com"
[snip]
endpoint> 4
```
7. Choose an ACL and a storage class.
```
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Owner gets FULL_CONTROL. No one else has access rights (default).
\ "default"
[snip]
acl> 1
The storage class to use when storing new objects in Tencent COS.
Enter a string value. Press Enter for the default ("").
Choose a number from below, or type in your own value
1 / Default
\ ""
[snip]
storage_class> 1
Edit advanced config? (y/n)
y) Yes
n) No (default)
y/n> n
Remote config
--------------------
[cos]
type = s3
provider = TencentCOS
env_auth = false
access_key_id = xxx
secret_access_key = xxx
endpoint = cos.ap-guangzhou.myqcloud.com
acl = default
--------------------
y) Yes this is OK (default)
e) Edit this remote
d) Delete this remote
y/e/d> y
Current remotes:
Name Type
==== ====
cos s3
```
### Netease NOS ###
For Netease NOS configure as per the configurator `rclone config`

View File

@@ -26,6 +26,10 @@ var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set
// transfer limit is reached.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)
// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)
// Account limits and accounts for one transfer
type Account struct {
stats *StatsInfo

View File

@@ -272,7 +272,7 @@ func (s *StatsInfo) String() string {
}
}
-_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
+_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s",
dateString,
fs.SizeSuffix(s.bytes),
fs.SizeSuffix(totalSize).Unit("Bytes"),
@@ -283,6 +283,7 @@ func (s *StatsInfo) String() string {
)
if !fs.Config.StatsOneLine {
_, _ = buf.WriteRune('\n')
errorDetails := ""
switch {
case s.fatalError:
@@ -291,6 +292,7 @@ func (s *StatsInfo) String() string {
errorDetails = " (retrying may help)"
case s.errors != 0:
errorDetails = " (no need to retry)"
}
// Add only non zero stats

View File

@@ -363,7 +363,7 @@ func Copy(ctx context.Context, f fs.Fs, dst fs.Object, remote string, src fs.Obj
actionTaken = "Copied (server side copy)"
if fs.Config.MaxTransfer >= 0 && (accounting.Stats(ctx).GetBytes() >= int64(fs.Config.MaxTransfer) ||
(fs.Config.CutoffMode == fs.CutoffModeCautious && accounting.Stats(ctx).GetBytesWithPending()+src.Size() >= int64(fs.Config.MaxTransfer))) {
-return nil, accounting.ErrorMaxTransferLimitReachedFatal
+return nil, accounting.ErrorMaxTransferLimitReachedGraceful
}
if doCopy := f.Features().Copy; doCopy != nil && (SameConfig(src.Fs(), f) || (SameRemoteType(src.Fs(), f) && f.Features().ServerSideAcrossConfigs)) {
in := tr.Account(ctx, nil) // account the transfer

View File

@@ -1440,7 +1440,7 @@ func TestCopyFileMaxTransfer(t *testing.T) {
err = operations.CopyFile(ctx, r.Fremote, r.Flocal, file3.Path, file3.Path)
require.NotNil(t, err)
assert.Contains(t, err.Error(), "Max transfer limit reached")
-assert.True(t, fserrors.IsFatalError(err))
+assert.True(t, fserrors.IsNoRetryError(err))
fstest.CheckItems(t, r.Flocal, file1, file2, file3, file4)
fstest.CheckItems(t, r.Fremote, file1)

View File

@@ -59,17 +59,30 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
return errors.New("Web GUI path exists, but is a file instead of folder. Please check the path " + extractPath)
}
+// Get the latest release details
+WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
+if err != nil {
+return errors.Wrap(err, "Error checking for web gui release update, skipping update")
+}
+dat, err := ioutil.ReadFile(tagPath)
+tagsMatch := false
+if err != nil {
+fs.Errorf(nil, "Error reading tag file at %s ", tagPath)
+checkUpdate = true
+} else if string(dat) == tag {
+tagsMatch = true
+}
+fs.Debugf(nil, "Current tag: %s, Release tag: %s", string(dat), tag)
+if !tagsMatch {
+fs.Infof(nil, "A release (%s) for gui is present at %s. Use --rc-web-gui-update to update. Your current version is (%s)", tag, WebUIURL, string(dat))
+}
// if the old file does not exist or a forced update is enforced.
// TODO: Add hashing to check integrity of the previous update.
if !extractPathExist || checkUpdate || forceUpdate {
-// Get the latest release details
-WebUIURL, tag, size, err := GetLatestReleaseURL(fetchURL)
-if err != nil {
-return err
-}
-dat, err := ioutil.ReadFile(tagPath)
-if err == nil && string(dat) == tag {
+if tagsMatch {
fs.Logf(nil, "No update to Web GUI available.")
if !forceUpdate {
return nil
@@ -91,7 +104,7 @@ func CheckAndDownloadWebGUIRelease(checkUpdate bool, forceUpdate bool, fetchURL
return errors.New("Web GUI path is a file instead of folder. Please check it " + extractPath)
}
fs.Logf(nil, "A new release for gui is present at "+WebUIURL)
fs.Logf(nil, "A new release for gui (%s) is present at %s", tag, WebUIURL)
fs.Logf(nil, "Downloading webgui binary. Please wait. [Size: %s, Path : %s]\n", strconv.Itoa(size), zipPath)
// download the zip from latest url

View File

@@ -32,6 +32,8 @@ type syncCopyMove struct {
// internal state
ctx context.Context // internal context for controlling go-routines
cancel func() // cancel the context
inCtx context.Context // internal context for controlling march
inCancel func() // cancel the march context
noTraverse bool // if set don't traverse the dst
noCheckDest bool // if set transfer all objects regardless without checking dst
noUnicodeNormalization bool // don't normalize unicode characters in filenames
@@ -144,6 +146,8 @@ func newSyncCopyMove(ctx context.Context, fdst, fsrc fs.Fs, deleteMode fs.Delete
} else {
s.ctx, s.cancel = context.WithCancel(ctx)
}
// Input context - cancel this for graceful stop
s.inCtx, s.inCancel = context.WithCancel(s.ctx)
if s.noTraverse && s.deleteMode != fs.DeleteModeOff {
fs.Errorf(nil, "Ignoring --no-traverse with sync")
s.noTraverse = false
@@ -248,6 +252,12 @@ func (s *syncCopyMove) processError(err error) {
}
if err == context.DeadlineExceeded {
err = fserrors.NoRetryError(err)
} else if err == accounting.ErrorMaxTransferLimitReachedGraceful {
if s.inCtx.Err() == nil {
fs.Logf(nil, "%v - stopping transfers", err)
// Cancel the march and stop the pipes
s.inCancel()
}
}
s.errorMu.Lock()
defer s.errorMu.Unlock()
@@ -287,7 +297,7 @@ func (s *syncCopyMove) currentError() error {
func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
defer wg.Done()
for {
-pair, ok := in.GetMax(s.ctx, fraction)
+pair, ok := in.GetMax(s.inCtx, fraction)
if !ok {
return
}
@@ -343,7 +353,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, fraction int, wg *sync.WaitGroup) {
defer wg.Done()
for {
-pair, ok := in.GetMax(s.ctx, fraction)
+pair, ok := in.GetMax(s.inCtx, fraction)
if !ok {
return
}
@@ -363,7 +373,7 @@ func (s *syncCopyMove) pairCopyOrMove(ctx context.Context, in *pipe, fdst fs.Fs,
defer wg.Done()
var err error
for {
-pair, ok := in.GetMax(s.ctx, fraction)
+pair, ok := in.GetMax(s.inCtx, fraction)
if !ok {
return
}
@@ -809,7 +819,7 @@ func (s *syncCopyMove) run() error {
// set up a march over fdst and fsrc
m := &march.March{
-Ctx: s.ctx,
+Ctx: s.inCtx,
Fdst: s.fdst,
Fsrc: s.fsrc,
Dir: s.dir,

View File

@@ -1851,38 +1851,51 @@ func TestSyncIgnoreCase(t *testing.T) {
fstest.CheckItems(t, r.Fremote, file2)
}
-// Test that aborting on max upload works
-func TestAbort(t *testing.T) {
-r := fstest.NewRun(t)
-defer r.Finalise()
-if r.Fremote.Name() != "local" {
-t.Skip("This test only runs on local")
-}
+// Test that aborting on --max-transfer works
+func TestMaxTransfer(t *testing.T) {
oldMaxTransfer := fs.Config.MaxTransfer
oldTransfers := fs.Config.Transfers
oldCheckers := fs.Config.Checkers
oldCutoff := fs.Config.CutoffMode
fs.Config.MaxTransfer = 3 * 1024
fs.Config.Transfers = 1
fs.Config.Checkers = 1
fs.Config.CutoffMode = fs.CutoffModeHard
defer func() {
fs.Config.MaxTransfer = oldMaxTransfer
fs.Config.Transfers = oldTransfers
fs.Config.Checkers = oldCheckers
fs.Config.CutoffMode = oldCutoff
}()
-// Create file on source
-file1 := r.WriteFile("file1", string(make([]byte, 5*1024)), t1)
-file2 := r.WriteFile("file2", string(make([]byte, 2*1024)), t1)
-file3 := r.WriteFile("file3", string(make([]byte, 3*1024)), t1)
-fstest.CheckItems(t, r.Flocal, file1, file2, file3)
-fstest.CheckItems(t, r.Fremote)
test := func(t *testing.T, cutoff fs.CutoffMode) {
r := fstest.NewRun(t)
defer r.Finalise()
fs.Config.CutoffMode = cutoff
accounting.GlobalStats().ResetCounters()
if r.Fremote.Name() != "local" {
t.Skip("This test only runs on local")
}
-err := Sync(context.Background(), r.Fremote, r.Flocal, false)
-expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)
-fserrors.Count(expectedErr)
-assert.Equal(t, expectedErr, err)
// Create file on source
file1 := r.WriteFile("file1", string(make([]byte, 5*1024)), t1)
file2 := r.WriteFile("file2", string(make([]byte, 2*1024)), t1)
file3 := r.WriteFile("file3", string(make([]byte, 3*1024)), t1)
fstest.CheckItems(t, r.Flocal, file1, file2, file3)
fstest.CheckItems(t, r.Fremote)
accounting.GlobalStats().ResetCounters()
err := Sync(context.Background(), r.Fremote, r.Flocal, false)
expectedErr := fserrors.FsError(accounting.ErrorMaxTransferLimitReachedFatal)
if cutoff != fs.CutoffModeHard {
expectedErr = accounting.ErrorMaxTransferLimitReachedGraceful
}
fserrors.Count(expectedErr)
assert.Equal(t, expectedErr, err)
}
t.Run("Hard", func(t *testing.T) { test(t, fs.CutoffModeHard) })
t.Run("Soft", func(t *testing.T) { test(t, fs.CutoffModeSoft) })
t.Run("Cautious", func(t *testing.T) { test(t, fs.CutoffModeCautious) })
}

go.mod
View File

@@ -72,3 +72,5 @@ require (
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect
storj.io/uplink v1.2.0
)
replace github.com/jlaffaye/ftp => github.com/ncw/ftp v0.0.0-20200913100848-1d4a278dbc96

go.sum
View File

@@ -238,6 +238,8 @@ github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e h1:itZyHiOkiB8mIGouegRNLM9LttGQ3yrgRmp/J/6H/0g=
github.com/jlaffaye/ftp v0.0.0-20200720194710-13949d38913e/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/jlaffaye/ftp v0.0.0-20200812143550-39e3779af0db h1:e30IC+OuZIeMVK33/zE7wDvxDaRmGuRt/ps67pzcxAw=
github.com/jlaffaye/ftp v0.0.0-20200812143550-39e3779af0db/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
@@ -306,6 +308,16 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE+eKJXWVjKXM4ck2QobLqTDytGJbLLhJg=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/ncw/ftp v0.0.0-20200910202626-83bf7f3051fe h1:vJHusE940z05SKcdSy67YvPeZi9DkV7k1awGFKhCwXs=
github.com/ncw/ftp v0.0.0-20200910202626-83bf7f3051fe/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200911161945-13c89676f4e4 h1:eLu30+/U5nLCkFegOUsGTFJ7wojikvdNESGEmA3ZnuM=
github.com/ncw/ftp v0.0.0-20200911161945-13c89676f4e4/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200912103546-f95f53b8ef3e h1:UUzL92dVtNe6OQfRo+xxubN4Mj+chJro87Qa0bM+3Kg=
github.com/ncw/ftp v0.0.0-20200912103546-f95f53b8ef3e/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200913095915-9bc640dd2108 h1:SBBlbVBRqSAb4KBfpQkevTEAmWOsX8exrX3XxNY/QOQ=
github.com/ncw/ftp v0.0.0-20200913095915-9bc640dd2108/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/ftp v0.0.0-20200913100848-1d4a278dbc96 h1:DIvfe9Wqr8cuJulz8+guLCGyZRO22MDRjCk4fTvJ+7U=
github.com/ncw/ftp v0.0.0-20200913100848-1d4a278dbc96/go.mod h1:2lmrmq866uF2tnje75wQHzmPXhmSWUt7Gyx2vgK1RCU=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2 h1:VlXvEx6JbFp7F9iz92zXP2Ew+9VupSpfybr+TxmjdH0=
github.com/ncw/go-acd v0.0.0-20171120105400-887eb06ab6a2/go.mod h1:MLIrzg7gp/kzVBxRE1olT7CWYMCklcUWU+ekoxOD9x0=
github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU=

View File

@@ -166,6 +166,11 @@ whereas the --vfs-read-ahead is buffered on disk.
When using this mode it is recommended that --buffer-size is not set
too big and --vfs-read-ahead is set large if required.
**IMPORTANT** not all file systems support sparse files. In particular
FAT/exFAT do not. Rclone will perform very badly if the cache
directory is on a filesystem which doesn't support sparse files and it
will log an ERROR message if one is detected.
### VFS Performance
These flags may be used to enable/disable features of the VFS for

View File

@@ -249,7 +249,7 @@ func (item *Item) _truncate(size int64) (err error) {
err = file.SetSparse(fd)
if err != nil {
fs.Debugf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
fs.Errorf(item.name, "vfs cache: truncate: failed to set as a sparse file: %v", err)
}
}
@@ -446,7 +446,7 @@ func (item *Item) _createFile(osPath string) (err error) {
}
err = file.SetSparse(fd)
if err != nil {
fs.Debugf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
fs.Errorf(item.name, "vfs cache: failed to set as a sparse file: %v", err)
}
item.fd = fd