Mirror of https://github.com/rclone/rclone.git
Compare commits: mount-wind...fix-s3-end
1 commit: 31cb3beb7b

@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 				fs.Errorf(object.Name, "Can't create object %v", err)
 				continue
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 			err = f.deleteByID(ctx, object.ID, object.Name)
 			checkErr(err)
 			tr.Done(ctx, err)

@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			if err != nil {
 				fs.Errorf(object, "Can't create object %+v", err)
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 			if oldOnly && last != remote {
 				// Check current version of the file
 				if object.Action == "hide" {

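Both hunks above (apparently a backend purge loop) drop the second argument to NewCheckingTransfer; it was a free-form label ("deleting", "checking") shown next to the file name in progress output. A hedged, self-contained sketch of driving the one-argument accounting API that the right-hand side keeps — the file name and size are made up, and object.NewStaticObjectInfo merely stands in for the object info built inside purge:

package main

import (
	"context"
	"time"

	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/fs/object"
)

func main() {
	ctx := context.Background()
	// Stand-in for the "oi" object info built inside purge.
	oi := object.NewStaticObjectInfo("path/to/file.txt", time.Now(), 42, true, nil, nil)
	tr := accounting.Stats(ctx).NewCheckingTransfer(oi) // one-argument form kept by this change
	var err error
	// ... perform the check or delete here, recording its error in err ...
	tr.Done(ctx, err) // always close the transfer so the stats stay balanced
}
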
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
 		if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
 			fs.Errorf(nil, "%s: failed to import: %v", remote, err)
 		}
-		accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
+		accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
 		doneCount++
 	}
 })

@@ -83,17 +83,6 @@ than permanently deleting them. If you specify this then rclone will
 permanently delete objects instead.`,
 			Default:  false,
 			Advanced: true,
-		}, {
-			Name: "use_https",
-			Help: `Use HTTPS for transfers.
-
-MEGA uses plain text HTTP connections by default.
-Some ISPs throttle HTTP connections, this causes transfers to become very slow.
-Enabling this will force MEGA to use HTTPS for all transfers.
-HTTPS is normally not necesary since all data is already encrypted anyway.
-Enabling it will increase CPU usage and add network overhead.`,
-			Default:  false,
-			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,

@@ -111,7 +100,6 @@ type Options struct {
 	Pass       string               `config:"pass"`
 	Debug      bool                 `config:"debug"`
 	HardDelete bool                 `config:"hard_delete"`
-	UseHTTPS   bool                 `config:"use_https"`
 	Enc        encoder.MultiEncoder `config:"encoding"`
 }
 
@@ -216,7 +204,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if srv == nil {
 		srv = mega.New().SetClient(fshttp.NewClient(ctx))
 		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
-		srv.SetHTTPS(opt.UseHTTPS)
 		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})

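The three hunks above remove the Mega use_https option end to end: the config block, the Options field, and the call into the go-mega client. A minimal sketch of that wiring, assuming the go-mega SetHTTPS method shown in the removed line; the Options struct here is a cut-down stand-in for the backend's real one:

package main

import (
	"net/http"

	mega "github.com/t3rm1n4l/go-mega"
)

// Options is a cut-down stand-in for the backend's option struct.
type Options struct {
	UseHTTPS bool // mirrors the `config:"use_https"` field removed above
}

// newMegaClient shows how the deleted option reached the client.
func newMegaClient(opt *Options) *mega.Mega {
	srv := mega.New().SetClient(http.DefaultClient)
	srv.SetHTTPS(opt.UseHTTPS) // force HTTPS for file transfers when enabled
	return srv
}

func main() {
	_ = newMegaClient(&Options{UseHTTPS: true})
}
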
@@ -126,7 +126,6 @@ type HashesType struct {
 	Sha1Hash     string `json:"sha1Hash"`     // hex encoded SHA1 hash for the contents of the file (if available)
 	Crc32Hash    string `json:"crc32Hash"`    // hex encoded CRC32 value of the file (if available)
 	QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
-	Sha256Hash   string `json:"sha256Hash"`   // hex encoded SHA256 value of the file (if available)
 }
 
 // FileFacet groups file-related data on OneDrive into a single structure.

@@ -259,48 +259,6 @@ this flag there.
 At the time of writing this only works with OneDrive personal paid accounts.
 `,
 			Advanced: true,
-		}, {
-			Name:    "hash_type",
-			Default: "auto",
-			Help: `Specify the hash in use for the backend.
-
-This specifies the hash type in use. If set to "auto" it will use the
-default hash which is is QuickXorHash.
-
-Before rclone 1.62 an SHA1 hash was used by default for Onedrive
-Personal. For 1.62 and later the default is to use a QuickXorHash for
-all onedrive types. If an SHA1 hash is desired then set this option
-accordingly.
-
-From July 2023 QuickXorHash will be the only available hash for
-both OneDrive for Business and OneDriver Personal.
-
-This can be set to "none" to not use any hashes.
-
-If the hash requested does not exist on the object, it will be
-returned as an empty string which is treated as a missing hash by
-rclone.
-`,
-			Examples: []fs.OptionExample{{
-				Value: "auto",
-				Help:  "Rclone chooses the best hash",
-			}, {
-				Value: "quickxor",
-				Help:  "QuickXor",
-			}, {
-				Value: "sha1",
-				Help:  "SHA1",
-			}, {
-				Value: "sha256",
-				Help:  "SHA256",
-			}, {
-				Value: "crc32",
-				Help:  "CRC32",
-			}, {
-				Value: "none",
-				Help:  "None - don't use any hashes",
-			}},
-			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,

@@ -639,7 +597,6 @@ type Options struct {
 	LinkScope    string               `config:"link_scope"`
 	LinkType     string               `config:"link_type"`
 	LinkPassword string               `config:"link_password"`
-	HashType     string               `config:"hash_type"`
 	Enc          encoder.MultiEncoder `config:"encoding"`
 }
 
@@ -656,7 +613,6 @@ type Fs struct {
 	tokenRenewer *oauthutil.Renew // renew the token on expiry
 	driveID      string           // ID to use for querying Microsoft Graph
 	driveType    string           // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
-	hashType     hash.Type        // type of the hash we are using
 }
 
 // Object describes a OneDrive object

@@ -670,7 +626,8 @@ type Object struct {
 	size         int64     // size of the object
 	modTime      time.Time // modification time of the object
 	id           string    // ID of the object
-	hash         string    // Hash of the content, usually QuickXorHash but set as hash_type
+	sha1         string    // SHA-1 of the object content
+	quickxorhash string    // QuickXorHash of the object content
 	mimeType     string    // Content-Type of object from server (may not be as uploaded)
 }
 
@@ -925,7 +882,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		driveType: opt.DriveType,
 		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
-		hashType:  QuickXorHashType,
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,

@@ -935,15 +891,6 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)
 
-	// Set the user defined hash
-	if opt.HashType == "auto" || opt.HashType == "" {
-		opt.HashType = QuickXorHashType.String()
-	}
-	err = f.hashType.Set(opt.HashType)
-	if err != nil {
-		return nil, err
-	}
-
 	// Disable change polling in China region
 	// See: https://github.com/rclone/rclone/issues/6444
 	if f.opt.Region == regionCN {

@@ -1609,7 +1556,10 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
 
 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	return hash.Set(f.hashType)
+	if f.driveType == driveTypePersonal {
+		return hash.Set(hash.SHA1)
+	}
+	return hash.Set(QuickXorHashType)
 }
 
 // PublicLink returns a link for downloading without account.

@@ -1818,8 +1768,14 @@ func (o *Object) rootPath() string {
 
 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if t == o.fs.hashType {
-		return o.hash, nil
+	if o.fs.driveType == driveTypePersonal {
+		if t == hash.SHA1 {
+			return o.sha1, nil
+		}
+	} else {
+		if t == QuickXorHashType {
+			return o.quickxorhash, nil
+		}
 	}
 	return "", hash.ErrUnsupported
 }

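The two hunks above replace the configurable hashType field with a hard decision on drive type. A self-contained sketch of that selection logic; driveTypePersonal matches the constant compared against in the hunks, while hash.None stands in for the backend-registered QuickXorHashType so the sketch compiles on its own:

package main

import (
	"fmt"

	"github.com/rclone/rclone/fs/hash"
)

// driveTypePersonal mirrors the constant used in the hunks above.
const driveTypePersonal = "personal"

// supportedHash mirrors the right-hand Hashes() logic: SHA-1 for OneDrive
// Personal, QuickXorHash otherwise (hash.None is a stand-in for the
// backend-registered QuickXorHashType).
func supportedHash(driveType string) hash.Type {
	if driveType == driveTypePersonal {
		return hash.SHA1
	}
	return hash.None // stand-in for QuickXorHashType
}

func main() {
	fmt.Println(supportedHash("personal") == hash.SHA1) // true
}
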
@@ -1850,23 +1806,16 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 	file := info.GetFile()
 	if file != nil {
 		o.mimeType = file.MimeType
-		o.hash = ""
-		switch o.fs.hashType {
-		case QuickXorHashType:
+		if file.Hashes.Sha1Hash != "" {
+			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
+		}
 		if file.Hashes.QuickXorHash != "" {
 			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
 			if err != nil {
 				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
 			} else {
-				o.hash = hex.EncodeToString(h)
+				o.quickxorhash = hex.EncodeToString(h)
 			}
-		}
 		}
-		case hash.SHA1:
-			o.hash = strings.ToLower(file.Hashes.Sha1Hash)
-		case hash.SHA256:
-			o.hash = strings.ToLower(file.Hashes.Sha256Hash)
-		case hash.CRC32:
-			o.hash = strings.ToLower(file.Hashes.Crc32Hash)
 		}
 	}
 	fileSystemInfo := info.GetFileSystemInfo()

@@ -2266,11 +2266,6 @@ rclone's choice here.
 			Help:     `Suppress setting and reading of system metadata`,
 			Advanced: true,
 			Default:  false,
-		}, {
-			Name:     "sts_endpoint",
-			Help:     "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
-			Provider: "AWS",
-			Advanced: true,
 		},
 	}})
 }

@@ -2357,7 +2352,6 @@ type Options struct {
 	SecretAccessKey    string `config:"secret_access_key"`
 	Region             string `config:"region"`
 	Endpoint           string `config:"endpoint"`
-	STSEndpoint        string `config:"sts_endpoint"`
 	LocationConstraint string `config:"location_constraint"`
 	ACL                string `config:"acl"`
 	BucketACL          string `config:"bucket_acl"`

@@ -2534,7 +2528,7 @@ func parsePath(path string) (root string) {
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
+	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
 	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }
 
@@ -2566,38 +2560,6 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
 	}
 }
 
-// Default name resolver
-var defaultResolver = endpoints.DefaultResolver()
-
-// resolve (service, region) to endpoint
-//
-// Used to set endpoint for s3 services and not for other services
-type resolver map[string]string
-
-// Add a service to the resolver, ignoring empty urls
-func (r resolver) addService(service, url string) {
-	if url == "" {
-		return
-	}
-	if !strings.HasPrefix(url, "http") {
-		url = "https://" + url
-	}
-	r[service] = url
-}
-
-// EndpointFor return the endpoint for s3 if set or the default if not
-func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
-	fs.Debugf(nil, "Resolving service %q region %q", service, region)
-	url, ok := r[service]
-	if ok {
-		return endpoints.ResolvedEndpoint{
-			URL:           url,
-			SigningRegion: region,
-		}, nil
-	}
-	return defaultResolver.EndpointFor(service, region, opts...)
-}
-
 // s3Connection makes a connection to s3
 func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
 	ci := fs.GetConfig(ctx)

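For reference, the resolver deleted above reassembles into this self-contained sketch against the aws-sdk-go v1 endpoints API; it overrides the endpoint per service and falls back to the SDK's default resolver for everything else (the example host is illustrative, not from the diff):

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

// Default name resolver from the SDK.
var defaultResolver = endpoints.DefaultResolver()

// resolver maps a service ("s3", "sts") to an endpoint URL.
type resolver map[string]string

// addService adds a service to the resolver, ignoring empty URLs.
func (r resolver) addService(service, url string) {
	if url == "" {
		return
	}
	if !strings.HasPrefix(url, "http") {
		url = "https://" + url
	}
	r[service] = url
}

// EndpointFor returns the override for a service if set, else the SDK default.
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
	if url, ok := r[service]; ok {
		return endpoints.ResolvedEndpoint{URL: url, SigningRegion: region}, nil
	}
	return defaultResolver.EndpointFor(service, region, opts...)
}

func main() {
	r := make(resolver)
	r.addService("s3", "minio.example.com") // example host, not from the diff
	ep, _ := r.EndpointFor("s3", "us-east-1")
	fmt.Println(ep.URL) // https://minio.example.com
}

The next hunk replaces this with awsConfig.WithEndpoint(opt.Endpoint), which points every service on the session at one URL — which is why the STS-specific pieces disappear with it.
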
@@ -2676,12 +2638,8 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 	if opt.Region != "" {
 		awsConfig.WithRegion(opt.Region)
 	}
-	if opt.Endpoint != "" || opt.STSEndpoint != "" {
-		// If endpoints are set, override the relevant services only
-		r := make(resolver)
-		r.addService("s3", opt.Endpoint)
-		r.addService("sts", opt.STSEndpoint)
-		awsConfig.WithEndpointResolver(r)
+	if opt.Endpoint != "" {
+		awsConfig.WithEndpoint(opt.Endpoint)
 	}
 
 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)

@@ -2699,7 +2657,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 		}
 		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
 		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
-		awsSessionOpts.Config.Credentials = nil
+		// awsSessionOpts.Config.Credentials = nil
 	}
 	ses, err := session.NewSessionWithOptions(awsSessionOpts)
 	if err != nil {

@@ -3468,16 +3426,15 @@ var errEndList = errors.New("end list")
 
 // list options
 type listOpt struct {
 	bucket        string  // bucket to list
 	directory     string  // directory with bucket
 	prefix        string  // prefix to remove from listing
 	addBucket     bool    // if set, the bucket is added to the start of the remote
 	recurse       bool    // if set, recurse to read sub directories
 	withVersions  bool    // if set, versions are produced
 	hidden        bool    // if set, return delete markers as objects with size == isDeleteMarker
 	findFile      bool    // if set, it will look for files called (bucket, directory)
 	versionAt     fs.Time // if set only show versions <= this time
-	noSkipMarkers bool    // if set return dir marker objects
 }
 
 // list lists the objects into the function supplied with the opt

@@ -3590,7 +3547,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 		}
 		remote = remote[len(opt.prefix):]
 		if opt.addBucket {
-			remote = bucket.Join(opt.bucket, remote)
+			remote = path.Join(opt.bucket, remote)
 		}
 		remote = strings.TrimSuffix(remote, "/")
 		err = fn(remote, &s3.Object{Key: &remote}, nil, true)

@@ -3619,10 +3576,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 			remote = remote[len(opt.prefix):]
 			isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 			if opt.addBucket {
-				remote = bucket.Join(opt.bucket, remote)
+				remote = path.Join(opt.bucket, remote)
 			}
 			// is this a directory marker?
-			if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
+			if isDirectory && object.Size != nil && *object.Size == 0 {
 				continue // skip directory marker
 			}
 			if versionIDs != nil {

@@ -3912,7 +3869,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 	req.Bucket = &dstBucket
 	req.ACL = stringPointerOrNil(f.opt.ACL)
 	req.Key = &dstPath
-	source := pathEscape(bucket.Join(srcBucket, srcPath))
+	source := pathEscape(path.Join(srcBucket, srcPath))
 	if src.versionID != nil {
 		source += fmt.Sprintf("?versionId=%s", *src.versionID)
 	}

@@ -4569,14 +4526,13 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 		delErr <- operations.DeleteFiles(ctx, delChan)
 	}()
 	checkErr(f.list(ctx, listOpt{
 		bucket:        bucket,
 		directory:     directory,
 		prefix:        f.rootDirectory,
 		addBucket:     f.rootBucket == "",
 		recurse:       true,
 		withVersions:  versioned,
 		hidden:        true,
-		noSkipMarkers: true,
 	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
 		if isDirectory {
 			return nil

@@ -4586,7 +4542,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			fs.Errorf(object, "Can't create object %+v", err)
 			return nil
 		}
-		tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
+		tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
 		// Work out whether the file is the current version or not
 		isCurrentVersion := !versioned || !version.Match(remote)
 		fs.Debugf(nil, "%q version %v", remote, version.Match(remote))

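The purge hunks above feed a listing callback into a delete channel consumed concurrently. A hedged sketch of that producer/consumer shape using the real operations.DeleteFiles and fs.ObjectsChan; the objects slice is a stand-in for what list() would discover:

package main

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
)

// purgeAll mirrors the shape of the purge code above: a goroutine consumes
// the delete channel while the caller (standing in for f.list) produces.
func purgeAll(ctx context.Context, objects []fs.Object) error {
	delChan := make(fs.ObjectsChan, 16)
	delErr := make(chan error, 1)
	go func() {
		delErr <- operations.DeleteFiles(ctx, delChan)
	}()
	for _, o := range objects {
		delChan <- o // in the real code this happens in the list callback
	}
	close(delChan)
	return <-delErr
}

func main() {
	_ = purgeAll(context.Background(), nil)
}
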
@@ -1,6 +1,7 @@
 package seafile
 
 import (
+	"sync"
 	"sync/atomic"
 	"testing"
 	"time"

@@ -16,19 +17,19 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
 	renew.Shutdown()
 }
 
-func TestRenewalInTimeLimit(t *testing.T) {
+func TestRenewal(t *testing.T) {
 	var count int64
 
-	renew := NewRenew(100*time.Millisecond, func() error {
+	wg := sync.WaitGroup{}
+	wg.Add(2) // run the renewal twice
+	renew := NewRenew(time.Millisecond, func() error {
 		atomic.AddInt64(&count, 1)
+		wg.Done()
 		return nil
 	})
-	time.Sleep(time.Second)
+	wg.Wait()
 	renew.Shutdown()
 
-	// there's no guarantee the CI agent can handle a simple goroutine
-	renewCount := atomic.LoadInt64(&count)
-	t.Logf("renew count = %d", renewCount)
-	assert.Greater(t, renewCount, int64(0))
-	assert.Less(t, renewCount, int64(11))
+	// it is technically possible that a third renewal gets triggered between Wait() and Shutdown()
+	assert.GreaterOrEqual(t, atomic.LoadInt64(&count), int64(2))
 }

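The rewritten test swaps a fixed one-second sleep for WaitGroup-based synchronization. The same pattern in standalone form — wait for an exact number of callbacks instead of guessing how many fit into a time window:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var count int64
	var wg sync.WaitGroup
	wg.Add(2) // expect the callback to fire twice

	callback := func() {
		atomic.AddInt64(&count, 1)
		wg.Done()
	}
	go callback()
	go callback()

	wg.Wait() // returns as soon as both callbacks ran — no timing guesswork
	fmt.Println("calls:", atomic.LoadInt64(&count))
}
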
@@ -401,15 +401,9 @@ func initConfig() {
 	// Start accounting
 	accounting.Start(ctx)
 
-	// Configure console
+	// Hide console window
 	if ci.NoConsole {
-		// Hide the console window
 		terminal.HideConsole()
-	} else {
-		// Enable color support on stdout if possible.
-		// This enables virtual terminal processing on Windows 10,
-		// adding native support for ANSI/VT100 escape sequences.
-		terminal.EnableColorsStdout()
 	}
 
 	// Load filters

@@ -26,6 +26,5 @@ func getMountpoint(f fs.Fs, mountPath string, opt *mountlib.Options) (string, er
 	if err = mountlib.CheckAllowNonEmpty(mountPath, opt); err != nil {
 		return "", err
 	}
-	opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
 	return mountPath, nil
 }

@@ -9,11 +9,9 @@ import (
 	"os"
 	"path/filepath"
 	"regexp"
-	"strings"
 
 	"github.com/rclone/rclone/cmd/mountlib"
 	"github.com/rclone/rclone/fs"
-	"github.com/rclone/rclone/lib/encoder"
 	"github.com/rclone/rclone/lib/file"
 )
 
@@ -21,13 +19,10 @@ var isDriveRegex = regexp.MustCompile(`^[a-zA-Z]\:$`)
 var isDriveRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\$`)
 var isDriveOrRootPathRegex = regexp.MustCompile(`^[a-zA-Z]\:\\?$`)
 var isNetworkSharePathRegex = regexp.MustCompile(`^\\\\[^\\\?]+\\[^\\]`)
-var isAnyPathSeparatorRegex = regexp.MustCompile(`[/\\]+`) // Matches any path separators, slash or backslash, or sequences of them
 
-// isNetworkSharePath returns true if the given string is a network share path,
-// in the basic UNC format "\\Server\Share\Path". The first two path components
-// are required ("\\Server\Share"), and represents the volume. The rest of the
-// string can be anything, i.e. can be a nested path ("\\Server\Share\Path\Path\Path").
-// Actual validity of the path, e.g. if it contains invalid characters, is not considered.
+// isNetworkSharePath returns true if the given string is a valid network share path,
+// in the basic UNC format "\\Server\Share\Path", where the first two path components
+// are required ("\\Server\Share", which represents the volume).
 // Extended-length UNC format "\\?\UNC\Server\Share\Path" is not considered, as it is
 // not supported by cgofuse/winfsp, so returns false for any paths with prefix "\\?\".
 // Note: There is a UNCPath function in lib/file, but it refers to any extended-length

@@ -116,7 +111,7 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
 	// Drive letter string can be used as is, since we have already checked it does not exist,
 	// but directory path needs more checks.
 	if opt.NetworkMode {
-		fs.Debugf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
+		fs.Errorf(nil, "Ignoring --network-mode as it is not supported with directory mountpoint")
 		opt.NetworkMode = false
 	}
 	var err error

@@ -137,47 +132,30 @@ func handleLocalMountpath(f fs.Fs, mountpath string, opt *mountlib.Options) (str
 	return mountpath, nil
 }
 
-// networkSharePathEncoder is an encoder used to make strings valid as (part of) Windows network share UNC paths
-const networkSharePathEncoder = (encoder.EncodeZero | // NUL(0x00)
-	encoder.EncodeCtl | // CTRL(0x01-0x1F)
-	encoder.EncodeDel | // DEL(0x7F)
-	encoder.EncodeWin | // :?"*<>|
-	encoder.EncodeInvalidUtf8) // Also encode invalid UTF-8 bytes as Go can't convert them to UTF-16.
-
-// encodeNetworkSharePath makes a string valid to use as (part of) a Windows network share UNC path.
-// Using backslash as path separator here, but forward slashes would also be treated as
-// path separators by the library, and therefore does not encode either of them. For convenience,
-// normalizes to backslashes-only. UNC paths always start with two path separators, but WinFsp
-// requires volume prefix as UNC-like path but with only a single backslash prefix, and multiple
-// separators are not valid in any other parts of network share paths, so therefore (unlike what
-// filepath.FromSlash would do) replaces multiple separators with a single one (like filpath.Clean
-// would do, but it does also more). A trailing path separator would just be ignored, but we
-// remove it here as well for convenience.
-func encodeNetworkSharePath(volumeName string) string {
-	return networkSharePathEncoder.Encode(strings.TrimRight(isAnyPathSeparatorRegex.ReplaceAllString(volumeName, `\`), `\`))
-}
-
 // handleVolumeName handles the volume name option.
-func handleVolumeName(opt *mountlib.Options) {
-	// Ensure the volume name option is a valid network share UNC path if network mode,
+func handleVolumeName(opt *mountlib.Options, volumeName string) {
+	// If volumeName parameter is set, then just set that into options replacing any existing value.
+	// Else, ensure the volume name option is a valid network share UNC path if network mode,
 	// and ensure network mode if configured volume name is already UNC path.
-	if opt.VolumeName != "" { // Should always be true due to code in mountlib caller
+	if volumeName != "" {
+		opt.VolumeName = volumeName
+	} else if opt.VolumeName != "" { // Should always be true due to code in mountlib caller
 		// Use value of given volume name option, but check if it is disk volume name or network volume prefix
 		if isNetworkSharePath(opt.VolumeName) {
 			// Specified volume name is network share UNC path, assume network mode and use it as volume prefix
-			opt.VolumeName = encodeNetworkSharePath(opt.VolumeName[1:]) // We know from isNetworkSharePath it has a duplicate path separator prefix, so removes that right away (but encodeNetworkSharePath would remove it also)
+			opt.VolumeName = opt.VolumeName[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
 			if !opt.NetworkMode {
 				// Specified volume name is network share UNC path, force network mode and use it as volume prefix
 				fs.Debugf(nil, "Forcing network mode due to network share (UNC) volume name")
 				opt.NetworkMode = true
 			}
 		} else if opt.NetworkMode {
-			// Specified volume name is not a valid network share UNC path, but network mode is enabled, so append to a hard coded server prefix and use it as volume prefix
-			opt.VolumeName = `\server\` + strings.TrimLeft(encodeNetworkSharePath(opt.VolumeName), `\`)
+			// Plain volume name treated as share name in network mode, append to hard coded "\\server" prefix to get full volume prefix.
+			opt.VolumeName = "\\server\\" + opt.VolumeName
 		}
 	} else if opt.NetworkMode {
-		// Use hard coded default
-		opt.VolumeName = `\server\share`
+		// Hard coded default
+		opt.VolumeName = "\\server\\share"
 	}
 }
 
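The deleted helper above reassembles into this self-contained sketch (the encoder mask and regex are copied from the removed lines): it collapses runs of slashes or backslashes into single backslashes, trims a trailing separator, and encodes characters that are invalid in Windows share names.

package main

import (
	"fmt"
	"regexp"
	"strings"

	"github.com/rclone/rclone/lib/encoder"
)

// Matches any path separators, slash or backslash, or sequences of them.
var isAnyPathSeparatorRegex = regexp.MustCompile(`[/\\]+`)

const networkSharePathEncoder = encoder.EncodeZero | // NUL(0x00)
	encoder.EncodeCtl | // CTRL(0x01-0x1F)
	encoder.EncodeDel | // DEL(0x7F)
	encoder.EncodeWin | // :?"*<>|
	encoder.EncodeInvalidUtf8 // invalid UTF-8, as Go can't convert it to UTF-16

// encodeNetworkSharePath makes a string valid as (part of) a UNC path.
func encodeNetworkSharePath(volumeName string) string {
	return networkSharePathEncoder.Encode(strings.TrimRight(isAnyPathSeparatorRegex.ReplaceAllString(volumeName, `\`), `\`))
}

func main() {
	fmt.Println(encodeNetworkSharePath(`//server//share/`)) // \server\share
}
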
@@ -196,27 +174,22 @@ func getMountpoint(f fs.Fs, mountpath string, opt *mountlib.Options) (mountpoint
 	}
 
 	// Handle mountpath
+	var volumeName string
 	if isDefaultPath(mountpath) {
 		// Mount path indicates defaults, which will automatically pick an unused drive letter.
-		if mountpoint, err = handleDefaultMountpath(); err != nil {
-			return
-		}
+		mountpoint, err = handleDefaultMountpath()
 	} else if isNetworkSharePath(mountpath) {
 		// Mount path is a valid network share path (UNC format, "\\Server\Share" prefix).
-		if mountpoint, err = handleNetworkShareMountpath(mountpath, opt); err != nil {
-			return
-		}
-		// In this case the volume name is taken from the mount path, it replaces any existing volume name option.
-		opt.VolumeName = mountpath
+		mountpoint, err = handleNetworkShareMountpath(mountpath, opt)
+		// In this case the volume name is taken from the mount path, will replace any existing volume name option.
+		volumeName = mountpath[1:] // WinFsp requires volume prefix as UNC-like path but with only a single backslash
 	} else {
 		// Mount path is drive letter or directory path.
-		if mountpoint, err = handleLocalMountpath(f, mountpath, opt); err != nil {
-			return
-		}
+		mountpoint, err = handleLocalMountpath(f, mountpath, opt)
 	}
 
 	// Handle volume name
-	handleVolumeName(opt)
+	handleVolumeName(opt, volumeName)
 
 	// Done, return mountpoint to be used, together with updated mount options.
 	if opt.NetworkMode {

@@ -6,7 +6,6 @@ import (
 	"fmt"
 
 	"github.com/rclone/rclone/cmd"
-	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )

@@ -28,12 +27,12 @@ it will always be removed.
 	},
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
-		f, fileName := cmd.NewFsFile(args[0])
+		fs, fileName := cmd.NewFsFile(args[0])
 		cmd.Run(true, false, command, func() error {
 			if fileName == "" {
-				return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
+				return fmt.Errorf("%s is a directory or doesn't exist", args[0])
 			}
-			fileObj, err := f.NewObject(context.Background(), fileName)
+			fileObj, err := fs.NewObject(context.Background(), fileName)
 			if err != nil {
 				return err
 			}

@@ -79,7 +79,6 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 	if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
 		return nil, nil, err
 	}
-	opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
 	fs.Debugf(f, "Mounting on %q", mountpoint)
 
 	if opt.DebugFUSE {

@@ -151,7 +151,6 @@ func mount(VFS *vfs.VFS, mountpoint string, opt *mountlib.Options) (<-chan error
 	if err := mountlib.CheckAllowNonEmpty(mountpoint, opt); err != nil {
 		return nil, nil, err
 	}
-	opt.VolumeName = mountlib.MakeVolumeNameValidOnUnix(opt.VolumeName)
 	fs.Debugf(f, "Mounting on %q", mountpoint)
 
 	fsys := NewFS(VFS, opt)

@@ -57,7 +57,6 @@ var DefaultOpt = Options{
 	NoAppleDouble: true,  // use noappledouble by default
 	NoAppleXattr:  false, // do not use noapplexattr by default
 	AsyncRead:     true,  // do async reads by default
-	NetworkMode:   true,  // use network mode by default (Windows only)
 }
 
 type (

@@ -240,12 +239,8 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
 func (m *MountPoint) Mount() (daemon *os.Process, err error) {
 
 	// Ensure sensible defaults
-	if m.MountOpt.VolumeName == "" {
-		m.MountOpt.VolumeName = fs.ConfigString(m.Fs)
-	}
-	if m.MountOpt.DeviceName == "" {
-		m.MountOpt.DeviceName = fs.ConfigString(m.Fs)
-	}
+	m.SetVolumeName(m.MountOpt.VolumeName)
+	m.SetDeviceName(m.MountOpt.DeviceName)
 
 	// Start background task if --daemon is specified
 	if m.MountOpt.Daemon {

@@ -97,10 +97,29 @@ func checkMountEmpty(mountpoint string) error {
 	return fmt.Errorf(msg+": %w", mountpoint, err)
 }
 
-// MakeVolumeNameValidOnUnix takes a volume name and returns a variant that is valid on unix systems.
-func MakeVolumeNameValidOnUnix(volumeName string) string {
-	volumeName = strings.ReplaceAll(volumeName, ":", " ")
-	volumeName = strings.ReplaceAll(volumeName, "/", " ")
-	volumeName = strings.TrimSpace(volumeName)
-	return volumeName
+// SetVolumeName with sensible default
+func (m *MountPoint) SetVolumeName(vol string) {
+	if vol == "" {
+		vol = fs.ConfigString(m.Fs)
+	}
+	m.MountOpt.SetVolumeName(vol)
+}
+
+// SetVolumeName removes special characters from volume name if necessary
+func (o *Options) SetVolumeName(vol string) {
+	vol = strings.ReplaceAll(vol, ":", " ")
+	vol = strings.ReplaceAll(vol, "/", " ")
+	vol = strings.TrimSpace(vol)
+	if runtime.GOOS == "windows" && len(vol) > 32 {
+		vol = vol[:32]
+	}
+	o.VolumeName = vol
+}
+
+// SetDeviceName with sensible default
+func (m *MountPoint) SetDeviceName(dev string) {
+	if dev == "" {
+		dev = fs.ConfigString(m.Fs)
+	}
+	m.MountOpt.DeviceName = dev
 }

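The Options.SetVolumeName added above, restated as a standalone function; the 32-character cap on Windows is presumably a volume-label length limit:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// sanitizeVolumeName mirrors the added sanitizer: ":" and "/" become spaces,
// surrounding whitespace is trimmed, and on Windows the result is capped at
// 32 characters.
func sanitizeVolumeName(vol string) string {
	vol = strings.ReplaceAll(vol, ":", " ")
	vol = strings.ReplaceAll(vol, "/", " ")
	vol = strings.TrimSpace(vol)
	if runtime.GOOS == "windows" && len(vol) > 32 {
		vol = vol[:32]
	}
	return vol
}

func main() {
	fmt.Println(sanitizeVolumeName("remote:path/to/dir")) // "remote path to dir"
}
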
@@ -60,14 +60,12 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
 		case "":
 			continue
 		case "remote", "fs":
-			if str != "" {
-				p, err := fspath.Parse(str)
-				if err != nil || p.Name == ":" {
-					return fmt.Errorf("cannot parse path %q: %w", str, err)
-				}
-				fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
-				vol.Fs = str
+			p, err := fspath.Parse(str)
+			if err != nil || p.Name == ":" {
+				return fmt.Errorf("cannot parse path %q: %w", str, err)
 			}
+			fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
+			vol.Fs = str
 		case "type":
 			fsType = str
 			vol.Type = str

@@ -18,7 +18,6 @@ import (
 	"github.com/rclone/rclone/fs/dirtree"
 	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/walk"
-	"github.com/rclone/rclone/lib/terminal"
 	"github.com/spf13/cobra"
 )
 
@@ -101,26 +100,22 @@ For a more interactive navigation of the remote see the
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
-		ci := fs.GetConfig(context.Background())
-		var outFile io.Writer
+		outFile := os.Stdout
 		if outFileName != "" {
 			var err error
 			outFile, err = os.Create(outFileName)
 			if err != nil {
 				return fmt.Errorf("failed to create output file: %w", err)
 			}
-			opts.Colorize = false
-		} else {
-			terminal.Start()
-			outFile = terminal.Out
-			opts.Colorize = true
 		}
 		opts.VerSort = opts.VerSort || sort == "version"
 		opts.ModSort = opts.ModSort || sort == "mtime"
 		opts.CTimeSort = opts.CTimeSort || sort == "ctime"
 		opts.NameSort = sort == "name"
 		opts.SizeSort = sort == "size"
+		ci := fs.GetConfig(context.Background())
 		opts.UnitSize = ci.HumanReadable
+		opts.Colorize = ci.TerminalColorMode != fs.TerminalColorModeNever
 		if opts.DeepLevel == 0 {
 			opts.DeepLevel = ci.MaxDepth
 		}

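The tree hunk swaps TTY-based colorization (left) for the global color mode (right): anything other than "never" enables color. A toy sketch of the right-hand decision; ColorMode and its constants are stand-ins for fs.TerminalColorMode:

package main

import (
	"fmt"
	"os"
)

// ColorMode and its values are stand-ins for fs.TerminalColorMode.
type ColorMode int

const (
	ColorModeAuto ColorMode = iota
	ColorModeNever
	ColorModeAlways
)

// colorize mirrors the right-hand expression: everything but "never" colors.
func colorize(mode ColorMode) bool { return mode != ColorModeNever }

func main() {
	out := os.Stdout // the right-hand side also defaults output to stdout
	if colorize(ColorModeAuto) {
		fmt.Fprintln(out, "\x1b[32mdir/\x1b[0m")
	} else {
		fmt.Fprintln(out, "dir/")
	}
}
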
@@ -684,8 +684,3 @@ put them back in again.` >}}
 * happyxhw <44490504+happyxhw@users.noreply.github.com>
 * Simmon Li (he/him) <hello@crespire.dev>
 * Matthias Baur <baurmatt@users.noreply.github.com>
-* Hunter Wittenborn <hunter@hunterwittenborn.com>
-* logopk <peter@kreuser.name>
-* Gerard Bosch <30733556+gerardbosch@users.noreply.github.com>
-* ToBeFree <github@tfrei.de>
-* NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com>

@@ -146,9 +146,9 @@ The base directories on the both Path1 and Path2 filesystems must exist
 or bisync will fail. This is required for safety - that bisync can verify
 that both paths are valid.
 
-When using `--resync`, a newer version of a file either on Path1 or Path2
-filesystem, will overwrite the file on the other path (only the last version
-will be kept). Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
+When using `--resync` a newer version of a file on the Path2 filesystem
+will be overwritten by the Path1 filesystem version.
+Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
 
 For a resync run, one of the paths may be empty (no files in the path tree).
 The resync run should result in files on both paths, else a normal non-resync

@@ -58,7 +58,7 @@ custom salt is effectively a second password that must be memorized.
 based on XSalsa20 cipher and Poly1305 for integrity.
 [Names](#name-encryption) (file- and directory names) are also encrypted
 by default, but this has some implications and is therefore
-possible to be turned off.
+possible to turned off.
 
 ## Configuration
 
@@ -168,19 +168,10 @@ OneDrive allows modification times to be set on objects accurate to 1
 second. These will be used to detect whether objects need syncing or
 not.
 
-OneDrive Personal, OneDrive for Business and Sharepoint Server support
+OneDrive personal supports SHA1 type hashes. OneDrive for business and
+Sharepoint Server support
 [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
 
-Before rclone 1.62 the default hash for Onedrive Personal was `SHA1`.
-For rclone 1.62 and above the default for all Onedrive backends is
-`QuickXorHash`.
-
-Starting from July 2023 `SHA1` support is being phased out in Onedrive
-Personal in favour of `QuickXorHash`. If necessary the
-`--onedrive-hash-type` flag (or `hash_type` config option) can be used
-to select `SHA1` during the transition period if this is important
-your workflow.
-
 For all types of OneDrive you can use the `--checksum` flag.
 
 ### Restricted filename characters

@@ -39,7 +39,7 @@ Here is an overview of the major features of each cloud storage system.
 | Mega                         | -              | -   | No  | Yes       | -   | -   |
 | Memory                       | MD5            | R/W | No  | No        | -   | -   |
 | Microsoft Azure Blob Storage | MD5            | R/W | No  | No        | R/W | -   |
-| Microsoft OneDrive           | QuickXorHash ⁵ | R/W | Yes | No        | R   | -   |
+| Microsoft OneDrive           | SHA1 ⁵         | R/W | Yes | No        | R   | -   |
 | OpenDrive                    | MD5            | R/W | Yes | Partial ⁸ | -   | -   |
 | OpenStack Swift              | MD5            | R/W | No  | No        | R/W | -   |
 | Oracle Object Storage        | MD5            | R/W | No  | No        | R/W | -   |

@@ -72,7 +72,9 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.
 
 ⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.
 
-⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) is Microsoft's own hash.
+⁵ Microsoft OneDrive Personal supports SHA1 hashes, whereas OneDrive
+for business and SharePoint server support Microsoft's own
+[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
 
 ⁶ Mail.ru uses its own modified SHA1 hash
 
@@ -689,8 +689,8 @@ func (s *StatsInfo) RetryAfter() time.Time {
 }
 
 // NewCheckingTransfer adds a checking transfer to the stats, from the object.
-func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry, what string) *Transfer {
-	tr := newCheckingTransfer(s, obj, what)
+func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry) *Transfer {
+	tr := newCheckingTransfer(s, obj)
 	s.checking.add(tr)
 	return tr
 }

@@ -720,7 +720,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry) *Transfer {
 
 // NewTransferRemoteSize adds a transfer to the stats based on remote and size.
 func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64) *Transfer {
-	tr := newTransferRemoteSize(s, remote, size, false, "")
+	tr := newTransferRemoteSize(s, remote, size, false)
 	s.transferring.add(tr)
 	s.startAverageLoop()
 	return tr

@@ -50,7 +50,6 @@ type Transfer struct {
 	size      int64
 	startedAt time.Time
 	checking  bool
-	what      string // what kind of transfer this is
 
 	// Protects all below
 	//

@@ -64,23 +63,22 @@ type Transfer struct {
 }
 
 // newCheckingTransfer instantiates new checking of the object.
-func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry, what string) *Transfer {
-	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true, what)
+func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
+	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true)
 }
 
 // newTransfer instantiates new transfer.
 func newTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
-	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false, "")
+	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false)
 }
 
-func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool, what string) *Transfer {
+func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool) *Transfer {
 	tr := &Transfer{
 		stats:     stats,
 		remote:    remote,
 		size:      size,
 		startedAt: time.Now(),
 		checking:  checking,
-		what:      what,
 	}
 	stats.AddTransfer(tr)
 	return tr

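Taken together, the accounting hunks remove a "what" label threaded from every NewCheckingTransfer call down into Transfer and its progress rendering. A toy sketch of the plumbing the left-hand side had; the types here are stripped-down stand-ins for the real StatsInfo/Transfer machinery:

package main

import "fmt"

// Transfer is a stripped-down stand-in for accounting.Transfer.
type Transfer struct {
	remote   string
	checking bool
	what     string // what kind of transfer this is ("checking", "deleting", ...)
}

func newCheckingTransfer(remote, what string) *Transfer {
	return &Transfer{remote: remote, checking: true, what: what}
}

// String falls back to a generic name when no label was supplied, matching
// the fallback to tm.name in the progress-rendering hunk further down.
func (t *Transfer) String() string {
	what := t.what
	if what == "" {
		what = "checking"
	}
	return fmt.Sprintf("%s: %s", t.remote, what)
}

func main() {
	fmt.Println(newCheckingTransfer("file.txt", "hashing")) // file.txt: hashing
}
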
@@ -98,7 +98,6 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
 	ci := fs.GetConfig(ctx)
 	stringList := make([]string, 0, len(tm.items))
 	for _, tr := range tm._sortedSlice() {
-		var what = tr.what
 		if exclude != nil {
 			exclude.mu.RLock()
 			_, found := exclude.items[tr.remote]

@@ -110,17 +109,11 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
 		var out string
 		if acc := progress.get(tr.remote); acc != nil {
 			out = acc.String()
-			if what != "" {
-				out += ", " + what
-			}
 		} else {
-			if what == "" {
-				what = tm.name
-			}
 			out = fmt.Sprintf("%*s: %s",
 				ci.StatsFileNameLength,
 				shortenName(tr.remote, ci.StatsFileNameLength),
-				what,
+				tm.name,
 			)
 		}
 		stringList = append(stringList, " * "+out)

@@ -120,7 +120,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
 // check to see if two objects are identical using the check function
 func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
 	ci := fs.GetConfig(ctx)
-	tr := accounting.Stats(ctx).NewCheckingTransfer(src, "checking")
+	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
 	defer func() {
 		tr.Done(ctx, err)
 	}()

@@ -450,7 +450,7 @@ func (c *checkMarch) checkSum(ctx context.Context, obj fs.Object, download bool,
 	}
 
 	var err error
-	tr := accounting.Stats(ctx).NewCheckingTransfer(obj, "hashing")
+	tr := accounting.Stats(ctx).NewCheckingTransfer(obj)
 	defer tr.Done(ctx, err)
 
 	if !sumFound {

@@ -286,7 +286,7 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*d
 	ci := fs.GetConfig(ctx)
 	err = walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error {
 		for _, entry := range entries {
-			tr := accounting.Stats(ctx).NewCheckingTransfer(entry, "merging")
+			tr := accounting.Stats(ctx).NewCheckingTransfer(entry)
 
 			remote := entry.Remote()
 			parentRemote := path.Dir(remote)

@@ -438,7 +438,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool
 	files := map[string][]fs.Object{}
 	err := walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
 		entries.ForObject(func(o fs.Object) {
-			tr := accounting.Stats(ctx).NewCheckingTransfer(o, "checking")
+			tr := accounting.Stats(ctx).NewCheckingTransfer(o)
 			defer tr.Done(ctx, nil)

 			var remote string
@@ -544,7 +544,7 @@ func SameObject(src, dst fs.Object) bool {
 // be nil.
 func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
 	ci := fs.GetConfig(ctx)
-	tr := accounting.Stats(ctx).NewCheckingTransfer(src, "moving")
+	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
 	defer func() {
 		if err == nil {
 			accounting.Stats(ctx).Renames(1)
@@ -633,7 +633,7 @@ func SuffixName(ctx context.Context, remote string) string {
 // deleting
 func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
 	ci := fs.GetConfig(ctx)
-	tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
+	tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
 	defer func() {
 		tr.Done(ctx, err)
 	}()
@@ -678,11 +678,11 @@ func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
 func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
 	var wg sync.WaitGroup
 	ci := fs.GetConfig(ctx)
-	wg.Add(ci.Checkers)
+	wg.Add(ci.Transfers)
 	var errorCount int32
 	var fatalErrorCount int32

-	for i := 0; i < ci.Checkers; i++ {
+	for i := 0; i < ci.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for dst := range toBeDeleted {
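Note: the hunk above sizes the delete worker pool from --transfers instead of --checkers; the fan-out pattern itself is unchanged. A self-contained sketch of that pattern, with plain strings standing in for fs.Object and a print standing in for DeleteFileWithBackupDir:

package main

import (
	"fmt"
	"sync"
)

func main() {
	transfers := 4 // stand-in for ci.Transfers
	toBeDeleted := make(chan string)

	// One goroutine per concurrency slot, all draining a shared channel.
	var wg sync.WaitGroup
	wg.Add(transfers)
	for i := 0; i < transfers; i++ {
		go func() {
			defer wg.Done()
			for dst := range toBeDeleted {
				fmt.Println("deleting", dst) // real code calls DeleteFileWithBackupDir
			}
		}()
	}
	for _, f := range []string{"a", "b", "c"} {
		toBeDeleted <- f
	}
	close(toBeDeleted)
	wg.Wait()
}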
@@ -938,7 +938,7 @@ func List(ctx context.Context, f fs.Fs, w io.Writer) error {
 func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
 	ci := fs.GetConfig(ctx)
 	return ListFn(ctx, f, func(o fs.Object) {
-		tr := accounting.Stats(ctx).NewCheckingTransfer(o, "listing")
+		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
 		defer func() {
 			tr.Done(ctx, nil)
 		}()
@@ -996,7 +996,7 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
 			return "ERROR", fmt.Errorf("hasher returned an error: %w", err)
 		}
 	} else {
		tr := accounting.Stats(ctx).NewCheckingTransfer(o, "hashing")
+		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
 		defer func() {
 			tr.Done(ctx, err)
 		}()
@@ -1022,12 +1022,7 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
 // Updated to perform multiple hashes concurrently
 func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
 	width := hash.Width(ht, outputBase64)
-	// Use --checkers concurrency unless downloading in which case use --transfers
-	concurrency := fs.GetConfig(ctx).Checkers
-	if downloadFlag {
-		concurrency = fs.GetConfig(ctx).Transfers
-	}
-	concurrencyControl := make(chan struct{}, concurrency)
+	concurrencyControl := make(chan struct{}, fs.GetConfig(ctx).Transfers)
 	var wg sync.WaitGroup
 	err := ListFn(ctx, f, func(o fs.Object) {
 		wg.Add(1)
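Note: HashLister bounds concurrent hashing with a buffered channel used as a counting semaphore; after this diff the bound is always --transfers, where the removed code picked --checkers unless --download was set. A self-contained sketch of the semaphore pattern:

package main

import (
	"fmt"
	"sync"
)

func main() {
	concurrencyControl := make(chan struct{}, 4) // capacity = ci.Transfers
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		i := i
		wg.Add(1)
		concurrencyControl <- struct{}{} // acquire a slot (blocks when full)
		go func() {
			defer func() {
				<-concurrencyControl // release the slot
				wg.Done()
			}()
			fmt.Println("hashing object", i)
		}()
	}
	wg.Wait()
}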
@@ -1178,7 +1173,7 @@ func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
 // obeys includes and excludes.
 func Delete(ctx context.Context, f fs.Fs) error {
 	ci := fs.GetConfig(ctx)
-	delChan := make(fs.ObjectsChan, ci.Checkers)
+	delChan := make(fs.ObjectsChan, ci.Transfers)
 	delErr := make(chan error, 1)
 	go func() {
 		delErr <- DeleteFiles(ctx, delChan)
@@ -1934,6 +1929,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str

 		_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
 	} else {
+		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
 		if !cp {
 			if ci.IgnoreExisting {
 				fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
@@ -1941,6 +1937,7 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
 				err = DeleteFile(ctx, srcObj)
 			}
 		}
+		tr.Done(ctx, err)
 	}
 	return err
 }
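Note: this pair of hunks wraps the source-delete branch of moveOrCopyFile in its own checking transfer so the step shows up in stats; Done is called explicitly rather than deferred because only this branch is accounted. A compile-level sketch of that shape, assuming the rclone tree this diff applies to — accountedStep and doStep are hypothetical names for illustration:

package sketch

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
)

func accountedStep(ctx context.Context, srcObj fs.Object, doStep func() error) error {
	tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
	err := doStep() // stand-in for the delete/skip logic in the diff
	tr.Done(ctx, err)
	return err
}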
@@ -2192,9 +2189,9 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
 		o       fs.Object
 		newPath string
 	}
-	renames := make(chan rename, ci.Checkers)
+	renames := make(chan rename, ci.Transfers)
 	g, gCtx := errgroup.WithContext(context.Background())
-	for i := 0; i < ci.Checkers; i++ {
+	for i := 0; i < ci.Transfers; i++ {
 		g.Go(func() error {
 			for job := range renames {
 				dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
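Note: DirMove's rename fan-out now sizes both its job channel and its worker count from --transfers. A self-contained sketch of the errgroup pattern it uses, with string jobs standing in for the rename struct:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	transfers := 4 // stand-in for ci.Transfers
	renames := make(chan string, transfers)
	g, gCtx := errgroup.WithContext(context.Background())
	for i := 0; i < transfers; i++ {
		g.Go(func() error {
			for job := range renames {
				// the first worker error cancels the group context
				select {
				case <-gCtx.Done():
					return gCtx.Err()
				default:
				}
				fmt.Println("renaming", job)
			}
			return nil
		})
	}
	for _, j := range []string{"a", "b"} {
		renames <- j
	}
	close(renames)
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}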
@@ -329,7 +329,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
 		}
 		src := pair.Src
 		var err error
-		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src, "checking")
+		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
 		// Check to see if can store this
 		if src.Storable() {
 			needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
@@ -537,7 +537,7 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
 	}

 	// Delete the spare files
-	toDelete := make(fs.ObjectsChan, s.ci.Checkers)
+	toDelete := make(fs.ObjectsChan, s.ci.Transfers)
 	go func() {
 	outer:
 		for remote, o := range s.dstFiles {
@@ -772,14 +772,14 @@ func (s *syncCopyMove) makeRenameMap() {
 	// now make a map of size,hash for all dstFiles
 	s.renameMap = make(map[string][]fs.Object)
 	var wg sync.WaitGroup
-	wg.Add(s.ci.Checkers)
-	for i := 0; i < s.ci.Checkers; i++ {
+	wg.Add(s.ci.Transfers)
+	for i := 0; i < s.ci.Transfers; i++ {
 		go func() {
 			defer wg.Done()
 			for obj := range in {
 				// only create hash for dst fs.Object if its size could match
 				if _, found := possibleSizes[obj.Size()]; found {
-					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj, "renaming")
+					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj)
 					hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)

 					if hash != "" {
go.mod (2 lines changed)
@@ -56,7 +56,7 @@ require (
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.1
-	github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca
+	github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf
 	github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0
 	github.com/xanzy/ssh-agent v0.3.3
 	github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
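Note: this pins go-mega back to the 2022-07-25 pseudo-version. To reproduce such a pin locally one would normally let the Go tooling rewrite go.mod and go.sum rather than editing them by hand, e.g. (module path and version from this diff, commands from standard Go tooling):

go get github.com/t3rm1n4l/go-mega@v0.0.0-20220725095014-c4e0c2b5debf
go mod tidy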
go.sum (2 lines changed)
@@ -462,8 +462,6 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf h1:Y43S3e9P1NPs/QF4R5/SdlXj2d31540hP4Gk8VKNvDg=
 github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf/go.mod h1:c+cGNU1qi9bO7ZF4IRMYk+KaZTNiQ/gQrSbyMmGFq1Q=
-github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca h1:I9rVnNXdIkij4UvMT7OmKhH9sOIvS8iXkxfPdnn9wQA=
-github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
 github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
 github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
@@ -29,19 +29,6 @@ func Split(absPath string) (bucket, bucketPath string) {
 	return absPath[:slash], absPath[slash+1:]
 }

-// Join path1 and path2
-//
-// Like path.Join but does not clean the path - useful to preserve trailing /
-func Join(path1, path2 string) string {
-	if path1 == "" {
-		return path2
-	}
-	if path2 == "" {
-		return path1
-	}
-	return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
-}
-
 // Cache stores whether buckets are available and their IDs
 type Cache struct {
 	mu        sync.Mutex // mutex to protect created and deleted
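Note: the deleted bucket.Join differed from the standard library's path.Join in exactly one way: it did not clean the result, so trailing slashes survived. A runnable comparison — join restates the removed helper purely for illustration:

package main

import (
	"fmt"
	"path"
	"strings"
)

// join re-states the deleted helper: concatenate without cleaning.
func join(p1, p2 string) string {
	if p1 == "" {
		return p2
	}
	if p2 == "" {
		return p1
	}
	return strings.TrimSuffix(p1, "/") + "/" + strings.TrimPrefix(p2, "/")
}

func main() {
	fmt.Println(path.Join("in1", "in2/")) // "in1/in2"  - trailing slash cleaned away
	fmt.Println(join("in1", "in2/"))      // "in1/in2/" - trailing slash preserved
}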
@@ -2,7 +2,6 @@ package bucket

 import (
 	"errors"
-	"fmt"
 	"testing"

 	"github.com/stretchr/testify/assert"
@@ -25,26 +24,6 @@ func TestSplit(t *testing.T) {
 		}
 	}
 }

-func TestJoin(t *testing.T) {
-	for _, test := range []struct {
-		in1, in2 string
-		want     string
-	}{
-		{in1: "", in2: "", want: ""},
-		{in1: "in1", in2: "", want: "in1"},
-		{in1: "", in2: "in2", want: "in2"},
-		{in1: "in1", in2: "in2", want: "in1/in2"},
-		{in1: "in1/", in2: "in2", want: "in1/in2"},
-		{in1: "in1", in2: "/in2", want: "in1/in2"},
-		{in1: "in1", in2: "in2/", want: "in1/in2/"},
-		{in1: "/in1", in2: "/in2", want: "/in1/in2"},
-		{in1: "/in1", in2: "../in2", want: "/in1/../in2"},
-	} {
-		got := Join(test.in1, test.in2)
-		assert.Equal(t, test.want, got, fmt.Sprintf("in1=%q, in2=%q", test.in1, test.in2))
-	}
-}
-
 func TestCache(t *testing.T) {
 	c := NewCache()
 	errBoom := errors.New("boom")
@@ -111,14 +111,3 @@ func Write(out []byte) {
 	Start()
 	_, _ = Out.Write(out)
 }
-
-// EnableColorsStdout enable colors if possible.
-// This enables virtual terminal processing on Windows 10 console,
-// adding native support for VT100 escape codes. When this terminal
-// package is used for output, the result is that the colorable library
-// don't have to decode the escapes and explicitely write text with color
-// formatting to the console using Windows API functions, but can simply
-// relay everything to stdout.
-func EnableColorsStdout() {
-	_ = colorable.EnableColorsStdout(nil)
-}
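Note: the deleted helper was a one-line wrapper over the colorable package. A minimal sketch of calling it directly, assuming the github.com/mattn/go-colorable import the removed code used; on Windows 10 it turns on virtual terminal processing so VT100 escapes pass straight through, elsewhere it is a no-op:

package main

import (
	"fmt"

	colorable "github.com/mattn/go-colorable"
)

func main() {
	// Returns a restore func, which the removed wrapper also discarded.
	_ = colorable.EnableColorsStdout(nil)
	fmt.Println("\x1b[32mgreen if the console supports VT100\x1b[0m")
}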