mirror of https://github.com/rclone/rclone.git
synced 2026-01-26 22:33:35 +00:00

Compare commits: fix-s3-end ... fix-vfs-mo (43 commits)
| SHA1 |
|---|
| 1a66ed9315 |
| f07abea072 |
| fb4600f6f9 |
| 1d0c75b0c2 |
| 2e435af4de |
| 62a7765e57 |
| 5ad942ed87 |
| 96609e3d6e |
| 28a8ebce5b |
| 17854663de |
| a4a6b5930a |
| e9ae620844 |
| e7cfb8ad8e |
| 786a1c212c |
| 03bc270730 |
| 7cef042231 |
| 1155cc0d3f |
| 13c3f67ab0 |
| ab2cdd840f |
| 143285e2b7 |
| 19e8c8d42a |
| de9c4a3611 |
| d7ad13d929 |
| f9d50f677d |
| 3641993fab |
| 93d3ae04c7 |
| e25e9fbf22 |
| fe26d6116d |
| 06e1e18793 |
| 23d17b76be |
| dfe4e78a77 |
| 59e7982040 |
| c6b0587dc0 |
| 9baa4d1c3c |
| a5390dbbeb |
| 019a486d5b |
| 34ce11d2be |
| 88e8ede0aa |
| f6f250c507 |
| 2c45e901f0 |
| 9e1443799a |
| dd72aff98a |
| 5039f9be48 |
@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 				fs.Errorf(object.Name, "Can't create object %v", err)
 				continue
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
 			err = f.deleteByID(ctx, object.ID, object.Name)
 			checkErr(err)
 			tr.Done(ctx, err)
@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			if err != nil {
 				fs.Errorf(object, "Can't create object %+v", err)
 			}
-			tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+			tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
 			if oldOnly && last != remote {
 				// Check current version of the file
 				if object.Action == "hide" {
@@ -761,7 +761,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
 		} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
 			fs.Errorf(f, "Received download limit error: %v", err)
 			return false, fserrors.FatalError(err)
-		} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
+		} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
 			fs.Errorf(f, "Received upload limit error: %v", err)
 			return false, fserrors.FatalError(err)
 		} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {
@@ -243,6 +243,15 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
 	quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
 	assert.False(t, quotaExceededRetry)
 	assert.Equal(t, quotaExceededError, expectedQuotaError)
+
+	sqEItem := googleapi.ErrorItem{
+		Reason: "storageQuotaExceeded",
+	}
+	generic403.Errors[0] = sqEItem
+	expectedStorageQuotaError := fserrors.FatalError(&generic403)
+	storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
+	assert.False(t, storageQuotaExceededRetry)
+	assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
 }

 func (f *Fs) InternalTestDocumentImport(t *testing.T) {
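With `storageQuotaExceeded` now grouped with `quotaExceeded`, hitting either quota aborts the run instead of retrying forever. A hedged CLI sketch (local path and remote name are placeholders):

```
rclone sync /local/dir gdrive:backup --drive-stop-on-upload-limit
```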
@@ -15,7 +15,7 @@ import (
 	"sync"
 	"time"

-	"github.com/rclone/ftp"
+	"github.com/jlaffaye/ftp"
 	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config"
@@ -315,18 +315,26 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
 	return len(p), nil
 }

+// returns true if this FTP error should be retried
+func isRetriableFtpError(err error) bool {
+	switch errX := err.(type) {
+	case *textproto.Error:
+		switch errX.Code {
+		case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
+			return true
+		}
+	}
+	return false
+}
+
 // shouldRetry returns a boolean as to whether this err deserve to be
 // retried. It returns the err as a convenience
 func shouldRetry(ctx context.Context, err error) (bool, error) {
 	if fserrors.ContextError(ctx, &err) {
 		return false, err
 	}
-	switch errX := err.(type) {
-	case *textproto.Error:
-		switch errX.Code {
-		case ftp.StatusNotAvailable:
-			return true, err
-		}
-	}
+	if isRetriableFtpError(err) {
+		return true, err
+	}
 	return fserrors.ShouldRetry(err), err
 }
@@ -1186,15 +1194,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
 			}
 		}
 	}
-	c, err := o.fs.getFtpConnection(ctx)
+
+	var (
+		fd *ftp.Response
+		c  *ftp.ServerConn
+	)
+	err = o.fs.pacer.Call(func() (bool, error) {
+		c, err = o.fs.getFtpConnection(ctx)
+		if err != nil {
+			return false, err // getFtpConnection has retries already
+		}
+		fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
+		if err != nil {
+			o.fs.putFtpConnection(&c, err)
+		}
+		return shouldRetry(ctx, err)
+	})
 	if err != nil {
 		return nil, fmt.Errorf("open: %w", err)
 	}
-	fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
-	if err != nil {
-		o.fs.putFtpConnection(&c, err)
-		return nil, fmt.Errorf("open: %w", err)
-	}

 	rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
 	return rc, nil
 }
@@ -82,7 +82,8 @@ func init() {
 		saFile, _ := m.Get("service_account_file")
 		saCreds, _ := m.Get("service_account_credentials")
 		anonymous, _ := m.Get("anonymous")
-		if saFile != "" || saCreds != "" || anonymous == "true" {
+		envAuth, _ := m.Get("env_auth")
+		if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
 			return nil, nil
 		}
 		return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -330,6 +331,17 @@ can't check the size and hash but the file contents will be decompressed.
 			Default: (encoder.Base |
 				encoder.EncodeCrLf |
 				encoder.EncodeInvalidUtf8),
+		}, {
+			Name:    "env_auth",
+			Help:    "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
+			Default: false,
+			Examples: []fs.OptionExample{{
+				Value: "false",
+				Help:  "Enter credentials in the next step.",
+			}, {
+				Value: "true",
+				Help:  "Get GCP IAM credentials from the environment (env vars or IAM).",
+			}},
 		}}...),
 	})
 }
@@ -349,6 +361,7 @@ type Options struct {
 	Decompress bool                 `config:"decompress"`
 	Endpoint   string               `config:"endpoint"`
 	Enc        encoder.MultiEncoder `config:"encoding"`
+	EnvAuth    bool                 `config:"env_auth"`
 }

 // Fs represents a remote storage server
@@ -500,6 +513,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if err != nil {
 		return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
 	}
+	} else if opt.EnvAuth {
+		oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
+		if err != nil {
+			return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
+		}
 	} else {
 		oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
 		if err != nil {
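The new `env_auth` branch delegates to `google.DefaultClient`, which typically reads `GOOGLE_APPLICATION_CREDENTIALS` or instance metadata. A minimal config sketch, assuming a hypothetical remote name:

```
[gcs]
type = google cloud storage
env_auth = true
```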
@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
 			if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
 				fs.Errorf(nil, "%s: failed to import: %v", remote, err)
 			}
-			accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
+			accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
 			doneCount++
 		}
 	})
@@ -524,6 +524,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
 		if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
 			localPath := filepath.Join(fsDirPath, name)
 			fi, err = os.Stat(localPath)
+			// Quietly skip errors on excluded files and directories
+			if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
+				continue
+			}
 			if os.IsNotExist(err) || isCircularSymlinkError(err) {
 				// Skip bad symlinks and circular symlinks
 				err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))
@@ -14,6 +14,7 @@ import (
 	"time"

 	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/fs/accounting"
 	"github.com/rclone/rclone/fs/config/configmap"
 	"github.com/rclone/rclone/fs/filter"
 	"github.com/rclone/rclone/fs/hash"
@@ -395,3 +396,73 @@ func TestFilter(t *testing.T) {
 	sort.Sort(entries)
 	require.Equal(t, "[included]", fmt.Sprint(entries))
 }
+
+func TestFilterSymlink(t *testing.T) {
+	ctx := context.Background()
+	r := fstest.NewRun(t)
+	defer r.Finalise()
+	when := time.Now()
+	f := r.Flocal.(*Fs)
+
+	// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
+	r.WriteFile("included.file", "included file", when)
+	r.WriteFile("included.dir/included.sub.file", "included sub file", when)
+	require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
+	require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
+	require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
+
+	// Set fs into "-L" mode
+	f.opt.FollowSymlinks = true
+	f.opt.TranslateSymlinks = false
+	f.lstat = os.Stat
+
+	// Set fs into "-l" mode
+	// f.opt.FollowSymlinks = false
+	// f.opt.TranslateSymlinks = true
+	// f.lstat = os.Lstat
+
+	// Check set up for filtering
+	assert.True(t, f.Features().FilterAware)
+
+	// Reset global error count
+	accounting.Stats(ctx).ResetErrors()
+	assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+	// Add a filter
+	ctx, fi := filter.AddConfig(ctx)
+	require.NoError(t, fi.AddRule("+ included.file"))
+	require.NoError(t, fi.AddRule("+ included.file.link"))
+	require.NoError(t, fi.AddRule("+ included.dir/**"))
+	require.NoError(t, fi.AddRule("+ included.dir.link/**"))
+	require.NoError(t, fi.AddRule("- *"))
+
+	// Check listing without use filter flag
+	entries, err := f.List(ctx, "")
+	require.NoError(t, err)
+
+	// Check 1 global errors one for each dangling symlink
+	assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
+	accounting.Stats(ctx).ResetErrors()
+
+	sort.Sort(entries)
+	require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
+
+	// Add user filter flag
+	ctx = filter.SetUseFilter(ctx, true)
+
+	// Check listing with use filter flag
+	entries, err = f.List(ctx, "")
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+	sort.Sort(entries)
+	require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
+
+	// Check listing through a symlink still works
+	entries, err = f.List(ctx, "included.dir")
+	require.NoError(t, err)
+	assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
+
+	sort.Sort(entries)
+	require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
+}
@@ -83,6 +83,17 @@ than permanently deleting them. If you specify this then rclone will
 permanently delete objects instead.`,
 			Default:  false,
 			Advanced: true,
+		}, {
+			Name: "use_https",
+			Help: `Use HTTPS for transfers.
+
+MEGA uses plain text HTTP connections by default.
+Some ISPs throttle HTTP connections, this causes transfers to become very slow.
+Enabling this will force MEGA to use HTTPS for all transfers.
+HTTPS is normally not necessary since all data is already encrypted anyway.
+Enabling it will increase CPU usage and add network overhead.`,
+			Default:  false,
+			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
@@ -100,6 +111,7 @@ type Options struct {
 	Pass       string               `config:"pass"`
 	Debug      bool                 `config:"debug"`
 	HardDelete bool                 `config:"hard_delete"`
+	UseHTTPS   bool                 `config:"use_https"`
 	Enc        encoder.MultiEncoder `config:"encoding"`
 }
@@ -204,6 +216,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	if srv == nil {
 		srv = mega.New().SetClient(fshttp.NewClient(ctx))
 		srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
+		srv.SetHTTPS(opt.UseHTTPS)
 		srv.SetLogger(func(format string, v ...interface{}) {
 			fs.Infof("*go-mega*", format, v...)
 		})
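A config sketch for the new option (remote name and user are placeholders):

```
[mega]
type = mega
user = you@example.com
use_https = true
```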
@@ -126,6 +126,7 @@ type HashesType struct {
 	Sha1Hash     string `json:"sha1Hash"`     // hex encoded SHA1 hash for the contents of the file (if available)
 	Crc32Hash    string `json:"crc32Hash"`    // hex encoded CRC32 value of the file (if available)
 	QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
+	Sha256Hash   string `json:"sha256Hash"`   // hex encoded SHA256 value of the file (if available)
 }

 // FileFacet groups file-related data on OneDrive into a single structure.
@@ -259,6 +259,48 @@ this flag there.
 At the time of writing this only works with OneDrive personal paid accounts.
 `,
 			Advanced: true,
+		}, {
+			Name:    "hash_type",
+			Default: "auto",
+			Help: `Specify the hash in use for the backend.
+
+This specifies the hash type in use. If set to "auto" it will use the
+default hash which is QuickXorHash.
+
+Before rclone 1.62 an SHA1 hash was used by default for Onedrive
+Personal. For 1.62 and later the default is to use a QuickXorHash for
+all onedrive types. If an SHA1 hash is desired then set this option
+accordingly.
+
+From July 2023 QuickXorHash will be the only available hash for
+both OneDrive for Business and OneDrive Personal.
+
+This can be set to "none" to not use any hashes.
+
+If the hash requested does not exist on the object, it will be
+returned as an empty string which is treated as a missing hash by
+rclone.
+`,
+			Examples: []fs.OptionExample{{
+				Value: "auto",
+				Help:  "Rclone chooses the best hash",
+			}, {
+				Value: "quickxor",
+				Help:  "QuickXor",
+			}, {
+				Value: "sha1",
+				Help:  "SHA1",
+			}, {
+				Value: "sha256",
+				Help:  "SHA256",
+			}, {
+				Value: "crc32",
+				Help:  "CRC32",
+			}, {
+				Value: "none",
+				Help:  "None - don't use any hashes",
+			}},
+			Advanced: true,
 		}, {
 			Name:     config.ConfigEncoding,
 			Help:     config.ConfigEncodingHelp,
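A config sketch pinning SHA1 during the transition (remote name is a placeholder); the same choice can be made per run with `--onedrive-hash-type sha1`:

```
[onedrive]
type = onedrive
hash_type = sha1
```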
@@ -597,6 +639,7 @@ type Options struct {
 	LinkScope    string               `config:"link_scope"`
 	LinkType     string               `config:"link_type"`
 	LinkPassword string               `config:"link_password"`
+	HashType     string               `config:"hash_type"`
 	Enc          encoder.MultiEncoder `config:"encoding"`
 }
@@ -613,6 +656,7 @@ type Fs struct {
 	tokenRenewer *oauthutil.Renew // renew the token on expiry
 	driveID      string           // ID to use for querying Microsoft Graph
 	driveType    string           // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
+	hashType     hash.Type        // type of the hash we are using
 }

 // Object describes a OneDrive object
@@ -626,8 +670,7 @@ type Object struct {
 	size         int64     // size of the object
 	modTime      time.Time // modification time of the object
 	id           string    // ID of the object
-	sha1         string    // SHA-1 of the object content
-	quickxorhash string    // QuickXorHash of the object content
+	hash         string    // Hash of the content, usually QuickXorHash but set as hash_type
 	mimeType     string    // Content-Type of object from server (may not be as uploaded)
 }
@@ -882,6 +925,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 		driveType: opt.DriveType,
 		srv:       rest.NewClient(oAuthClient).SetRoot(rootURL),
 		pacer:     fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
+		hashType:  QuickXorHashType,
 	}
 	f.features = (&fs.Features{
 		CaseInsensitive: true,
@@ -891,6 +935,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
 	}).Fill(ctx, f)
 	f.srv.SetErrorHandler(errorHandler)

+	// Set the user defined hash
+	if opt.HashType == "auto" || opt.HashType == "" {
+		opt.HashType = QuickXorHashType.String()
+	}
+	err = f.hashType.Set(opt.HashType)
+	if err != nil {
+		return nil, err
+	}
+
 	// Disable change polling in China region
 	// See: https://github.com/rclone/rclone/issues/6444
 	if f.opt.Region == regionCN {
@@ -1556,10 +1609,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {

 // Hashes returns the supported hash sets.
 func (f *Fs) Hashes() hash.Set {
-	if f.driveType == driveTypePersonal {
-		return hash.Set(hash.SHA1)
-	}
-	return hash.Set(QuickXorHashType)
+	return hash.Set(f.hashType)
 }

 // PublicLink returns a link for downloading without account.
@@ -1768,14 +1818,8 @@ func (o *Object) rootPath() string {

 // Hash returns the SHA-1 of an object returning a lowercase hex string
 func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
-	if o.fs.driveType == driveTypePersonal {
-		if t == hash.SHA1 {
-			return o.sha1, nil
-		}
-	} else {
-		if t == QuickXorHashType {
-			return o.quickxorhash, nil
-		}
+	if t == o.fs.hashType {
+		return o.hash, nil
 	}
 	return "", hash.ErrUnsupported
 }
@@ -1806,16 +1850,23 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
 	file := info.GetFile()
 	if file != nil {
 		o.mimeType = file.MimeType
-		if file.Hashes.Sha1Hash != "" {
-			o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
-		}
-		if file.Hashes.QuickXorHash != "" {
-			h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
-			if err != nil {
-				fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
-			} else {
-				o.quickxorhash = hex.EncodeToString(h)
+		o.hash = ""
+		switch o.fs.hashType {
+		case QuickXorHashType:
+			if file.Hashes.QuickXorHash != "" {
+				h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
+				if err != nil {
+					fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
+				} else {
+					o.hash = hex.EncodeToString(h)
+				}
 			}
+		case hash.SHA1:
+			o.hash = strings.ToLower(file.Hashes.Sha1Hash)
+		case hash.SHA256:
+			o.hash = strings.ToLower(file.Hashes.Sha256Hash)
+		case hash.CRC32:
+			o.hash = strings.ToLower(file.Hashes.Crc32Hash)
 		}
 	}
 	fileSystemInfo := info.GetFileSystemInfo()
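Since the object now carries the single hash selected by `hashType`, sum listings follow the configured type. A hedged CLI sketch (`remote:` is a placeholder):

```
rclone hashsum quickxor remote:
rclone hashsum sha1 remote: --onedrive-hash-type sha1
```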
@@ -2266,6 +2266,11 @@ rclone's choice here.
 		Help:     `Suppress setting and reading of system metadata`,
 		Advanced: true,
 		Default:  false,
+	}, {
+		Name:     "sts_endpoint",
+		Help:     "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
+		Provider: "AWS",
+		Advanced: true,
 	},
 	}})
 }
@@ -2352,6 +2357,7 @@ type Options struct {
 	SecretAccessKey    string `config:"secret_access_key"`
 	Region             string `config:"region"`
 	Endpoint           string `config:"endpoint"`
+	STSEndpoint        string `config:"sts_endpoint"`
 	LocationConstraint string `config:"location_constraint"`
 	ACL                string `config:"acl"`
 	BucketACL          string `config:"bucket_acl"`
@@ -2528,7 +2534,7 @@ func parsePath(path string) (root string) {
 // split returns bucket and bucketPath from the rootRelativePath
 // relative to f.root
 func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
-	bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
+	bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
 	return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
 }
@@ -2560,6 +2566,38 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
 	}
 }

+// Default name resolver
+var defaultResolver = endpoints.DefaultResolver()
+
+// resolve (service, region) to endpoint
+//
+// Used to set endpoint for s3 services and not for other services
+type resolver map[string]string
+
+// Add a service to the resolver, ignoring empty urls
+func (r resolver) addService(service, url string) {
+	if url == "" {
+		return
+	}
+	if !strings.HasPrefix(url, "http") {
+		url = "https://" + url
+	}
+	r[service] = url
+}
+
+// EndpointFor return the endpoint for s3 if set or the default if not
+func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
+	fs.Debugf(nil, "Resolving service %q region %q", service, region)
+	url, ok := r[service]
+	if ok {
+		return endpoints.ResolvedEndpoint{
+			URL:           url,
+			SigningRegion: region,
+		}, nil
+	}
+	return defaultResolver.EndpointFor(service, region, opts...)
+}
+
 // s3Connection makes a connection to s3
 func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
 	ci := fs.GetConfig(ctx)
@@ -2638,8 +2676,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 	if opt.Region != "" {
 		awsConfig.WithRegion(opt.Region)
 	}
-	if opt.Endpoint != "" {
-		awsConfig.WithEndpoint(opt.Endpoint)
+	if opt.Endpoint != "" || opt.STSEndpoint != "" {
+		// If endpoints are set, override the relevant services only
+		r := make(resolver)
+		r.addService("s3", opt.Endpoint)
+		r.addService("sts", opt.STSEndpoint)
+		awsConfig.WithEndpointResolver(r)
 	}

 	// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
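A config sketch exercising the resolver (both URLs are placeholders); only the listed services are overridden and everything else falls back to `defaultResolver`:

```
[s3]
type = s3
provider = AWS
endpoint = https://s3.internal.example.com
sts_endpoint = https://sts.internal.example.com
```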
@@ -2657,7 +2699,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
 		}
 		// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
 		// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
-		// awsSessionOpts.Config.Credentials = nil
+		awsSessionOpts.Config.Credentials = nil
 	}
 	ses, err := session.NewSessionWithOptions(awsSessionOpts)
 	if err != nil {
@@ -3426,15 +3468,16 @@ var errEndList = errors.New("end list")

 // list options
 type listOpt struct {
-	bucket       string  // bucket to list
-	directory    string  // directory with bucket
-	prefix       string  // prefix to remove from listing
-	addBucket    bool    // if set, the bucket is added to the start of the remote
-	recurse      bool    // if set, recurse to read sub directories
-	withVersions bool    // if set, versions are produced
-	hidden       bool    // if set, return delete markers as objects with size == isDeleteMarker
-	findFile     bool    // if set, it will look for files called (bucket, directory)
-	versionAt    fs.Time // if set only show versions <= this time
+	bucket        string  // bucket to list
+	directory     string  // directory with bucket
+	prefix        string  // prefix to remove from listing
+	addBucket     bool    // if set, the bucket is added to the start of the remote
+	recurse       bool    // if set, recurse to read sub directories
+	withVersions  bool    // if set, versions are produced
+	hidden        bool    // if set, return delete markers as objects with size == isDeleteMarker
+	findFile      bool    // if set, it will look for files called (bucket, directory)
+	versionAt     fs.Time // if set only show versions <= this time
+	noSkipMarkers bool    // if set return dir marker objects
 }

 // list lists the objects into the function supplied with the opt
@@ -3547,7 +3590,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 			}
 			remote = remote[len(opt.prefix):]
 			if opt.addBucket {
-				remote = path.Join(opt.bucket, remote)
+				remote = bucket.Join(opt.bucket, remote)
 			}
 			remote = strings.TrimSuffix(remote, "/")
 			err = fn(remote, &s3.Object{Key: &remote}, nil, true)
@@ -3576,10 +3619,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
 		remote = remote[len(opt.prefix):]
 		isDirectory := remote == "" || strings.HasSuffix(remote, "/")
 		if opt.addBucket {
-			remote = path.Join(opt.bucket, remote)
+			remote = bucket.Join(opt.bucket, remote)
 		}
 		// is this a directory marker?
-		if isDirectory && object.Size != nil && *object.Size == 0 {
+		if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
 			continue // skip directory marker
 		}
 		if versionIDs != nil {
@@ -3869,7 +3912,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
 	req.Bucket = &dstBucket
 	req.ACL = stringPointerOrNil(f.opt.ACL)
 	req.Key = &dstPath
-	source := pathEscape(path.Join(srcBucket, srcPath))
+	source := pathEscape(bucket.Join(srcBucket, srcPath))
 	if src.versionID != nil {
 		source += fmt.Sprintf("?versionId=%s", *src.versionID)
 	}
@@ -4526,13 +4569,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 		delErr <- operations.DeleteFiles(ctx, delChan)
 	}()
 	checkErr(f.list(ctx, listOpt{
-		bucket:       bucket,
-		directory:    directory,
-		prefix:       f.rootDirectory,
-		addBucket:    f.rootBucket == "",
-		recurse:      true,
-		withVersions: versioned,
-		hidden:       true,
+		bucket:        bucket,
+		directory:     directory,
+		prefix:        f.rootDirectory,
+		addBucket:     f.rootBucket == "",
+		recurse:       true,
+		withVersions:  versioned,
+		hidden:        true,
+		noSkipMarkers: true,
 	}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
 		if isDirectory {
 			return nil
@@ -4542,7 +4586,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
 			fs.Errorf(object, "Can't create object %+v", err)
 			return nil
 		}
-		tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
+		tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
 		// Work out whether the file is the current version or not
 		isCurrentVersion := !versioned || !version.Match(remote)
 		fs.Debugf(nil, "%q version %v", remote, version.Match(remote))
@@ -1,7 +1,6 @@
 package seafile

 import (
-	"sync"
 	"sync/atomic"
 	"testing"
 	"time"
@@ -17,19 +16,19 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
 	renew.Shutdown()
 }

-func TestRenewal(t *testing.T) {
+func TestRenewalInTimeLimit(t *testing.T) {
 	var count int64

-	wg := sync.WaitGroup{}
-	wg.Add(2) // run the renewal twice
-	renew := NewRenew(time.Millisecond, func() error {
+	renew := NewRenew(100*time.Millisecond, func() error {
 		atomic.AddInt64(&count, 1)
-		wg.Done()
 		return nil
 	})
-	wg.Wait()
+	time.Sleep(time.Second)
 	renew.Shutdown()

-	// it is technically possible that a third renewal gets triggered between Wait() and Shutdown()
-	assert.GreaterOrEqual(t, atomic.LoadInt64(&count), int64(2))
+	// there's no guarantee the CI agent can handle a simple goroutine
+	renewCount := atomic.LoadInt64(&count)
+	t.Logf("renew count = %d", renewCount)
+	assert.Greater(t, renewCount, int64(0))
+	assert.Less(t, renewCount, int64(11))
 }
@@ -34,9 +34,10 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {

 	d := &smb2.Dialer{
 		Initiator: &smb2.NTLMInitiator{
-			User:     f.opt.User,
-			Password: pass,
-			Domain:   f.opt.Domain,
+			User:      f.opt.User,
+			Password:  pass,
+			Domain:    f.opt.Domain,
+			TargetSPN: f.opt.SPN,
 		},
 	}
@@ -60,6 +60,17 @@ func init() {
 			Name:    "domain",
 			Help:    "Domain name for NTLM authentication.",
 			Default: "WORKGROUP",
+		}, {
+			Name: "spn",
+			Help: `Service principal name.
+
+Rclone presents this name to the server. Some servers use this as further
+authentication, and it often needs to be set for clusters. For example:
+
+    cifs/remotehost:1020
+
+Leave blank if not sure.
+`,
 		}, {
 			Name:    "idle_timeout",
 			Default: fs.Duration(60 * time.Second),
@@ -109,6 +120,7 @@ type Options struct {
 	User            string      `config:"user"`
 	Pass            string      `config:"pass"`
 	Domain          string      `config:"domain"`
+	SPN             string      `config:"spn"`
 	HideSpecial     bool        `config:"hide_special_share"`
 	CaseInsensitive bool        `config:"case_insensitive"`
 	IdleTimeout     fs.Duration `config:"idle_timeout"`
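A config sketch for a clustered server using the new option (host, user and SPN are placeholders, following the help text's example form):

```
[smb]
type = smb
host = remotehost
user = rclone
spn = cifs/remotehost:1020
```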
@@ -401,9 +401,15 @@ func initConfig() {
 	// Start accounting
 	accounting.Start(ctx)

-	// Hide console window
+	// Configure console
 	if ci.NoConsole {
+		// Hide the console window
 		terminal.HideConsole()
+	} else {
+		// Enable color support on stdout if possible.
+		// This enables virtual terminal processing on Windows 10,
+		// adding native support for ANSI/VT100 escape sequences.
+		terminal.EnableColorsStdout()
 	}

 	// Load filters
@@ -6,6 +6,7 @@ import (
 	"fmt"

 	"github.com/rclone/rclone/cmd"
+	"github.com/rclone/rclone/fs"
 	"github.com/rclone/rclone/fs/operations"
 	"github.com/spf13/cobra"
 )
@@ -27,12 +28,12 @@ it will always be removed.
 	},
 	Run: func(command *cobra.Command, args []string) {
 		cmd.CheckArgs(1, 1, command, args)
-		fs, fileName := cmd.NewFsFile(args[0])
+		f, fileName := cmd.NewFsFile(args[0])
 		cmd.Run(true, false, command, func() error {
 			if fileName == "" {
-				return fmt.Errorf("%s is a directory or doesn't exist", args[0])
+				return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
 			}
-			fileObj, err := fs.NewObject(context.Background(), fileName)
+			fileObj, err := f.NewObject(context.Background(), fileName)
 			if err != nil {
 				return err
 			}
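The point of wrapping with `%w` is that callers can branch on the sentinel rather than matching the message text. A minimal sketch (the argument value is hypothetical):

```
package main

import (
	"errors"
	"fmt"

	"github.com/rclone/rclone/fs"
)

func main() {
	arg := "remote:dir" // hypothetical command line argument
	err := fmt.Errorf("%s is a directory or doesn't exist: %w", arg, fs.ErrorObjectNotFound)
	// errors.Is unwraps through %w, so the sentinel is detectable:
	fmt.Println(errors.Is(err, fs.ErrorObjectNotFound)) // true
}
```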
@@ -60,12 +60,14 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
 		case "":
 			continue
 		case "remote", "fs":
-			p, err := fspath.Parse(str)
-			if err != nil || p.Name == ":" {
-				return fmt.Errorf("cannot parse path %q: %w", str, err)
+			if str != "" {
+				p, err := fspath.Parse(str)
+				if err != nil || p.Name == ":" {
+					return fmt.Errorf("cannot parse path %q: %w", str, err)
+				}
+				fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
+				vol.Fs = str
 			}
-			fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
-			vol.Fs = str
 		case "type":
 			fsType = str
 			vol.Type = str
@@ -18,6 +18,8 @@ import (
 	"github.com/rclone/rclone/fs/dirtree"
 	"github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/walk"
+	"github.com/rclone/rclone/lib/encoder"
+	"github.com/rclone/rclone/lib/terminal"
 	"github.com/spf13/cobra"
 )

@@ -26,6 +28,7 @@ var (
 	outFileName string
 	noReport    bool
 	sort        string
+	enc         = encoder.OS
 )

 func init() {
@@ -100,22 +103,26 @@ For a more interactive navigation of the remote see the
 	RunE: func(command *cobra.Command, args []string) error {
 		cmd.CheckArgs(1, 1, command, args)
 		fsrc := cmd.NewFsSrc(args)
-		outFile := os.Stdout
+		ci := fs.GetConfig(context.Background())
+		var outFile io.Writer
 		if outFileName != "" {
 			var err error
 			outFile, err = os.Create(outFileName)
 			if err != nil {
 				return fmt.Errorf("failed to create output file: %w", err)
 			}
+			opts.Colorize = false
+		} else {
+			terminal.Start()
+			outFile = terminal.Out
+			opts.Colorize = true
 		}
 		opts.VerSort = opts.VerSort || sort == "version"
 		opts.ModSort = opts.ModSort || sort == "mtime"
 		opts.CTimeSort = opts.CTimeSort || sort == "ctime"
 		opts.NameSort = sort == "name"
 		opts.SizeSort = sort == "size"
-		ci := fs.GetConfig(context.Background())
 		opts.UnitSize = ci.HumanReadable
-		opts.Colorize = ci.TerminalColorMode != fs.TerminalColorModeNever
 		if opts.DeepLevel == 0 {
 			opts.DeepLevel = ci.MaxDepth
 		}
@@ -158,7 +165,7 @@ type FileInfo struct {

 // Name is base name of the file
 func (to *FileInfo) Name() string {
-	return path.Base(to.entry.Remote())
+	return enc.FromStandardName(path.Base(to.entry.Remote()))
 }

 // Size in bytes for regular files; system-dependent for others
@@ -192,7 +199,7 @@ func (to *FileInfo) Sys() interface{} {

 // String returns the full path
 func (to *FileInfo) String() string {
-	return to.entry.Remote()
+	return filepath.FromSlash(enc.FromStandardPath(to.entry.Remote()))
 }

 // Fs maps an fs.Fs into a tree.Fs
@@ -207,6 +214,7 @@ func NewFs(dirs dirtree.DirTree) Fs {
 func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
 	defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
 	filePath = filepath.ToSlash(filePath)
+	filePath = enc.ToStandardPath(filePath)
 	filePath = strings.TrimLeft(filePath, "/")
 	if filePath == "" {
 		return &FileInfo{fs.NewDir("", time.Now())}, nil
@@ -222,13 +230,14 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
 func (dirs Fs) ReadDir(dir string) (names []string, err error) {
 	defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
 	dir = filepath.ToSlash(dir)
+	dir = enc.ToStandardPath(dir)
 	dir = strings.TrimLeft(dir, "/")
 	entries, ok := dirs[dir]
 	if !ok {
 		return nil, fmt.Errorf("couldn't find directory %q", dir)
 	}
 	for _, entry := range entries {
-		names = append(names, path.Base(entry.Remote()))
+		names = append(names, enc.FromStandardName(path.Base(entry.Remote())))
 	}
 	return
 }
@@ -684,3 +684,12 @@ put them back in again.` >}}
 * happyxhw <44490504+happyxhw@users.noreply.github.com>
 * Simmon Li (he/him) <hello@crespire.dev>
 * Matthias Baur <baurmatt@users.noreply.github.com>
+* Hunter Wittenborn <hunter@hunterwittenborn.com>
+* logopk <peter@kreuser.name>
+* Gerard Bosch <30733556+gerardbosch@users.noreply.github.com>
+* ToBeFree <github@tfrei.de>
+* NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com>
+* Peter Brunner <peter@lugoues.net>
+* Ninh Pham <dongian.rapclubkhtn@gmail.com>
+* Ryan Caezar Itang <sitiom@proton.me>
+* Peter Brunner <peter@psykhe.com>
@@ -146,9 +146,9 @@ The base directories on the both Path1 and Path2 filesystems must exist
 or bisync will fail. This is required for safety - that bisync can verify
 that both paths are valid.

-When using `--resync` a newer version of a file on the Path2 filesystem
-will be overwritten by the Path1 filesystem version.
-Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
+When using `--resync`, a newer version of a file either on Path1 or Path2
+filesystem, will overwrite the file on the other path (only the last version
+will be kept). Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).

 For a resync run, one of the paths may be empty (no files in the path tree).
 The resync run should result in files on both paths, else a normal non-resync
@@ -58,7 +58,7 @@ custom salt is effectively a second password that must be memorized.
 based on XSalsa20 cipher and Poly1305 for integrity.
 [Names](#name-encryption) (file- and directory names) are also encrypted
 by default, but this has some implications and is therefore
-possible to turned off.
+possible to be turned off.

 ## Configuration

@@ -165,6 +165,19 @@ developers so it may be out of date. Its current version is as below.

 [](https://repology.org/project/rclone/versions)

+### Scoop package manager {#windows-scoop}
+
+Make sure you have [Scoop](https://scoop.sh/) installed
+
+```
+scoop install rclone
+```
+
+Note that this is a third party installer not controlled by the rclone
+developers so it may be out of date. Its current version is as below.
+
+[](https://repology.org/project/rclone/versions)
+
 ## Package manager installation {#package-manager}

 Many Linux, Windows, macOS and other OS distributions package and
@@ -168,10 +168,19 @@ OneDrive allows modification times to be set on objects accurate to 1
 second. These will be used to detect whether objects need syncing or
 not.

-OneDrive personal supports SHA1 type hashes. OneDrive for business and
-Sharepoint Server support
+OneDrive Personal, OneDrive for Business and Sharepoint Server support
 [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).

+Before rclone 1.62 the default hash for Onedrive Personal was `SHA1`.
+For rclone 1.62 and above the default for all Onedrive backends is
+`QuickXorHash`.
+
+Starting from July 2023 `SHA1` support is being phased out in Onedrive
+Personal in favour of `QuickXorHash`. If necessary the
+`--onedrive-hash-type` flag (or `hash_type` config option) can be used
+to select `SHA1` during the transition period if this is important to
+your workflow.
+
 For all types of OneDrive you can use the `--checksum` flag.

 ### Restricted filename characters
@@ -39,7 +39,7 @@ Here is an overview of the major features of each cloud storage system.
 | Mega                         | -              | -   | No  | Yes | -   | -   |
 | Memory                       | MD5            | R/W | No  | No  | -   | -   |
 | Microsoft Azure Blob Storage | MD5            | R/W | No  | No  | R/W | -   |
-| Microsoft OneDrive           | SHA1 ⁵         | R/W | Yes | No  | R   | -   |
+| Microsoft OneDrive           | QuickXorHash ⁵ | R/W | Yes | No  | R   | -   |
 | OpenDrive                    | MD5            | R/W | Yes | Partial ⁸ | - | - |
 | OpenStack Swift              | MD5            | R/W | No  | No  | R/W | -   |
 | Oracle Object Storage        | MD5            | R/W | No  | No  | R/W | -   |
@@ -72,9 +72,7 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.

 ⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.

-⁵ Microsoft OneDrive Personal supports SHA1 hashes, whereas OneDrive
-for business and SharePoint server support Microsoft's own
-[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
+⁵ [QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) is Microsoft's own hash.

 ⁶ Mail.ru uses its own modified SHA1 hash
@@ -689,8 +689,8 @@ func (s *StatsInfo) RetryAfter() time.Time {
 }

 // NewCheckingTransfer adds a checking transfer to the stats, from the object.
-func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry) *Transfer {
-	tr := newCheckingTransfer(s, obj)
+func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry, what string) *Transfer {
+	tr := newCheckingTransfer(s, obj, what)
 	s.checking.add(tr)
 	return tr
 }
@@ -720,7 +720,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry) *Transfer {

 // NewTransferRemoteSize adds a transfer to the stats based on remote and size.
 func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64) *Transfer {
-	tr := newTransferRemoteSize(s, remote, size, false)
+	tr := newTransferRemoteSize(s, remote, size, false, "")
 	s.transferring.add(tr)
 	s.startAverageLoop()
 	return tr
@@ -50,6 +50,7 @@ type Transfer struct {
 	size      int64
 	startedAt time.Time
 	checking  bool
+	what      string // what kind of transfer this is

 	// Protects all below
 	//
@@ -63,22 +64,23 @@ type Transfer struct {
 }

 // newCheckingTransfer instantiates new checking of the object.
-func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
-	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true)
+func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry, what string) *Transfer {
+	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true, what)
 }

 // newTransfer instantiates new transfer.
 func newTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
-	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false)
+	return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false, "")
 }

-func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool) *Transfer {
+func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool, what string) *Transfer {
 	tr := &Transfer{
 		stats:     stats,
 		remote:    remote,
 		size:      size,
 		startedAt: time.Now(),
 		checking:  checking,
+		what:      what,
 	}
 	stats.AddTransfer(tr)
 	return tr
@@ -98,6 +98,7 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
 	ci := fs.GetConfig(ctx)
 	stringList := make([]string, 0, len(tm.items))
 	for _, tr := range tm._sortedSlice() {
+		var what = tr.what
 		if exclude != nil {
 			exclude.mu.RLock()
 			_, found := exclude.items[tr.remote]
@@ -109,11 +110,17 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
 		var out string
 		if acc := progress.get(tr.remote); acc != nil {
 			out = acc.String()
+			if what != "" {
+				out += ", " + what
+			}
 		} else {
+			if what == "" {
+				what = tm.name
+			}
 			out = fmt.Sprintf("%*s: %s",
 				ci.StatsFileNameLength,
 				shortenName(tr.remote, ci.StatsFileNameLength),
-				tm.name,
+				what,
 			)
 		}
 		stringList = append(stringList, " * "+out)
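Putting the pieces together, a caller passes a label which either augments the progress line or replaces the generic map name in the stats output. A minimal sketch, assuming a non-main package for illustration:

```
package example

import (
	"context"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
)

// checkOne labels its transfer; the string ("checking", "hashing",
// "deleting", ...) is what shows up in the stats output above.
func checkOne(ctx context.Context, obj fs.DirEntry) (err error) {
	tr := accounting.Stats(ctx).NewCheckingTransfer(obj, "checking")
	defer func() { tr.Done(ctx, err) }()
	// ... perform the actual check here ...
	return nil
}
```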
@@ -120,7 +120,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
 // check to see if two objects are identical using the check function
 func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
 	ci := fs.GetConfig(ctx)
-	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
+	tr := accounting.Stats(ctx).NewCheckingTransfer(src, "checking")
 	defer func() {
 		tr.Done(ctx, err)
 	}()
@@ -450,7 +450,7 @@ func (c *checkMarch) checkSum(ctx context.Context, obj fs.Object, download bool,
 	}

 	var err error
-	tr := accounting.Stats(ctx).NewCheckingTransfer(obj)
+	tr := accounting.Stats(ctx).NewCheckingTransfer(obj, "hashing")
 	defer tr.Done(ctx, err)

 	if !sumFound {
@@ -286,7 +286,7 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*d
 	ci := fs.GetConfig(ctx)
 	err = walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error {
 		for _, entry := range entries {
-			tr := accounting.Stats(ctx).NewCheckingTransfer(entry)
+			tr := accounting.Stats(ctx).NewCheckingTransfer(entry, "merging")

 			remote := entry.Remote()
 			parentRemote := path.Dir(remote)
@@ -438,7 +438,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool
 	files := map[string][]fs.Object{}
 	err := walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
 		entries.ForObject(func(o fs.Object) {
-			tr := accounting.Stats(ctx).NewCheckingTransfer(o)
+			tr := accounting.Stats(ctx).NewCheckingTransfer(o, "checking")
 			defer tr.Done(ctx, nil)

 			var remote string
@@ -544,7 +544,7 @@ func SameObject(src, dst fs.Object) bool {
 // be nil.
 func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
 	ci := fs.GetConfig(ctx)
-	tr := accounting.Stats(ctx).NewCheckingTransfer(src)
+	tr := accounting.Stats(ctx).NewCheckingTransfer(src, "moving")
 	defer func() {
 		if err == nil {
 			accounting.Stats(ctx).Renames(1)
@@ -633,7 +633,7 @@ func SuffixName(ctx context.Context, remote string) string {
 // deleting
 func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
 	ci := fs.GetConfig(ctx)
-	tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
+	tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
 	defer func() {
 		tr.Done(ctx, err)
 	}()
@@ -678,11 +678,11 @@ func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
 func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
 	var wg sync.WaitGroup
 	ci := fs.GetConfig(ctx)
-	wg.Add(ci.Transfers)
+	wg.Add(ci.Checkers)
 	var errorCount int32
 	var fatalErrorCount int32

-	for i := 0; i < ci.Transfers; i++ {
+	for i := 0; i < ci.Checkers; i++ {
 		go func() {
 			defer wg.Done()
 			for dst := range toBeDeleted {
@@ -938,7 +938,7 @@ func List(ctx context.Context, f fs.Fs, w io.Writer) error {
 func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
 	ci := fs.GetConfig(ctx)
 	return ListFn(ctx, f, func(o fs.Object) {
-		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
+		tr := accounting.Stats(ctx).NewCheckingTransfer(o, "listing")
 		defer func() {
 			tr.Done(ctx, nil)
 		}()
@@ -996,7 +996,7 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
 			return "ERROR", fmt.Errorf("hasher returned an error: %w", err)
 		}
 	} else {
-		tr := accounting.Stats(ctx).NewCheckingTransfer(o)
+		tr := accounting.Stats(ctx).NewCheckingTransfer(o, "hashing")
 		defer func() {
 			tr.Done(ctx, err)
 		}()
@@ -1022,7 +1022,12 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
 // Updated to perform multiple hashes concurrently
 func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
 	width := hash.Width(ht, outputBase64)
-	concurrencyControl := make(chan struct{}, fs.GetConfig(ctx).Transfers)
+	// Use --checkers concurrency unless downloading in which case use --transfers
+	concurrency := fs.GetConfig(ctx).Checkers
+	if downloadFlag {
+		concurrency = fs.GetConfig(ctx).Transfers
+	}
+	concurrencyControl := make(chan struct{}, concurrency)
 	var wg sync.WaitGroup
 	err := ListFn(ctx, f, func(o fs.Object) {
 		wg.Add(1)
@@ -1173,7 +1178,7 @@ func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
 // obeys includes and excludes.
 func Delete(ctx context.Context, f fs.Fs) error {
 	ci := fs.GetConfig(ctx)
-	delChan := make(fs.ObjectsChan, ci.Transfers)
+	delChan := make(fs.ObjectsChan, ci.Checkers)
 	delErr := make(chan error, 1)
 	go func() {
 		delErr <- DeleteFiles(ctx, delChan)
@@ -1929,7 +1934,6 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str

 		_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
 	} else {
-		tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
 		if !cp {
 			if ci.IgnoreExisting {
 				fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
@@ -1937,7 +1941,6 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
 				err = DeleteFile(ctx, srcObj)
 			}
 		}
-		tr.Done(ctx, err)
 	}
 	return err
 }
@@ -2189,9 +2192,9 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
 		o       fs.Object
 		newPath string
 	}
-	renames := make(chan rename, ci.Transfers)
+	renames := make(chan rename, ci.Checkers)
 	g, gCtx := errgroup.WithContext(context.Background())
-	for i := 0; i < ci.Transfers; i++ {
+	for i := 0; i < ci.Checkers; i++ {
 		g.Go(func() error {
 			for job := range renames {
 				dstOverwritten, _ := f.NewObject(gCtx, job.newPath)
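The practical effect of the `HashLister` change: server-side sums scale with `--checkers`, while `--download` falls back to `--transfers` since it moves data. A hedged CLI sketch (`remote:` is a placeholder):

```
rclone hashsum MD5 remote: --checkers 16
rclone hashsum MD5 remote: --download --transfers 8
```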
@@ -329,7 +329,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
 		}
 		src := pair.Src
 		var err error
-		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
+		tr := accounting.Stats(s.ctx).NewCheckingTransfer(src, "checking")
 		// Check to see if can store this
 		if src.Storable() {
 			needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
@@ -537,7 +537,7 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
 	}

 	// Delete the spare files
-	toDelete := make(fs.ObjectsChan, s.ci.Transfers)
+	toDelete := make(fs.ObjectsChan, s.ci.Checkers)
 	go func() {
 	outer:
 		for remote, o := range s.dstFiles {
@@ -772,14 +772,14 @@ func (s *syncCopyMove) makeRenameMap() {
 	// now make a map of size,hash for all dstFiles
 	s.renameMap = make(map[string][]fs.Object)
 	var wg sync.WaitGroup
-	wg.Add(s.ci.Transfers)
-	for i := 0; i < s.ci.Transfers; i++ {
+	wg.Add(s.ci.Checkers)
+	for i := 0; i < s.ci.Checkers; i++ {
 		go func() {
 			defer wg.Done()
 			for obj := range in {
 				// only create hash for dst fs.Object if its size could match
 				if _, found := possibleSizes[obj.Size()]; found {
-					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj)
+					tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj, "renaming")
 					hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)

 					if hash != "" {
go.mod
@@ -32,6 +32,7 @@ require (
 	github.com/hirochachacha/go-smb2 v1.1.0
 	github.com/iguanesolutions/go-systemd/v5 v5.1.0
 	github.com/jcmturner/gokrb5/v8 v8.4.3
+	github.com/jlaffaye/ftp v0.1.1-0.20230214004652-d84bf4be2b6e
 	github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004
 	github.com/klauspost/compress v1.15.14
 	github.com/koofr/go-httpclient v0.0.0-20221124135700-2eb26cff5dd8
@@ -47,7 +48,6 @@ require (
 	github.com/pmezard/go-difflib v1.0.0
 	github.com/prometheus/client_golang v1.14.0
 	github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
-	github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6
 	github.com/rfjakob/eme v1.1.2
 	github.com/rivo/uniseg v0.4.3
 	github.com/shirou/gopsutil/v3 v3.22.12
@@ -56,13 +56,13 @@ require (
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.8.1
-	github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf
+	github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca
 	github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0
 	github.com/xanzy/ssh-agent v0.3.3
 	github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
 	github.com/yunify/qingstor-sdk-go/v3 v3.2.0
 	go.etcd.io/bbolt v1.3.6
-	goftp.io/server v0.4.1
+	goftp.io/server v0.4.2-0.20210615155358-d07a820aac35
 	golang.org/x/crypto v0.5.0
 	golang.org/x/net v0.7.0
 	golang.org/x/oauth2 v0.4.0
go.sum
@@ -277,8 +277,9 @@ github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf h1:2IYBd5TD/maMqTU2YUzp2tJL4cNaOYQ9EBullN9t9pk=
|
||||
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
|
||||
github.com/jlaffaye/ftp v0.1.1-0.20230214004652-d84bf4be2b6e h1:Xofa5zcfulLjSb9ZNpb7MI9TFCpVkPCy3JSwrL7xoWE=
|
||||
github.com/jlaffaye/ftp v0.1.1-0.20230214004652-d84bf4be2b6e/go.mod h1:sRSt+7UoQ5BgrZhwta4kr7N5SenQsoIZHMJHY7+zqJg=
|
||||
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
|
||||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
@@ -407,8 +408,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
|
||||
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
|
||||
github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6 h1:J832KfU2Z44Ck3XR5bvw2UxShP0QnjueruNQ6dTYH+g=
|
||||
github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6/go.mod h1:qRpxqlna6CaIq9fSRud1bDC5S7EEUEou0j8nMZ0lxO8=
|
||||
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
|
||||
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
@@ -462,6 +461,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf h1:Y43S3e9P1NPs/QF4R5/SdlXj2d31540hP4Gk8VKNvDg=
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf/go.mod h1:c+cGNU1qi9bO7ZF4IRMYk+KaZTNiQ/gQrSbyMmGFq1Q=
github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca h1:I9rVnNXdIkij4UvMT7OmKhH9sOIvS8iXkxfPdnn9wQA=
github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
@@ -504,8 +505,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
goftp.io/server v0.4.1 h1:x7KG4HIxSMdK/rpYhExMinRN/aO/T9icvaG/B5e/XfY=
goftp.io/server v0.4.1/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
goftp.io/server v0.4.2-0.20210615155358-d07a820aac35 h1:D4DhKKOtievTsshtbA6W0XL/gBjERF5/vu6Vhmb4sBw=
goftp.io/server v0.4.2-0.20210615155358-d07a820aac35/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -29,6 +29,19 @@ func Split(absPath string) (bucket, bucketPath string) {
	return absPath[:slash], absPath[slash+1:]
}

// Join path1 and path2
//
// Like path.Join but does not clean the path - useful to preserve trailing /
func Join(path1, path2 string) string {
	if path1 == "" {
		return path2
	}
	if path2 == "" {
		return path1
	}
	return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
}

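The difference from path.Join is easiest to see side by side; a minimal sketch, assuming the package import path github.com/rclone/rclone/lib/bucket:

package main

import (
	"fmt"
	"path"

	"github.com/rclone/rclone/lib/bucket"
)

func main() {
	// path.Join cleans the result, losing the trailing slash
	fmt.Println(path.Join("in1", "in2/")) // in1/in2
	// bucket.Join preserves it, and leaves ".." elements alone
	fmt.Println(bucket.Join("in1", "in2/"))    // in1/in2/
	fmt.Println(bucket.Join("/in1", "../in2")) // /in1/../in2
}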
// Cache stores whether buckets are available and their IDs
type Cache struct {
	mu sync.Mutex // mutex to protect created and deleted
@@ -2,6 +2,7 @@ package bucket

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"
@@ -24,6 +25,26 @@ func TestSplit(t *testing.T) {
	}
}

func TestJoin(t *testing.T) {
	for _, test := range []struct {
		in1, in2 string
		want     string
	}{
		{in1: "", in2: "", want: ""},
		{in1: "in1", in2: "", want: "in1"},
		{in1: "", in2: "in2", want: "in2"},
		{in1: "in1", in2: "in2", want: "in1/in2"},
		{in1: "in1/", in2: "in2", want: "in1/in2"},
		{in1: "in1", in2: "/in2", want: "in1/in2"},
		{in1: "in1", in2: "in2/", want: "in1/in2/"},
		{in1: "/in1", in2: "/in2", want: "/in1/in2"},
		{in1: "/in1", in2: "../in2", want: "/in1/../in2"},
	} {
		got := Join(test.in1, test.in2)
		assert.Equal(t, test.want, got, fmt.Sprintf("in1=%q, in2=%q", test.in1, test.in2))
	}
}

func TestCache(t *testing.T) {
	c := NewCache()
	errBoom := errors.New("boom")
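The new table-driven test can be exercised on its own with the standard test runner (package path as in this repository):

go test ./lib/bucket -run TestJoin -v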
@@ -111,3 +111,14 @@ func Write(out []byte) {
	Start()
	_, _ = Out.Write(out)
}

// EnableColorsStdout enables colors if possible.
// This enables virtual terminal processing on the Windows 10 console,
// adding native support for VT100 escape codes. When this terminal
// package is used for output, the colorable library doesn't have to
// decode the escapes and explicitly write colored text to the console
// using Windows API functions, but can simply relay everything to stdout.
func EnableColorsStdout() {
	_ = colorable.EnableColorsStdout(nil)
}
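A sketch of the intended call pattern (assuming the import path github.com/rclone/rclone/lib/terminal; the escape codes are illustrative):

package main

import (
	"fmt"

	"github.com/rclone/rclone/lib/terminal"
)

func main() {
	// A no-op except on Windows consoles, so safe to call unconditionally.
	terminal.EnableColorsStdout()
	terminal.Start()
	// With VT processing enabled, raw escape codes pass straight through.
	fmt.Fprintln(terminal.Out, "\x1b[31mred\x1b[0m")
}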
vfs/dir.go
@@ -325,10 +325,15 @@ func (d *Dir) renameTree(dirPath string) {
		d.entry = fs.NewDirCopy(context.TODO(), d.entry).SetRemote(dirPath)
	}

	// Do the same to any child directories
	// Do the same to any child directories and files
	for leaf, node := range d.items {
		if dir, ok := node.(*Dir); ok {
			dir.renameTree(path.Join(dirPath, leaf))
		switch x := node.(type) {
		case *Dir:
			x.renameTree(path.Join(dirPath, leaf))
		case *File:
			x.renameDir(dirPath)
		default:
			panic("bad dir entry")
		}
	}
}
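The if/type-assertion becomes a type switch so files get repointed too, and the panic default documents that Node currently has exactly these concrete types. A standalone sketch of the pattern (all names here are illustrative, not rclone's):

package main

import "fmt"

// node stands in for vfs.Node; dir and file for *vfs.Dir and *vfs.File.
type node interface{ isNode() }
type dir struct{ leaf string }
type file struct{ leaf string }

func (dir) isNode()  {}
func (file) isNode() {}

func renameNode(n node, dirPath string) {
	switch x := n.(type) {
	case dir:
		fmt.Println("recurse into", dirPath+"/"+x.leaf)
	case file:
		fmt.Println("repoint file at new parent", dirPath)
	default:
		// Catches any implementation added later but not handled here.
		panic("bad dir entry")
	}
}

func main() {
	renameNode(dir{leaf: "sub"}, "new/path")
	renameNode(file{leaf: "a.txt"}, "new/path")
}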
@@ -7,6 +7,7 @@ import (
	"sort"
	"testing"
	"time"
	"unsafe"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
@@ -577,3 +578,7 @@ func TestDirRename(t *testing.T) {
	err = dir.Rename("potato", "tuba", dir)
	assert.Equal(t, EROFS, err)
}

func TestDirStructSize(t *testing.T) {
	t.Logf("Dir struct has size %d bytes", unsafe.Sizeof(Dir{}))
}
vfs/file.go
@@ -141,6 +141,13 @@ func (f *File) Node() Node {
	return f
}

// renameDir - call when the parent directory has been renamed
func (f *File) renameDir(dPath string) {
	f.mu.Lock()
	f.dPath = dPath
	f.mu.Unlock()
}

// applyPendingRename runs a previously set rename operation if there are no
// more remaining writers. Call without lock held.
func (f *File) applyPendingRename() {
@@ -296,6 +303,9 @@ func (f *File) activeWriters() int {
// It should be called with the lock held
func (f *File) _roundModTime(modTime time.Time) time.Time {
	precision := f.d.f.Precision()
	if precision == fs.ModTimeNotSupported {
		return modTime
	}
	return modTime.Truncate(precision)
}

@@ -312,7 +322,9 @@ func (f *File) ModTime() (modTime time.Time) {
	}
	// Read the modtime from a dirty item if it exists
	if f.d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal {
		if item := f.d.vfs.cache.DirtyItem(f._path()); item != nil {
		item := f.d.vfs.cache.ItemOrNil(f._path())
		noModTime := f.d.f.Precision() == fs.ModTimeNotSupported
		if item != nil && (item.IsDirty() || noModTime) {
			modTime, err := item.GetModTime()
			if err != nil {
				fs.Errorf(f._path(), "ModTime: Item GetModTime failed: %v", err)
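The _roundModTime helper relies on time.Truncate rounding down to the backend's precision; a small self-contained illustration with made-up values:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2023, 3, 1, 12, 0, 0, 900e6, time.UTC) // 12:00:00.9
	fmt.Println(t.Truncate(time.Second))      // 2023-03-01 12:00:00 +0000 UTC
	fmt.Println(t.Truncate(time.Millisecond)) // 2023-03-01 12:00:00.9 +0000 UTC
}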
@@ -6,6 +6,7 @@ import (
	"io"
	"os"
	"testing"
	"unsafe"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/operations"
@@ -411,3 +412,7 @@ func TestFileRename(t *testing.T) {
		})
	}
}

func TestFileStructSize(t *testing.T) {
	t.Logf("File struct has size %d bytes", unsafe.Sizeof(File{}))
}
@@ -294,14 +294,22 @@ func (c *Cache) put(name string, item *Item) (oldItem *Item) {
	return oldItem
}

// ItemOrNil returns the Item if it exists in the cache otherwise it
// returns nil.
//
// name should be a remote path not an osPath
func (c *Cache) ItemOrNil(name string) (item *Item) {
	name = clean(name)
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.item[name]
}

// InUse returns whether the name is in use in the cache
//
// name should be a remote path not an osPath
func (c *Cache) InUse(name string) bool {
	name = clean(name)
	c.mu.Lock()
	item := c.item[name]
	c.mu.Unlock()
	item := c.ItemOrNil(name)
	if item == nil {
		return false
	}
@@ -313,10 +321,7 @@ func (c *Cache) InUse(name string) bool {
//
// name should be a remote path not an osPath
func (c *Cache) DirtyItem(name string) (item *Item) {
	name = clean(name)
	c.mu.Lock()
	defer c.mu.Unlock()
	item = c.item[name]
	item = c.ItemOrNil(name)
	if item != nil && !item.IsDirty() {
		item = nil
	}
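Both call sites now funnel through ItemOrNil, so the map is only ever read under the mutex in one place. A minimal standalone sketch of that shape (simplified types, names mine):

package main

import (
	"fmt"
	"sync"
)

type item struct{ dirty bool }

type cache struct {
	mu    sync.Mutex
	items map[string]*item
}

// itemOrNil is the single place that touches the map under the lock.
func (c *cache) itemOrNil(name string) *item {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.items[name]
}

// dirtyItem reuses itemOrNil instead of duplicating the locking.
func (c *cache) dirtyItem(name string) *item {
	if it := c.itemOrNil(name); it != nil && it.dirty {
		return it
	}
	return nil
}

func main() {
	c := &cache{items: map[string]*item{"a": {dirty: true}}}
	fmt.Println(c.dirtyItem("a") != nil, c.dirtyItem("b") != nil) // true false
}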
@@ -1223,7 +1223,7 @@ func (item *Item) setModTime(modTime time.Time) {
	item.mu.Unlock()
}

// GetModTime of the cache file
// GetModTime of the cache item
func (item *Item) GetModTime() (modTime time.Time, err error) {
	// defer log.Trace(item.name, "modTime=%v", modTime)("")
	item.mu.Lock()
@@ -1231,6 +1231,9 @@ func (item *Item) GetModTime() (modTime time.Time, err error) {
	fi, err := item._stat()
	if err == nil {
		modTime = fi.ModTime()
		item.info.ModTime = modTime
	} else {
		modTime = item.info.ModTime
	}
	return modTime, nil
}