mirror of https://github.com/rclone/rclone.git synced 2026-01-26 22:33:35 +00:00

Compare commits


43 Commits

Author SHA1 Message Date
Nick Craig-Wood
1a66ed9315 vfs: make uploaded files retain modtime with non-modtime backends
Before this change, if a file was uploaded to a backend which didn't
support modtimes, the modtime read back after the upload had completed
would change to the time the file was uploaded on the backend.

When using `--vfs-cache-mode writes` or `full` this time would differ
by the `--vfs-write-back` delay, which would cause applications to
think the file had been modified.

This change uses the last modification time read in the cache as the
modtime for backends which don't support setting modtimes. It does not
change the modtime to the one actually uploaded.

This means that as long as the file remains in the directory cache it
will have the expected modtime.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451
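As an illustration only, a minimal sketch of the idea (hypothetical names, not the actual VFS code):

    // If the backend cannot store modtimes, report the time recorded in
    // the cache rather than the time the upload happened.
    func (f *File) ModTime() time.Time {
        if f.fs.Precision() == fs.ModTimeNotSupported {
            return f.cachedModTime // last modification time seen in the cache
        }
        return f.remoteModTime // modtime read back from the backend
    }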
2023-03-08 12:10:50 +00:00
Nick Craig-Wood
f07abea072 vfs: cache: factor common code from InUse and DirtyItem into ItemOrNil 2023-03-08 11:51:13 +00:00
Nick Craig-Wood
fb4600f6f9 tree: fix display of files with illegal Windows file system names
Before this change, files with illegal Windows names (eg those
containing \) would not be displayed properly in tree.

This change adds the local encoding to the Windows file names so \
will be displayed as its wide Unicode equivalent.

See: https://forum.rclone.org/t/error-with-build-v1-61-1-tree-command-panic-runtime-error-invalid-memory-address-or-nil-pointer-dereference/35922/
2023-03-07 15:30:11 +00:00
Nick Craig-Wood
1d0c75b0c2 ftp: retry errors when initiating downloads
This adds a retry loop to the Open() call in the FTP backend so it can
retry failures opening files.

This should make downloading multipart files more reliable.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
2e435af4de ftp: retry 426 errors
Before this change we didn't retry 426 errors, which are

    426 Connection closed; transfer aborted.

Or in this particular case

    426 Failure writing network stream.

These seem like they might be temporary so retry them.

See: https://forum.rclone.org/t/downloads-fail-from-remote-server-error-426-failure-writing-network-stream/33839/
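A hedged sketch of the classification (the actual change is in the FTP backend's shouldRetry, see the diff below):

    // Treat "426 Connection closed; transfer aborted" as temporary.
    if errX, ok := err.(*textproto.Error); ok {
        if errX.Code == ftp.StatusTransfertAborted { // 426
            return true, err
        }
    }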
2023-03-07 12:34:20 +00:00
Nick Craig-Wood
62a7765e57 smb: allow SPN (service principal name) to be configured
This enables connection to clusters.

Fixes #6515
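For example, a remote for a clustered server might set it like this (hypothetical host and port, following the form shown in the backend help below):

    [cluster]
    type = smb
    host = remotehost
    user = rclone
    spn = cifs/remotehost:1020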
2023-03-07 12:18:32 +00:00
Nick Craig-Wood
5ad942ed87 local: fix exclusion of dangling symlinks with -L/--copy-links
Before this fix, a dangling symlink caused the sync to error: it wrote
an ERROR log and made rclone exit with an error. The List method wasn't
returning an error though.

This fix makes sure that we don't log or report a global error on a
file/directory that has been excluded.

This feature was first implemented in:

a61d219bc local: fix -L/--copy-links with filters missing directories

Then fixed in:

8d1fff9a8 local: obey file filters in listing to fix errors on excluded files

This commit also adds test cases for the failure modes of those commits.

See #6376
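As an illustration (hypothetical paths), an excluded dangling symlink should no longer log an ERROR or set a non-zero exit code:

    rclone lsf -L --exclude dangling.link /path/to/dir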
2023-03-07 12:15:10 +00:00
Nick Craig-Wood
96609e3d6e ftp: revert to upstream github.com/jlaffaye/ftp now fix is merged
This reverts to using the upstream library now that the patch to fix a
hang when using ExplicitTLS with certain servers has been merged.

Fixes #6426
2023-03-07 12:12:07 +00:00
Nick Craig-Wood
28a8ebce5b vfs: fix rename of directory containing files to be uploaded
Before this change, if you renamed a directory containing files yet to
be uploaded and then deleted the directory, the files would still be
uploaded.

This fixes the problem by changing the directory path in all the file
objects in a directory when it is renamed. This wasn't necessary until
we introduced virtual files and directories which lived beyond the
directory flush mechanism.

Fixes #6809
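A minimal sketch of the approach (hypothetical names, not the actual VFS code):

    // On rename, walk the directory's children and point each file at
    // its new path so pending uploads target the renamed location.
    func (d *Dir) setPath(newPath string) {
        d.path = newPath
        for leaf, node := range d.items {
            if file, ok := node.(*File); ok {
                file.setPath(path.Join(newPath, leaf))
            }
        }
    }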
2023-03-07 11:40:50 +00:00
Nick Craig-Wood
17854663de vfs: log size of File and Dir in tests for optimization 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
a4a6b5930a Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e9ae620844 Add Ryan Caezar Itang to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
e7cfb8ad8e Add Ninh Pham to contributors 2023-03-07 11:40:50 +00:00
Nick Craig-Wood
786a1c212c Add Peter Brunner to contributors 2023-03-07 11:40:50 +00:00
Peter Brunner
03bc270730 gcs: fix google cloud storage provider help 2023-03-07 11:39:02 +00:00
Ryan Caezar Itang
7cef042231 docs: add scoop installation method 2023-03-07 11:36:07 +00:00
Ninh Pham
1155cc0d3f drive: Make --drive-stop-on-upload-limit respond to storageQuotaExceeded
Before this change, if --drive-stop-on-upload-limit was set,
rclone would not stop the upload when a "storageQuotaExceeded" error occurred.

This fix now checks for the "storageQuotaExceeded" error
and "--drive-stop-on-upload-limit", and fails fast.
2023-03-07 11:00:08 +00:00
Peter Brunner
13c3f67ab0 gcs: add env_auth to pick up IAM credentials from env/instance
This change provides the ability to pass `env_auth` as a parameter to
the google cloud storage provider. This enables the provider to pull IAM
credentials from the environment or instance metadata. Previously if no
auth method was given it would default to requesting oauth.
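For example (hypothetical remote name):

    [gcs]
    type = google cloud storage
    env_auth = true

With this set, rclone picks up credentials from the environment or the instance metadata instead of prompting for oauth.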
2023-03-06 18:18:33 +00:00
Nick Craig-Wood
ab2cdd840f serve ftp: fix timestamps older than 1 year in listings
Fixes #6785
2023-03-06 15:59:56 +00:00
Nick Craig-Wood
143285e2b7 vfs: fix incorrect modtime on fs which don't support setting modtime
Before this change we were using the Precision literally to round the
mod times.

However, fs.ModTimeNotSupported is 100y on backends which don't support
setting modtimes, so rounding to 100y was producing very strange
results.

See: https://forum.rclone.org/t/saving-files-causes-wrong-modified-time-to-be-set-for-a-few-seconds-on-webdav-mount-with-bitrix24/36451/
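A sketch of the guard (hypothetical placement, not the actual code):

    // Only round with the backend precision when it is a real precision,
    // not the ~100y sentinel meaning "modtimes unsupported".
    if precision := f.Precision(); precision != fs.ModTimeNotSupported {
        modTime = modTime.Truncate(precision)
    }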
2023-03-06 10:54:21 +00:00
Nick Craig-Wood
19e8c8d42a s3: make purge remove directory markers too
See: https://forum.rclone.org/t/cannot-purge-aws-s3/36169/
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
de9c4a3611 s3: use bucket.Join instead of path.Join to preserve paths
Before this change, path.Join would remove the trailing / from objects
which had them. The simplified bucket.Join does not.
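The difference, illustrated (a sketch of the idea, not the actual bucket.Join):

    path.Join("bucket", "dir/") // "bucket/dir" - trailing / is cleaned away

    // A simplified join can keep the trailing / of the second argument:
    func join(a, b string) string {
        if a == "" {
            return b
        }
        if b == "" {
            return a
        }
        return strings.TrimSuffix(a, "/") + "/" + b
    }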
2023-03-03 15:51:00 +00:00
Nick Craig-Wood
d7ad13d929 bucket: add Join function for a simplified path.Join 2023-03-03 15:51:00 +00:00
albertony
f9d50f677d lib/terminal: enable windows console virtual terminal sequences processing (ANSI/VT100 colors)
This ensures the virtual terminal processing mode is enabled on the rclone process
for Windows 10 consoles (by using the Windows Console API functions GetConsoleMode/SetConsoleMode
and the flag ENABLE_VIRTUAL_TERMINAL_PROCESSING), which adds native support for ANSI/VT100
escape sequences. This mode is the default in many cases, e.g. when using the Windows
Terminal application, but in other cases it is not, and the default can also be
controlled with a registry setting (see below). Configuring it on the process therefore
seems to be the only reliable way of ensuring it is enabled when supported.

[HKEY_CURRENT_USER\Console]
"VirtualTerminalLevel"=dword:00000001
2023-03-03 12:37:01 +01:00
albertony
3641993fab tree: fix colored output on windows
Since rclone version 1.61.0 the tree command uses ANSI color sequences in output by
default, but this led to issues in Windows terminals that were not handling these (#6668).

This commit ensures the tree command uses the terminal package for output. It relies on
go-colorable to properly handle ANSI color sequences: if stdout is connected to a terminal
the escape sequences are decoded and the text is written with color formatting using the
Windows Console API. If stdout is not connected to a terminal, e.g. redirected to a file,
the escape sequences are stripped off. The tree command has its own method for writing
directly to a file, specified with the flag --output; that output is not passed through
the terminal package and must therefore be written without ANSI codes.
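A sketch of the wiring, assuming go-colorable and go-isatty underneath (hypothetical, simplified):

    // Render ANSI via the Console API on a terminal; strip it on redirect.
    var out io.Writer
    if isatty.IsTerminal(os.Stdout.Fd()) {
        out = colorable.NewColorableStdout()
    } else {
        out = colorable.NewNonColorable(os.Stdout)
    }
    fmt.Fprintln(out, "\x1b[01;34mdir\x1b[0m")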
2023-03-03 12:37:01 +01:00
Nick Craig-Wood
93d3ae04c7 deletefile: return error code 4 if file does not exist
Before this change `rclone deletefile` would return error code 1 if
the file it was trying to delete does not exist.

Rclone can't actually tell at this point whether the file doesn't
exist or what you tried to delete is a directory, but it seems more
logical to return error code 4 "object not found" here.

See: https://forum.rclone.org/t/rclone-deletefile-cmd-return-exit-code-1-when-file-not-found-in-remote-why-1-and-not-exit-code-4/
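For example (hypothetical remote path):

    rclone deletefile remote:no/such/file
    echo $?   # now 4 ("object not found") rather than 1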
2023-03-03 09:51:23 +00:00
Nick Craig-Wood
e25e9fbf22 Add NodudeWasTaken to contributors 2023-03-03 09:51:23 +00:00
NodudeWasTaken
fe26d6116d mega: add --mega-use-https flag
Some ISPs throttle HTTP which MEGA uses by default, so some users may find using HTTPS beneficial.
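For example (hypothetical remote name):

    [mega]
    type = mega
    use_https = true

or equivalently the --mega-use-https flag on the command line.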
2023-03-02 20:28:10 +00:00
Fred
06e1e18793 seafile: fix for flaky tests #6799 2023-03-02 20:03:25 +00:00
Nick Craig-Wood
23d17b76be onedrive: default onedrive personal to QuickXorHash
Before this change the hash used for OneDrive Personal was SHA1. From
July 2023 Microsoft is phasing out SHA1 hashes in favour of
QuickXorHash in OneDrive Personal. OneDrive Business and Sharepoint
continue to use QuickXorHash as before.

This choice can be changed using the --onedrive-hash-type flag (and
config option) so that SHA1 can be selected while it is still
available in the transition period.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
dfe4e78a77 onedrive: add --onedrive-hash-type to change the hash in use
In preparation for Microsoft removing the SHA1 hash on OneDrive
Personal this allows the hash type to be set on OneDrive.

See: https://forum.rclone.org/t/microsoft-is-switching-onedrive-personal-to-quickxorhash-from-sha1/36296/
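For example, to keep using SHA1 during the transition period (hypothetical remote name):

    rclone hashsum sha1 --onedrive-hash-type sha1 onedrive:path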
2023-03-02 19:32:35 +00:00
Nick Craig-Wood
59e7982040 s3: add --s3-sts-endpoint to specify STS endpoint
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
2023-03-02 09:56:09 +00:00
Nick Craig-Wood
c6b0587dc0 s3: fix AWS STS failing if --s3-endpoint is set
Before this change, if an --s3-profile which used AWS STS (eg to
assume a role) was set together with --s3-endpoint, rclone would use
the value from --s3-endpoint to contact the STS server, which did not work.

This fix implements an endpoint resolver which only overrides the "s3"
service if --s3-endpoint is set. It sends the "sts" service (and any
other service) to the default resolver.

Fixes #6443
See: https://forum.rclone.org/t/s3-profile-failing-when-explicit-s3-endpoint-is-present/36063/
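For example, pointing the two services at different endpoints (hypothetical URLs):

    rclone lsd s3remote: \
        --s3-endpoint https://s3.internal.example.com \
        --s3-sts-endpoint https://sts.internal.example.com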
2023-03-01 16:24:40 +00:00
Nick Craig-Wood
9baa4d1c3c accounting: show checking tag if available even on transfers 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
a5390dbbeb sync,operations: fix correct concurrency: use --checkers unless transferring files
There were some places (e.g. deleting files) where we were using
--transfers instead of --checkers to control the concurrency when
files weren't being transferred.

These have been updated to use --checkers.
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
019a486d5b accounting: Make checkers show what they are doing
Before this change, all types of checkers showed "checking" after the
file name despite the fact that not all of them were checking.

After this change, they can show

- checking
- deleting
- hashing
- importing
- listing
- merging
- moving
- renaming

See: https://forum.rclone.org/t/what-is-rclone-checking-during-a-purge/35931/
2023-03-01 11:10:38 +00:00
Nick Craig-Wood
34ce11d2be Add ToBeFree to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
88e8ede0aa Add Gerard Bosch to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
f6f250c507 Add logopk to contributors 2023-03-01 11:10:38 +00:00
Nick Craig-Wood
2c45e901f0 Add Hunter Wittenborn to contributors 2023-03-01 11:10:38 +00:00
ToBeFree
9e1443799a docs: crypt: fix typo 2023-02-28 11:50:53 +00:00
Gerard Bosch
dd72aff98a docs: bisync: clarification of --resync 2023-02-28 11:47:28 +00:00
logopk
5039f9be48 docker: fix volume plugin does not remount volume on docker restart
docker volume plugin restoreState: skip fs option if empty

Fixes #6769
Co-authored-by: Peter Kreuser <logo@kreuser.name>
2023-02-28 11:29:07 +00:00
43 changed files with 535 additions and 153 deletions

View File

@@ -1221,7 +1221,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object.Name, "Can't create object %v", err)
continue
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "deleting")
err = f.deleteByID(ctx, object.ID, object.Name)
checkErr(err)
tr.Done(ctx, err)
@@ -1235,7 +1235,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
if err != nil {
fs.Errorf(object, "Can't create object %+v", err)
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
if oldOnly && last != remote {
// Check current version of the file
if object.Action == "hide" {

View File

@@ -761,7 +761,7 @@ func (f *Fs) shouldRetry(ctx context.Context, err error) (bool, error) {
} else if f.opt.StopOnDownloadLimit && reason == "downloadQuotaExceeded" {
fs.Errorf(f, "Received download limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "quotaExceeded" {
} else if f.opt.StopOnUploadLimit && (reason == "quotaExceeded" || reason == "storageQuotaExceeded") {
fs.Errorf(f, "Received upload limit error: %v", err)
return false, fserrors.FatalError(err)
} else if f.opt.StopOnUploadLimit && reason == "teamDriveFileLimitExceeded" {

View File

@@ -243,6 +243,15 @@ func (f *Fs) InternalTestShouldRetry(t *testing.T) {
quotaExceededRetry, quotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, quotaExceededRetry)
assert.Equal(t, quotaExceededError, expectedQuotaError)
sqEItem := googleapi.ErrorItem{
Reason: "storageQuotaExceeded",
}
generic403.Errors[0] = sqEItem
expectedStorageQuotaError := fserrors.FatalError(&generic403)
storageQuotaExceededRetry, storageQuotaExceededError := f.shouldRetry(ctx, &generic403)
assert.False(t, storageQuotaExceededRetry)
assert.Equal(t, storageQuotaExceededError, expectedStorageQuotaError)
}
func (f *Fs) InternalTestDocumentImport(t *testing.T) {

View File

@@ -15,7 +15,7 @@ import (
"sync"
"time"
"github.com/rclone/ftp"
"github.com/jlaffaye/ftp"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config"
@@ -315,18 +315,26 @@ func (dl *debugLog) Write(p []byte) (n int, err error) {
return len(p), nil
}
// returns true if this FTP error should be retried
func isRetriableFtpError(err error) bool {
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable, ftp.StatusTransfertAborted:
return true
}
}
return false
}
// shouldRetry returns a boolean as to whether this err deserve to be
// retried. It returns the err as a convenience
func shouldRetry(ctx context.Context, err error) (bool, error) {
if fserrors.ContextError(ctx, &err) {
return false, err
}
switch errX := err.(type) {
case *textproto.Error:
switch errX.Code {
case ftp.StatusNotAvailable:
return true, err
}
if isRetriableFtpError(err) {
return true, err
}
return fserrors.ShouldRetry(err), err
}
@@ -1186,15 +1194,26 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (rc io.Read
}
}
}
c, err := o.fs.getFtpConnection(ctx)
var (
fd *ftp.Response
c *ftp.ServerConn
)
err = o.fs.pacer.Call(func() (bool, error) {
c, err = o.fs.getFtpConnection(ctx)
if err != nil {
return false, err // getFtpConnection has retries already
}
fd, err = c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
}
return shouldRetry(ctx, err)
})
if err != nil {
return nil, fmt.Errorf("open: %w", err)
}
fd, err := c.RetrFrom(o.fs.opt.Enc.FromStandardPath(path), uint64(offset))
if err != nil {
o.fs.putFtpConnection(&c, err)
return nil, fmt.Errorf("open: %w", err)
}
rc = &ftpReadCloser{rc: readers.NewLimitedReadCloser(fd, limit), c: c, f: o.fs}
return rc, nil
}

View File

@@ -82,7 +82,8 @@ func init() {
saFile, _ := m.Get("service_account_file")
saCreds, _ := m.Get("service_account_credentials")
anonymous, _ := m.Get("anonymous")
if saFile != "" || saCreds != "" || anonymous == "true" {
envAuth, _ := m.Get("env_auth")
if saFile != "" || saCreds != "" || anonymous == "true" || envAuth == "true" {
return nil, nil
}
return oauthutil.ConfigOut("", &oauthutil.Options{
@@ -330,6 +331,17 @@ can't check the size and hash but the file contents will be decompressed.
Default: (encoder.Base |
encoder.EncodeCrLf |
encoder.EncodeInvalidUtf8),
}, {
Name: "env_auth",
Help: "Get GCP IAM credentials from runtime (environment variables or instance meta data if no env vars).\n\nOnly applies if service_account_file and service_account_credentials is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter credentials in the next step.",
}, {
Value: "true",
Help: "Get GCP IAM credentials from the environment (env vars or IAM).",
}},
}}...),
})
}
@@ -349,6 +361,7 @@ type Options struct {
Decompress bool `config:"decompress"`
Endpoint string `config:"endpoint"`
Enc encoder.MultiEncoder `config:"encoding"`
EnvAuth bool `config:"env_auth"`
}
// Fs represents a remote storage server
@@ -500,6 +513,11 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if err != nil {
return nil, fmt.Errorf("failed configuring Google Cloud Storage Service Account: %w", err)
}
} else if opt.EnvAuth {
oAuthClient, err = google.DefaultClient(ctx, storage.DevstorageFullControlScope)
if err != nil {
return nil, fmt.Errorf("failed to configure Google Cloud Storage: %w", err)
}
} else {
oAuthClient, _, err = oauthutil.NewClient(ctx, name, m, storageConfig)
if err != nil {

View File

@@ -161,7 +161,7 @@ func (f *Fs) dbImport(ctx context.Context, hashName, sumRemote string, sticky bo
if err := o.putHashes(ctx, hashMap{hashType: hash}); err != nil {
fs.Errorf(nil, "%s: failed to import: %v", remote, err)
}
accounting.Stats(ctx).NewCheckingTransfer(obj).Done(ctx, err)
accounting.Stats(ctx).NewCheckingTransfer(obj, "importing").Done(ctx, err)
doneCount++
}
})

View File

@@ -524,6 +524,10 @@ func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err e
if f.opt.FollowSymlinks && (mode&os.ModeSymlink) != 0 {
localPath := filepath.Join(fsDirPath, name)
fi, err = os.Stat(localPath)
// Quietly skip errors on excluded files and directories
if err != nil && useFilter && !filter.IncludeRemote(newRemote) {
continue
}
if os.IsNotExist(err) || isCircularSymlinkError(err) {
// Skip bad symlinks and circular symlinks
err = fserrors.NoRetryError(fmt.Errorf("symlink: %w", err))

View File

@@ -14,6 +14,7 @@ import (
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configmap"
"github.com/rclone/rclone/fs/filter"
"github.com/rclone/rclone/fs/hash"
@@ -395,3 +396,73 @@ func TestFilter(t *testing.T) {
sort.Sort(entries)
require.Equal(t, "[included]", fmt.Sprint(entries))
}
func TestFilterSymlink(t *testing.T) {
ctx := context.Background()
r := fstest.NewRun(t)
defer r.Finalise()
when := time.Now()
f := r.Flocal.(*Fs)
// Create a file, a directory, a symlink to a file, a symlink to a directory and a dangling symlink
r.WriteFile("included.file", "included file", when)
r.WriteFile("included.dir/included.sub.file", "included sub file", when)
require.NoError(t, os.Symlink("included.file", filepath.Join(r.LocalName, "included.file.link")))
require.NoError(t, os.Symlink("included.dir", filepath.Join(r.LocalName, "included.dir.link")))
require.NoError(t, os.Symlink("dangling", filepath.Join(r.LocalName, "dangling.link")))
// Set fs into "-L" mode
f.opt.FollowSymlinks = true
f.opt.TranslateSymlinks = false
f.lstat = os.Stat
// Set fs into "-l" mode
// f.opt.FollowSymlinks = false
// f.opt.TranslateSymlinks = true
// f.lstat = os.Lstat
// Check set up for filtering
assert.True(t, f.Features().FilterAware)
// Reset global error count
accounting.Stats(ctx).ResetErrors()
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
// Add a filter
ctx, fi := filter.AddConfig(ctx)
require.NoError(t, fi.AddRule("+ included.file"))
require.NoError(t, fi.AddRule("+ included.file.link"))
require.NoError(t, fi.AddRule("+ included.dir/**"))
require.NoError(t, fi.AddRule("+ included.dir.link/**"))
require.NoError(t, fi.AddRule("- *"))
// Check listing without use filter flag
entries, err := f.List(ctx, "")
require.NoError(t, err)
// Check 1 global error, one for each dangling symlink
assert.Equal(t, int64(1), accounting.Stats(ctx).GetErrors(), "global errors found")
accounting.Stats(ctx).ResetErrors()
sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
// Add user filter flag
ctx = filter.SetUseFilter(ctx, true)
// Check listing with use filter flag
entries, err = f.List(ctx, "")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir included.dir.link included.file included.file.link]", fmt.Sprint(entries))
// Check listing through a symlink still works
entries, err = f.List(ctx, "included.dir")
require.NoError(t, err)
assert.Equal(t, int64(0), accounting.Stats(ctx).GetErrors(), "global errors found")
sort.Sort(entries)
require.Equal(t, "[included.dir/included.sub.file]", fmt.Sprint(entries))
}

View File

@@ -83,6 +83,17 @@ than permanently deleting them. If you specify this then rclone will
permanently delete objects instead.`,
Default: false,
Advanced: true,
}, {
Name: "use_https",
Help: `Use HTTPS for transfers.
MEGA uses plain text HTTP connections by default.
Some ISPs throttle HTTP connections, which causes transfers to become very slow.
Enabling this will force MEGA to use HTTPS for all transfers.
HTTPS is normally not necessary since all data is already encrypted anyway.
Enabling it will increase CPU usage and add network overhead.`,
Default: false,
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -100,6 +111,7 @@ type Options struct {
Pass string `config:"pass"`
Debug bool `config:"debug"`
HardDelete bool `config:"hard_delete"`
UseHTTPS bool `config:"use_https"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -204,6 +216,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
if srv == nil {
srv = mega.New().SetClient(fshttp.NewClient(ctx))
srv.SetRetries(ci.LowLevelRetries) // let mega do the low level retries
srv.SetHTTPS(opt.UseHTTPS)
srv.SetLogger(func(format string, v ...interface{}) {
fs.Infof("*go-mega*", format, v...)
})

View File

@@ -126,6 +126,7 @@ type HashesType struct {
Sha1Hash string `json:"sha1Hash"` // hex encoded SHA1 hash for the contents of the file (if available)
Crc32Hash string `json:"crc32Hash"` // hex encoded CRC32 value of the file (if available)
QuickXorHash string `json:"quickXorHash"` // base64 encoded QuickXorHash value of the file (if available)
Sha256Hash string `json:"sha256Hash"` // hex encoded SHA256 value of the file (if available)
}
// FileFacet groups file-related data on OneDrive into a single structure.

View File

@@ -259,6 +259,48 @@ this flag there.
At the time of writing this only works with OneDrive personal paid accounts.
`,
Advanced: true,
}, {
Name: "hash_type",
Default: "auto",
Help: `Specify the hash in use for the backend.
This specifies the hash type in use. If set to "auto" it will use the
default hash which is QuickXorHash.
Before rclone 1.62 an SHA1 hash was used by default for Onedrive
Personal. For 1.62 and later the default is to use a QuickXorHash for
all onedrive types. If an SHA1 hash is desired then set this option
accordingly.
From July 2023 QuickXorHash will be the only available hash for
both OneDrive for Business and OneDrive Personal.
This can be set to "none" to not use any hashes.
If the hash requested does not exist on the object, it will be
returned as an empty string which is treated as a missing hash by
rclone.
`,
Examples: []fs.OptionExample{{
Value: "auto",
Help: "Rclone chooses the best hash",
}, {
Value: "quickxor",
Help: "QuickXor",
}, {
Value: "sha1",
Help: "SHA1",
}, {
Value: "sha256",
Help: "SHA256",
}, {
Value: "crc32",
Help: "CRC32",
}, {
Value: "none",
Help: "None - don't use any hashes",
}},
Advanced: true,
}, {
Name: config.ConfigEncoding,
Help: config.ConfigEncodingHelp,
@@ -597,6 +639,7 @@ type Options struct {
LinkScope string `config:"link_scope"`
LinkType string `config:"link_type"`
LinkPassword string `config:"link_password"`
HashType string `config:"hash_type"`
Enc encoder.MultiEncoder `config:"encoding"`
}
@@ -613,6 +656,7 @@ type Fs struct {
tokenRenewer *oauthutil.Renew // renew the token on expiry
driveID string // ID to use for querying Microsoft Graph
driveType string // https://developer.microsoft.com/en-us/graph/docs/api-reference/v1.0/resources/drive
hashType hash.Type // type of the hash we are using
}
// Object describes a OneDrive object
@@ -626,8 +670,7 @@ type Object struct {
size int64 // size of the object
modTime time.Time // modification time of the object
id string // ID of the object
sha1 string // SHA-1 of the object content
quickxorhash string // QuickXorHash of the object content
hash string // Hash of the content, usually QuickXorHash but set as hash_type
mimeType string // Content-Type of object from server (may not be as uploaded)
}
@@ -882,6 +925,7 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
driveType: opt.DriveType,
srv: rest.NewClient(oAuthClient).SetRoot(rootURL),
pacer: fs.NewPacer(ctx, pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))),
hashType: QuickXorHashType,
}
f.features = (&fs.Features{
CaseInsensitive: true,
@@ -891,6 +935,15 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, e
}).Fill(ctx, f)
f.srv.SetErrorHandler(errorHandler)
// Set the user defined hash
if opt.HashType == "auto" || opt.HashType == "" {
opt.HashType = QuickXorHashType.String()
}
err = f.hashType.Set(opt.HashType)
if err != nil {
return nil, err
}
// Disable change polling in China region
// See: https://github.com/rclone/rclone/issues/6444
if f.opt.Region == regionCN {
@@ -1556,10 +1609,7 @@ func (f *Fs) About(ctx context.Context) (usage *fs.Usage, err error) {
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
if f.driveType == driveTypePersonal {
return hash.Set(hash.SHA1)
}
return hash.Set(QuickXorHashType)
return hash.Set(f.hashType)
}
// PublicLink returns a link for downloading without account.
@@ -1768,14 +1818,8 @@ func (o *Object) rootPath() string {
// Hash returns the SHA-1 of an object returning a lowercase hex string
func (o *Object) Hash(ctx context.Context, t hash.Type) (string, error) {
if o.fs.driveType == driveTypePersonal {
if t == hash.SHA1 {
return o.sha1, nil
}
} else {
if t == QuickXorHashType {
return o.quickxorhash, nil
}
if t == o.fs.hashType {
return o.hash, nil
}
return "", hash.ErrUnsupported
}
@@ -1806,16 +1850,23 @@ func (o *Object) setMetaData(info *api.Item) (err error) {
file := info.GetFile()
if file != nil {
o.mimeType = file.MimeType
if file.Hashes.Sha1Hash != "" {
o.sha1 = strings.ToLower(file.Hashes.Sha1Hash)
}
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.quickxorhash = hex.EncodeToString(h)
o.hash = ""
switch o.fs.hashType {
case QuickXorHashType:
if file.Hashes.QuickXorHash != "" {
h, err := base64.StdEncoding.DecodeString(file.Hashes.QuickXorHash)
if err != nil {
fs.Errorf(o, "Failed to decode QuickXorHash %q: %v", file.Hashes.QuickXorHash, err)
} else {
o.hash = hex.EncodeToString(h)
}
}
case hash.SHA1:
o.hash = strings.ToLower(file.Hashes.Sha1Hash)
case hash.SHA256:
o.hash = strings.ToLower(file.Hashes.Sha256Hash)
case hash.CRC32:
o.hash = strings.ToLower(file.Hashes.Crc32Hash)
}
}
fileSystemInfo := info.GetFileSystemInfo()

View File

@@ -2266,6 +2266,11 @@ rclone's choice here.
Help: `Suppress setting and reading of system metadata`,
Advanced: true,
Default: false,
}, {
Name: "sts_endpoint",
Help: "Endpoint for STS.\n\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS",
Advanced: true,
},
}})
}
@@ -2352,6 +2357,7 @@ type Options struct {
SecretAccessKey string `config:"secret_access_key"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
STSEndpoint string `config:"sts_endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
@@ -2528,7 +2534,7 @@ func parsePath(path string) (root string) {
// split returns bucket and bucketPath from the rootRelativePath
// relative to f.root
func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
bucketName, bucketPath = bucket.Split(path.Join(f.root, rootRelativePath))
bucketName, bucketPath = bucket.Split(bucket.Join(f.root, rootRelativePath))
return f.opt.Enc.FromStandardName(bucketName), f.opt.Enc.FromStandardPath(bucketPath)
}
@@ -2560,6 +2566,38 @@ func getClient(ctx context.Context, opt *Options) *http.Client {
}
}
// Default name resolver
var defaultResolver = endpoints.DefaultResolver()
// resolve (service, region) to endpoint
//
// Used to set endpoint for s3 services and not for other services
type resolver map[string]string
// Add a service to the resolver, ignoring empty urls
func (r resolver) addService(service, url string) {
if url == "" {
return
}
if !strings.HasPrefix(url, "http") {
url = "https://" + url
}
r[service] = url
}
// EndpointFor return the endpoint for s3 if set or the default if not
func (r resolver) EndpointFor(service, region string, opts ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
fs.Debugf(nil, "Resolving service %q region %q", service, region)
url, ok := r[service]
if ok {
return endpoints.ResolvedEndpoint{
URL: url,
SigningRegion: region,
}, nil
}
return defaultResolver.EndpointFor(service, region, opts...)
}
// s3Connection makes a connection to s3
func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S3, *session.Session, error) {
ci := fs.GetConfig(ctx)
@@ -2638,8 +2676,12 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
if opt.Endpoint != "" || opt.STSEndpoint != "" {
// If endpoints are set, override the relevant services only
r := make(resolver)
r.addService("s3", opt.Endpoint)
r.addService("sts", opt.STSEndpoint)
awsConfig.WithEndpointResolver(r)
}
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
@@ -2657,7 +2699,7 @@ func s3Connection(ctx context.Context, opt *Options, client *http.Client) (*s3.S
}
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
// awsSessionOpts.Config.Credentials = nil
awsSessionOpts.Config.Credentials = nil
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
@@ -3426,15 +3468,16 @@ var errEndList = errors.New("end list")
// list options
type listOpt struct {
bucket string // bucket to list
directory string // directory with bucket
prefix string // prefix to remove from listing
addBucket bool // if set, the bucket is added to the start of the remote
recurse bool // if set, recurse to read sub directories
withVersions bool // if set, versions are produced
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
findFile bool // if set, it will look for files called (bucket, directory)
versionAt fs.Time // if set only show versions <= this time
bucket string // bucket to list
directory string // directory with bucket
prefix string // prefix to remove from listing
addBucket bool // if set, the bucket is added to the start of the remote
recurse bool // if set, recurse to read sub directories
withVersions bool // if set, versions are produced
hidden bool // if set, return delete markers as objects with size == isDeleteMarker
findFile bool // if set, it will look for files called (bucket, directory)
versionAt fs.Time // if set only show versions <= this time
noSkipMarkers bool // if set return dir marker objects
}
// list lists the objects into the function supplied with the opt
@@ -3547,7 +3590,7 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
}
remote = remote[len(opt.prefix):]
if opt.addBucket {
remote = path.Join(opt.bucket, remote)
remote = bucket.Join(opt.bucket, remote)
}
remote = strings.TrimSuffix(remote, "/")
err = fn(remote, &s3.Object{Key: &remote}, nil, true)
@@ -3576,10 +3619,10 @@ func (f *Fs) list(ctx context.Context, opt listOpt, fn listFn) error {
remote = remote[len(opt.prefix):]
isDirectory := remote == "" || strings.HasSuffix(remote, "/")
if opt.addBucket {
remote = path.Join(opt.bucket, remote)
remote = bucket.Join(opt.bucket, remote)
}
// is this a directory marker?
if isDirectory && object.Size != nil && *object.Size == 0 {
if isDirectory && object.Size != nil && *object.Size == 0 && !opt.noSkipMarkers {
continue // skip directory marker
}
if versionIDs != nil {
@@ -3869,7 +3912,7 @@ func (f *Fs) copy(ctx context.Context, req *s3.CopyObjectInput, dstBucket, dstPa
req.Bucket = &dstBucket
req.ACL = stringPointerOrNil(f.opt.ACL)
req.Key = &dstPath
source := pathEscape(path.Join(srcBucket, srcPath))
source := pathEscape(bucket.Join(srcBucket, srcPath))
if src.versionID != nil {
source += fmt.Sprintf("?versionId=%s", *src.versionID)
}
@@ -4526,13 +4569,14 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
delErr <- operations.DeleteFiles(ctx, delChan)
}()
checkErr(f.list(ctx, listOpt{
bucket: bucket,
directory: directory,
prefix: f.rootDirectory,
addBucket: f.rootBucket == "",
recurse: true,
withVersions: versioned,
hidden: true,
bucket: bucket,
directory: directory,
prefix: f.rootDirectory,
addBucket: f.rootBucket == "",
recurse: true,
withVersions: versioned,
hidden: true,
noSkipMarkers: true,
}, func(remote string, object *s3.Object, versionID *string, isDirectory bool) error {
if isDirectory {
return nil
@@ -4542,7 +4586,7 @@ func (f *Fs) purge(ctx context.Context, dir string, oldOnly bool) error {
fs.Errorf(object, "Can't create object %+v", err)
return nil
}
tr := accounting.Stats(ctx).NewCheckingTransfer(oi)
tr := accounting.Stats(ctx).NewCheckingTransfer(oi, "checking")
// Work out whether the file is the current version or not
isCurrentVersion := !versioned || !version.Match(remote)
fs.Debugf(nil, "%q version %v", remote, version.Match(remote))

View File

@@ -1,7 +1,6 @@
package seafile
import (
"sync"
"sync/atomic"
"testing"
"time"
@@ -17,19 +16,19 @@ func TestShouldAllowShutdownTwice(t *testing.T) {
renew.Shutdown()
}
func TestRenewal(t *testing.T) {
func TestRenewalInTimeLimit(t *testing.T) {
var count int64
wg := sync.WaitGroup{}
wg.Add(2) // run the renewal twice
renew := NewRenew(time.Millisecond, func() error {
renew := NewRenew(100*time.Millisecond, func() error {
atomic.AddInt64(&count, 1)
wg.Done()
return nil
})
wg.Wait()
time.Sleep(time.Second)
renew.Shutdown()
// it is technically possible that a third renewal gets triggered between Wait() and Shutdown()
assert.GreaterOrEqual(t, atomic.LoadInt64(&count), int64(2))
// there's no guarantee the CI agent can handle a simple goroutine
renewCount := atomic.LoadInt64(&count)
t.Logf("renew count = %d", renewCount)
assert.Greater(t, renewCount, int64(0))
assert.Less(t, renewCount, int64(11))
}

View File

@@ -34,9 +34,10 @@ func (f *Fs) dial(ctx context.Context, network, addr string) (*conn, error) {
d := &smb2.Dialer{
Initiator: &smb2.NTLMInitiator{
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
User: f.opt.User,
Password: pass,
Domain: f.opt.Domain,
TargetSPN: f.opt.SPN,
},
}

View File

@@ -60,6 +60,17 @@ func init() {
Name: "domain",
Help: "Domain name for NTLM authentication.",
Default: "WORKGROUP",
}, {
Name: "spn",
Help: `Service principal name.
Rclone presents this name to the server. Some servers use this as further
authentication, and it often needs to be set for clusters. For example:
cifs/remotehost:1020
Leave blank if not sure.
`,
}, {
Name: "idle_timeout",
Default: fs.Duration(60 * time.Second),
@@ -109,6 +120,7 @@ type Options struct {
User string `config:"user"`
Pass string `config:"pass"`
Domain string `config:"domain"`
SPN string `config:"spn"`
HideSpecial bool `config:"hide_special_share"`
CaseInsensitive bool `config:"case_insensitive"`
IdleTimeout fs.Duration `config:"idle_timeout"`

View File

@@ -401,9 +401,15 @@ func initConfig() {
// Start accounting
accounting.Start(ctx)
// Hide console window
// Configure console
if ci.NoConsole {
// Hide the console window
terminal.HideConsole()
} else {
// Enable color support on stdout if possible.
// This enables virtual terminal processing on Windows 10,
// adding native support for ANSI/VT100 escape sequences.
terminal.EnableColorsStdout()
}
// Load filters

View File

@@ -6,6 +6,7 @@ import (
"fmt"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/spf13/cobra"
)
@@ -27,12 +28,12 @@ it will always be removed.
},
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(1, 1, command, args)
fs, fileName := cmd.NewFsFile(args[0])
f, fileName := cmd.NewFsFile(args[0])
cmd.Run(true, false, command, func() error {
if fileName == "" {
return fmt.Errorf("%s is a directory or doesn't exist", args[0])
return fmt.Errorf("%s is a directory or doesn't exist: %w", args[0], fs.ErrorObjectNotFound)
}
fileObj, err := fs.NewObject(context.Background(), fileName)
fileObj, err := f.NewObject(context.Background(), fileName)
if err != nil {
return err
}

View File

@@ -60,12 +60,14 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
case "":
continue
case "remote", "fs":
p, err := fspath.Parse(str)
if err != nil || p.Name == ":" {
return fmt.Errorf("cannot parse path %q: %w", str, err)
if str != "" {
p, err := fspath.Parse(str)
if err != nil || p.Name == ":" {
return fmt.Errorf("cannot parse path %q: %w", str, err)
}
fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
vol.Fs = str
}
fsName, fsPath, fsOpt = p.Name, p.Path, p.Config
vol.Fs = str
case "type":
fsType = str
vol.Type = str

View File

@@ -18,6 +18,8 @@ import (
"github.com/rclone/rclone/fs/dirtree"
"github.com/rclone/rclone/fs/log"
"github.com/rclone/rclone/fs/walk"
"github.com/rclone/rclone/lib/encoder"
"github.com/rclone/rclone/lib/terminal"
"github.com/spf13/cobra"
)
@@ -26,6 +28,7 @@ var (
outFileName string
noReport bool
sort string
enc = encoder.OS
)
func init() {
@@ -100,22 +103,26 @@ For a more interactive navigation of the remote see the
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
fsrc := cmd.NewFsSrc(args)
outFile := os.Stdout
ci := fs.GetConfig(context.Background())
var outFile io.Writer
if outFileName != "" {
var err error
outFile, err = os.Create(outFileName)
if err != nil {
return fmt.Errorf("failed to create output file: %w", err)
}
opts.Colorize = false
} else {
terminal.Start()
outFile = terminal.Out
opts.Colorize = true
}
opts.VerSort = opts.VerSort || sort == "version"
opts.ModSort = opts.ModSort || sort == "mtime"
opts.CTimeSort = opts.CTimeSort || sort == "ctime"
opts.NameSort = sort == "name"
opts.SizeSort = sort == "size"
ci := fs.GetConfig(context.Background())
opts.UnitSize = ci.HumanReadable
opts.Colorize = ci.TerminalColorMode != fs.TerminalColorModeNever
if opts.DeepLevel == 0 {
opts.DeepLevel = ci.MaxDepth
}
@@ -158,7 +165,7 @@ type FileInfo struct {
// Name is base name of the file
func (to *FileInfo) Name() string {
return path.Base(to.entry.Remote())
return enc.FromStandardName(path.Base(to.entry.Remote()))
}
// Size in bytes for regular files; system-dependent for others
@@ -192,7 +199,7 @@ func (to *FileInfo) Sys() interface{} {
// String returns the full path
func (to *FileInfo) String() string {
return to.entry.Remote()
return filepath.FromSlash(enc.FromStandardPath(to.entry.Remote()))
}
// Fs maps an fs.Fs into a tree.Fs
@@ -207,6 +214,7 @@ func NewFs(dirs dirtree.DirTree) Fs {
func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
defer log.Trace(nil, "filePath=%q", filePath)("fi=%+v, err=%v", &fi, &err)
filePath = filepath.ToSlash(filePath)
filePath = enc.ToStandardPath(filePath)
filePath = strings.TrimLeft(filePath, "/")
if filePath == "" {
return &FileInfo{fs.NewDir("", time.Now())}, nil
@@ -222,13 +230,14 @@ func (dirs Fs) Stat(filePath string) (fi os.FileInfo, err error) {
func (dirs Fs) ReadDir(dir string) (names []string, err error) {
defer log.Trace(nil, "dir=%s", dir)("names=%+v, err=%v", &names, &err)
dir = filepath.ToSlash(dir)
dir = enc.ToStandardPath(dir)
dir = strings.TrimLeft(dir, "/")
entries, ok := dirs[dir]
if !ok {
return nil, fmt.Errorf("couldn't find directory %q", dir)
}
for _, entry := range entries {
names = append(names, path.Base(entry.Remote()))
names = append(names, enc.FromStandardName(path.Base(entry.Remote())))
}
return
}

View File

@@ -684,3 +684,12 @@ put them back in again.` >}}
* happyxhw <44490504+happyxhw@users.noreply.github.com>
* Simmon Li (he/him) <hello@crespire.dev>
* Matthias Baur <baurmatt@users.noreply.github.com>
* Hunter Wittenborn <hunter@hunterwittenborn.com>
* logopk <peter@kreuser.name>
* Gerard Bosch <30733556+gerardbosch@users.noreply.github.com>
* ToBeFree <github@tfrei.de>
* NodudeWasTaken <75137537+NodudeWasTaken@users.noreply.github.com>
* Peter Brunner <peter@lugoues.net>
* Ninh Pham <dongian.rapclubkhtn@gmail.com>
* Ryan Caezar Itang <sitiom@proton.me>
* Peter Brunner <peter@psykhe.com>

View File

@@ -146,9 +146,9 @@ The base directories on the both Path1 and Path2 filesystems must exist
or bisync will fail. This is required for safety - that bisync can verify
that both paths are valid.
When using `--resync` a newer version of a file on the Path2 filesystem
will be overwritten by the Path1 filesystem version.
Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
When using `--resync`, a newer version of a file on either the Path1 or Path2
filesystem will overwrite the file on the other path (only the last version
will be kept). Carefully evaluate deltas using [--dry-run](/flags/#non-backend-flags).
For a resync run, one of the paths may be empty (no files in the path tree).
The resync run should result in files on both paths, else a normal non-resync

View File

@@ -58,7 +58,7 @@ custom salt is effectively a second password that must be memorized.
based on XSalsa20 cipher and Poly1305 for integrity.
[Names](#name-encryption) (file- and directory names) are also encrypted
by default, but this has some implications and is therefore
possible to turned off.
possible to be turned off.
## Configuration

View File

@@ -165,6 +165,19 @@ developers so it may be out of date. Its current version is as below.
[![Chocolatey package](https://repology.org/badge/version-for-repo/chocolatey/rclone.svg)](https://repology.org/project/rclone/versions)
### Scoop package manager {#windows-scoop}
Make sure you have [Scoop](https://scoop.sh/) installed
```
scoop install rclone
```
Note that this is a third party installer not controlled by the rclone
developers so it may be out of date. Its current version is as below.
[![Scoop package](https://repology.org/badge/version-for-repo/scoop/rclone.svg)](https://repology.org/project/rclone/versions)
## Package manager installation {#package-manager}
Many Linux, Windows, macOS and other OS distributions package and

View File

@@ -168,10 +168,19 @@ OneDrive allows modification times to be set on objects accurate to 1
second. These will be used to detect whether objects need syncing or
not.
OneDrive personal supports SHA1 type hashes. OneDrive for business and
Sharepoint Server support
OneDrive Personal, OneDrive for Business and Sharepoint Server support
[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
Before rclone 1.62 the default hash for Onedrive Personal was `SHA1`.
For rclone 1.62 and above the default for all Onedrive backends is
`QuickXorHash`.
Starting from July 2023 `SHA1` support is being phased out in Onedrive
Personal in favour of `QuickXorHash`. If necessary the
`--onedrive-hash-type` flag (or `hash_type` config option) can be used
to select `SHA1` during the transition period if this is important to
your workflow.
For all types of OneDrive you can use the `--checksum` flag.
### Restricted filename characters

View File

@@ -39,7 +39,7 @@ Here is an overview of the major features of each cloud storage system.
| Mega | - | - | No | Yes | - | - |
| Memory | MD5 | R/W | No | No | - | - |
| Microsoft Azure Blob Storage | MD5 | R/W | No | No | R/W | - |
| Microsoft OneDrive | SHA1 ⁵ | R/W | Yes | No | R | - |
| Microsoft OneDrive | QuickXorHash ⁵ | R/W | Yes | No | R | - |
| OpenDrive | MD5 | R/W | Yes | Partial ⁸ | - | - |
| OpenStack Swift | MD5 | R/W | No | No | R/W | - |
| Oracle Object Storage | MD5 | R/W | No | No | R/W | - |
@@ -72,9 +72,7 @@ This is an SHA256 sum of all the 4 MiB block SHA256s.
⁴ WebDAV supports modtimes when used with Owncloud and Nextcloud only.
Microsoft OneDrive Personal supports SHA1 hashes, whereas OneDrive
for business and SharePoint server support Microsoft's own
[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash).
[QuickXorHash](https://docs.microsoft.com/en-us/onedrive/developer/code-snippets/quickxorhash) is Microsoft's own hash.
⁶ Mail.ru uses its own modified SHA1 hash

View File

@@ -689,8 +689,8 @@ func (s *StatsInfo) RetryAfter() time.Time {
}
// NewCheckingTransfer adds a checking transfer to the stats, from the object.
func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry) *Transfer {
tr := newCheckingTransfer(s, obj)
func (s *StatsInfo) NewCheckingTransfer(obj fs.DirEntry, what string) *Transfer {
tr := newCheckingTransfer(s, obj, what)
s.checking.add(tr)
return tr
}
@@ -720,7 +720,7 @@ func (s *StatsInfo) NewTransfer(obj fs.DirEntry) *Transfer {
// NewTransferRemoteSize adds a transfer to the stats based on remote and size.
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64) *Transfer {
tr := newTransferRemoteSize(s, remote, size, false)
tr := newTransferRemoteSize(s, remote, size, false, "")
s.transferring.add(tr)
s.startAverageLoop()
return tr

View File

@@ -50,6 +50,7 @@ type Transfer struct {
size int64
startedAt time.Time
checking bool
what string // what kind of transfer this is
// Protects all below
//
@@ -63,22 +64,23 @@ type Transfer struct {
}
// newCheckingTransfer instantiates new checking of the object.
func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true)
func newCheckingTransfer(stats *StatsInfo, obj fs.DirEntry, what string) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), true, what)
}
// newTransfer instantiates new transfer.
func newTransfer(stats *StatsInfo, obj fs.DirEntry) *Transfer {
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false)
return newTransferRemoteSize(stats, obj.Remote(), obj.Size(), false, "")
}
func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool) *Transfer {
func newTransferRemoteSize(stats *StatsInfo, remote string, size int64, checking bool, what string) *Transfer {
tr := &Transfer{
stats: stats,
remote: remote,
size: size,
startedAt: time.Now(),
checking: checking,
what: what,
}
stats.AddTransfer(tr)
return tr

View File

@@ -98,6 +98,7 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
ci := fs.GetConfig(ctx)
stringList := make([]string, 0, len(tm.items))
for _, tr := range tm._sortedSlice() {
var what = tr.what
if exclude != nil {
exclude.mu.RLock()
_, found := exclude.items[tr.remote]
@@ -109,11 +110,17 @@ func (tm *transferMap) String(ctx context.Context, progress *inProgress, exclude
var out string
if acc := progress.get(tr.remote); acc != nil {
out = acc.String()
if what != "" {
out += ", " + what
}
} else {
if what == "" {
what = tm.name
}
out = fmt.Sprintf("%*s: %s",
ci.StatsFileNameLength,
shortenName(tr.remote, ci.StatsFileNameLength),
tm.name,
what,
)
}
stringList = append(stringList, " * "+out)

View File

@@ -120,7 +120,7 @@ func (c *checkMarch) SrcOnly(src fs.DirEntry) (recurse bool) {
// check to see if two objects are identical using the check function
func (c *checkMarch) checkIdentical(ctx context.Context, dst, src fs.Object) (differ bool, noHash bool, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(src)
tr := accounting.Stats(ctx).NewCheckingTransfer(src, "checking")
defer func() {
tr.Done(ctx, err)
}()
@@ -450,7 +450,7 @@ func (c *checkMarch) checkSum(ctx context.Context, obj fs.Object, download bool,
}
var err error
tr := accounting.Stats(ctx).NewCheckingTransfer(obj)
tr := accounting.Stats(ctx).NewCheckingTransfer(obj, "hashing")
defer tr.Done(ctx, err)
if !sumFound {

View File

@@ -286,7 +286,7 @@ func dedupeFindDuplicateDirs(ctx context.Context, f fs.Fs) (duplicateDirs [][]*d
ci := fs.GetConfig(ctx)
err = walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListAll, func(entries fs.DirEntries) error {
for _, entry := range entries {
tr := accounting.Stats(ctx).NewCheckingTransfer(entry)
tr := accounting.Stats(ctx).NewCheckingTransfer(entry, "merging")
remote := entry.Remote()
parentRemote := path.Dir(remote)
@@ -438,7 +438,7 @@ func Deduplicate(ctx context.Context, f fs.Fs, mode DeduplicateMode, byHash bool
files := map[string][]fs.Object{}
err := walk.ListR(ctx, f, "", false, ci.MaxDepth, walk.ListObjects, func(entries fs.DirEntries) error {
entries.ForObject(func(o fs.Object) {
tr := accounting.Stats(ctx).NewCheckingTransfer(o)
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "checking")
defer tr.Done(ctx, nil)
var remote string

View File

@@ -544,7 +544,7 @@ func SameObject(src, dst fs.Object) bool {
// be nil.
func Move(ctx context.Context, fdst fs.Fs, dst fs.Object, remote string, src fs.Object) (newDst fs.Object, err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(src)
tr := accounting.Stats(ctx).NewCheckingTransfer(src, "moving")
defer func() {
if err == nil {
accounting.Stats(ctx).Renames(1)
@@ -633,7 +633,7 @@ func SuffixName(ctx context.Context, remote string) string {
// deleting
func DeleteFileWithBackupDir(ctx context.Context, dst fs.Object, backupDir fs.Fs) (err error) {
ci := fs.GetConfig(ctx)
tr := accounting.Stats(ctx).NewCheckingTransfer(dst)
tr := accounting.Stats(ctx).NewCheckingTransfer(dst, "deleting")
defer func() {
tr.Done(ctx, err)
}()
@@ -678,11 +678,11 @@ func DeleteFile(ctx context.Context, dst fs.Object) (err error) {
func DeleteFilesWithBackupDir(ctx context.Context, toBeDeleted fs.ObjectsChan, backupDir fs.Fs) error {
var wg sync.WaitGroup
ci := fs.GetConfig(ctx)
wg.Add(ci.Transfers)
wg.Add(ci.Checkers)
var errorCount int32
var fatalErrorCount int32
for i := 0; i < ci.Transfers; i++ {
for i := 0; i < ci.Checkers; i++ {
go func() {
defer wg.Done()
for dst := range toBeDeleted {
@@ -938,7 +938,7 @@ func List(ctx context.Context, f fs.Fs, w io.Writer) error {
func ListLong(ctx context.Context, f fs.Fs, w io.Writer) error {
ci := fs.GetConfig(ctx)
return ListFn(ctx, f, func(o fs.Object) {
tr := accounting.Stats(ctx).NewCheckingTransfer(o)
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "listing")
defer func() {
tr.Done(ctx, nil)
}()
@@ -996,7 +996,7 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
return "ERROR", fmt.Errorf("hasher returned an error: %w", err)
}
} else {
tr := accounting.Stats(ctx).NewCheckingTransfer(o)
tr := accounting.Stats(ctx).NewCheckingTransfer(o, "hashing")
defer func() {
tr.Done(ctx, err)
}()
@@ -1022,7 +1022,12 @@ func hashSum(ctx context.Context, ht hash.Type, base64Encoded bool, downloadFlag
// Updated to perform multiple hashes concurrently
func HashLister(ctx context.Context, ht hash.Type, outputBase64 bool, downloadFlag bool, f fs.Fs, w io.Writer) error {
width := hash.Width(ht, outputBase64)
concurrencyControl := make(chan struct{}, fs.GetConfig(ctx).Transfers)
// Use --checkers concurrency unless downloading in which case use --transfers
concurrency := fs.GetConfig(ctx).Checkers
if downloadFlag {
concurrency = fs.GetConfig(ctx).Transfers
}
concurrencyControl := make(chan struct{}, concurrency)
var wg sync.WaitGroup
err := ListFn(ctx, f, func(o fs.Object) {
wg.Add(1)
@@ -1173,7 +1178,7 @@ func Purge(ctx context.Context, f fs.Fs, dir string) (err error) {
// obeys includes and excludes.
func Delete(ctx context.Context, f fs.Fs) error {
ci := fs.GetConfig(ctx)
delChan := make(fs.ObjectsChan, ci.Transfers)
delChan := make(fs.ObjectsChan, ci.Checkers)
delErr := make(chan error, 1)
go func() {
delErr <- DeleteFiles(ctx, delChan)
@@ -1929,7 +1934,6 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
_, err = Op(ctx, fdst, dstObj, dstFileName, srcObj)
} else {
tr := accounting.Stats(ctx).NewCheckingTransfer(srcObj)
if !cp {
if ci.IgnoreExisting {
fs.Debugf(srcObj, "Not removing source file as destination file exists and --ignore-existing is set")
@@ -1937,7 +1941,6 @@ func moveOrCopyFile(ctx context.Context, fdst fs.Fs, fsrc fs.Fs, dstFileName str
err = DeleteFile(ctx, srcObj)
}
}
tr.Done(ctx, err)
}
return err
}
@@ -2189,9 +2192,9 @@ func DirMove(ctx context.Context, f fs.Fs, srcRemote, dstRemote string) (err err
o fs.Object
newPath string
}
renames := make(chan rename, ci.Transfers)
renames := make(chan rename, ci.Checkers)
g, gCtx := errgroup.WithContext(context.Background())
for i := 0; i < ci.Transfers; i++ {
for i := 0; i < ci.Checkers; i++ {
g.Go(func() error {
for job := range renames {
dstOverwritten, _ := f.NewObject(gCtx, job.newPath)

View File

@@ -329,7 +329,7 @@ func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, fraction int, wg *sync.W
}
src := pair.Src
var err error
tr := accounting.Stats(s.ctx).NewCheckingTransfer(src)
tr := accounting.Stats(s.ctx).NewCheckingTransfer(src, "checking")
// Check to see if can store this
if src.Storable() {
needTransfer := operations.NeedTransfer(s.ctx, pair.Dst, pair.Src)
@@ -537,7 +537,7 @@ func (s *syncCopyMove) deleteFiles(checkSrcMap bool) error {
}
// Delete the spare files
toDelete := make(fs.ObjectsChan, s.ci.Transfers)
toDelete := make(fs.ObjectsChan, s.ci.Checkers)
go func() {
outer:
for remote, o := range s.dstFiles {
@@ -772,14 +772,14 @@ func (s *syncCopyMove) makeRenameMap() {
// now make a map of size,hash for all dstFiles
s.renameMap = make(map[string][]fs.Object)
var wg sync.WaitGroup
wg.Add(s.ci.Transfers)
for i := 0; i < s.ci.Transfers; i++ {
wg.Add(s.ci.Checkers)
for i := 0; i < s.ci.Checkers; i++ {
go func() {
defer wg.Done()
for obj := range in {
// only create hash for dst fs.Object if its size could match
if _, found := possibleSizes[obj.Size()]; found {
tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj)
tr := accounting.Stats(s.ctx).NewCheckingTransfer(obj, "renaming")
hash := s.renameID(obj, s.trackRenamesStrategy, s.modifyWindow)
if hash != "" {

go.mod
View File

@@ -32,6 +32,7 @@ require (
github.com/hirochachacha/go-smb2 v1.1.0
github.com/iguanesolutions/go-systemd/v5 v5.1.0
github.com/jcmturner/gokrb5/v8 v8.4.3
github.com/jlaffaye/ftp v0.1.1-0.20230214004652-d84bf4be2b6e
github.com/jzelinskie/whirlpool v0.0.0-20201016144138-0675e54bb004
github.com/klauspost/compress v1.15.14
github.com/koofr/go-httpclient v0.0.0-20221124135700-2eb26cff5dd8
@@ -47,7 +48,6 @@ require (
github.com/pmezard/go-difflib v1.0.0
github.com/prometheus/client_golang v1.14.0
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8
github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6
github.com/rfjakob/eme v1.1.2
github.com/rivo/uniseg v0.4.3
github.com/shirou/gopsutil/v3 v3.22.12
@@ -56,13 +56,13 @@ require (
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
github.com/stretchr/testify v1.8.1
- github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf
+ github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca
github.com/winfsp/cgofuse v1.5.1-0.20221118130120-84c0898ad2e0
github.com/xanzy/ssh-agent v0.3.3
github.com/youmark/pkcs8 v0.0.0-20201027041543-1326539a0a0a
github.com/yunify/qingstor-sdk-go/v3 v3.2.0
go.etcd.io/bbolt v1.3.6
- goftp.io/server v0.4.1
+ goftp.io/server v0.4.2-0.20210615155358-d07a820aac35
golang.org/x/crypto v0.5.0
golang.org/x/net v0.7.0
golang.org/x/oauth2 v0.4.0

go.sum

@@ -277,8 +277,9 @@ github.com/jcmturner/gokrb5/v8 v8.4.3 h1:iTonLeSJOn7MVUtyMT+arAn5AKAPrkilzhGw8wE
github.com/jcmturner/gokrb5/v8 v8.4.3/go.mod h1:dqRwJGXznQrzw6cWmyo6kH+E7jksEQG/CyVWsJEsJO0=
github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
- github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf h1:2IYBd5TD/maMqTU2YUzp2tJL4cNaOYQ9EBullN9t9pk=
- github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
+ github.com/jlaffaye/ftp v0.1.1-0.20230214004652-d84bf4be2b6e h1:Xofa5zcfulLjSb9ZNpb7MI9TFCpVkPCy3JSwrL7xoWE=
+ github.com/jlaffaye/ftp v0.1.1-0.20230214004652-d84bf4be2b6e/go.mod h1:sRSt+7UoQ5BgrZhwta4kr7N5SenQsoIZHMJHY7+zqJg=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@@ -407,8 +408,6 @@ github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8 h1:Y258uzXU/potCYnQd1r6wlAnoMB68BiCkCcCnKx1SH8=
github.com/putdotio/go-putio/putio v0.0.0-20200123120452-16d982cac2b8/go.mod h1:bSJjRokAHHOhA+XFxplld8w2R/dXLH7Z3BZ532vhFwU=
- github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6 h1:J832KfU2Z44Ck3XR5bvw2UxShP0QnjueruNQ6dTYH+g=
- github.com/rclone/ftp v0.0.0-20221014110213-e44dedbc76c6/go.mod h1:qRpxqlna6CaIq9fSRud1bDC5S7EEUEou0j8nMZ0lxO8=
github.com/rfjakob/eme v1.1.2 h1:SxziR8msSOElPayZNFfQw4Tjx/Sbaeeh3eRvrHVMUs4=
github.com/rfjakob/eme v1.1.2/go.mod h1:cVvpasglm/G3ngEfcfT/Wt0GwhkuO32pf/poW6Nyk1k=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -462,6 +461,8 @@ github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKs
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf h1:Y43S3e9P1NPs/QF4R5/SdlXj2d31540hP4Gk8VKNvDg=
github.com/t3rm1n4l/go-mega v0.0.0-20220725095014-c4e0c2b5debf/go.mod h1:c+cGNU1qi9bO7ZF4IRMYk+KaZTNiQ/gQrSbyMmGFq1Q=
+ github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca h1:I9rVnNXdIkij4UvMT7OmKhH9sOIvS8iXkxfPdnn9wQA=
+ github.com/t3rm1n4l/go-mega v0.0.0-20230228171823-a01a2cda13ca/go.mod h1:suDIky6yrK07NnaBadCB4sS0CqFOvUK91lH7CR+JlDA=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
@@ -504,8 +505,8 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
- goftp.io/server v0.4.1 h1:x7KG4HIxSMdK/rpYhExMinRN/aO/T9icvaG/B5e/XfY=
- goftp.io/server v0.4.1/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
+ goftp.io/server v0.4.2-0.20210615155358-d07a820aac35 h1:D4DhKKOtievTsshtbA6W0XL/gBjERF5/vu6Vhmb4sBw=
+ goftp.io/server v0.4.2-0.20210615155358-d07a820aac35/go.mod h1:hFZeR656ErRt3ojMKt7H10vQ5nuWV1e0YeUTeorlR6k=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

lib/bucket/bucket.go

@@ -29,6 +29,19 @@ func Split(absPath string) (bucket, bucketPath string) {
return absPath[:slash], absPath[slash+1:]
}
+ // Join path1 and path2
+ //
+ // Like path.Join but does not clean the path - useful to preserve trailing /
+ func Join(path1, path2 string) string {
+ if path1 == "" {
+ return path2
+ }
+ if path2 == "" {
+ return path1
+ }
+ return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
+ }
// Cache stores whether buckets are available and their IDs
type Cache struct {
mu sync.Mutex // mutex to protect created and deleted
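A quick demonstration of why bucket paths want this Join rather than path.Join: trailing slashes survive and .. segments are left alone. The join helper below copies the function added above so the example runs standalone:

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // join is a copy of bucket.Join above: concatenate without cleaning.
    func join(path1, path2 string) string {
        if path1 == "" {
            return path2
        }
        if path2 == "" {
            return path1
        }
        return strings.TrimSuffix(path1, "/") + "/" + strings.TrimPrefix(path2, "/")
    }

    func main() {
        fmt.Println(join("bucket", "dir/"))      // bucket/dir/ - trailing / kept
        fmt.Println(path.Join("bucket", "dir/")) // bucket/dir  - cleaned away
        fmt.Println(join("/in1", "../in2"))      // /in1/../in2 - .. left alone
        fmt.Println(path.Join("/in1", "../in2")) // /in2        - .. resolved
    }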

lib/bucket/bucket_test.go

@@ -2,6 +2,7 @@ package bucket
import (
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
@@ -24,6 +25,26 @@ func TestSplit(t *testing.T) {
}
}
+ func TestJoin(t *testing.T) {
+ for _, test := range []struct {
+ in1, in2 string
+ want string
+ }{
+ {in1: "", in2: "", want: ""},
+ {in1: "in1", in2: "", want: "in1"},
+ {in1: "", in2: "in2", want: "in2"},
+ {in1: "in1", in2: "in2", want: "in1/in2"},
+ {in1: "in1/", in2: "in2", want: "in1/in2"},
+ {in1: "in1", in2: "/in2", want: "in1/in2"},
+ {in1: "in1", in2: "in2/", want: "in1/in2/"},
+ {in1: "/in1", in2: "/in2", want: "/in1/in2"},
+ {in1: "/in1", in2: "../in2", want: "/in1/../in2"},
+ } {
+ got := Join(test.in1, test.in2)
+ assert.Equal(t, test.want, got, fmt.Sprintf("in1=%q, in2=%q", test.in1, test.in2))
+ }
+ }
func TestCache(t *testing.T) {
c := NewCache()
errBoom := errors.New("boom")

lib/terminal/terminal.go

@@ -111,3 +111,14 @@ func Write(out []byte) {
Start()
_, _ = Out.Write(out)
}
+ // EnableColorsStdout enables colors if possible.
+ // This enables virtual terminal processing on the Windows 10 console,
+ // adding native support for VT100 escape codes. When this terminal
+ // package is used for output, the result is that the colorable library
+ // doesn't have to decode the escapes and explicitly write text with color
+ // formatting to the console using Windows API functions, but can simply
+ // relay everything to stdout.
+ func EnableColorsStdout() {
+ _ = colorable.EnableColorsStdout(nil)
+ }
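Usage is a single call at program startup. The sketch below goes straight to the mattn/go-colorable package that the helper wraps (its EnableColorsStdout returns a restore function, discarded here just as in the code above); on non-Windows platforms the call is a no-op:

    package main

    import (
        "fmt"

        "github.com/mattn/go-colorable"
    )

    func main() {
        // Switch the Windows console to VT processing; no-op elsewhere.
        _ = colorable.EnableColorsStdout(nil)
        fmt.Println("\x1b[31mred text\x1b[0m") // ANSI escape now renders as red
    }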

vfs/dir.go

@@ -325,10 +325,15 @@ func (d *Dir) renameTree(dirPath string) {
d.entry = fs.NewDirCopy(context.TODO(), d.entry).SetRemote(dirPath)
}
- // Do the same to any child directories
+ // Do the same to any child directories and files
for leaf, node := range d.items {
- if dir, ok := node.(*Dir); ok {
- dir.renameTree(path.Join(dirPath, leaf))
+ switch x := node.(type) {
+ case *Dir:
+ x.renameTree(path.Join(dirPath, leaf))
+ case *File:
+ x.renameDir(dirPath)
+ default:
+ panic("bad dir entry")
}
}
}
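A toy version of the fixed walk may help: directories recurse with a newly joined path, while files only need to record their parent's new path. dir and file below are stand-ins for the VFS node types:

    package main

    import (
        "fmt"
        "path"
    )

    type node interface{}

    type file struct{ dPath string }

    type dir struct {
        path  string
        items map[string]node
    }

    // renameTree mirrors the switch added above.
    func (d *dir) renameTree(dirPath string) {
        d.path = dirPath
        for leaf, n := range d.items {
            switch x := n.(type) {
            case *dir:
                x.renameTree(path.Join(dirPath, leaf))
            case *file:
                x.dPath = dirPath // files record the parent's new path
            default:
                panic("bad dir entry")
            }
        }
    }

    func main() {
        f := &file{}
        sub := &dir{items: map[string]node{"c.txt": f}}
        root := &dir{items: map[string]node{"sub": sub}}
        root.renameTree("newroot")
        fmt.Println(sub.path, f.dPath) // newroot/sub newroot/sub
    }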

vfs/dir_test.go

@@ -7,6 +7,7 @@ import (
"sort"
"testing"
"time"
"unsafe"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
@@ -577,3 +578,7 @@ func TestDirRename(t *testing.T) {
err = dir.Rename("potato", "tuba", dir)
assert.Equal(t, EROFS, err)
}
+ func TestDirStructSize(t *testing.T) {
+ t.Logf("Dir struct has size %d bytes", unsafe.Sizeof(Dir{}))
+ }
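The new test just logs the shallow struct size so changes in per-node memory show up in test output; with many nodes held in the directory cache, that size matters. A standalone illustration of what unsafe.Sizeof reports:

    package main

    import (
        "fmt"
        "unsafe"
    )

    type example struct {
        a bool   // 1 byte, padded to 8 to align the next field
        b int64  // 8 bytes
        c string // 16-byte header (pointer + length), not the backing bytes
    }

    func main() {
        // Shallow size only: typically 32 bytes on 64-bit platforms.
        fmt.Printf("example struct has size %d bytes\n", unsafe.Sizeof(example{}))
    }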

vfs/file.go

@@ -141,6 +141,13 @@ func (f *File) Node() Node {
return f
}
+ // renameDir - call when parent directory has been renamed
+ func (f *File) renameDir(dPath string) {
+ f.mu.RLock()
+ f.dPath = dPath
+ f.mu.RUnlock()
+ }
// applyPendingRename runs a previously set rename operation if there are no
// more remaining writers. Call without lock held.
func (f *File) applyPendingRename() {
@@ -296,6 +303,9 @@ func (f *File) activeWriters() int {
// It should be called with the lock held
func (f *File) _roundModTime(modTime time.Time) time.Time {
precision := f.d.f.Precision()
+ if precision == fs.ModTimeNotSupported {
+ return modTime
+ }
return modTime.Truncate(precision)
}
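The early return above matters because rclone models "modtimes not supported" as a sentinel precision value, and truncating by a huge precision would mangle the time. A runnable sketch, with the sentinel assumed as a large constant rather than taken from the rclone source:

    package main

    import (
        "fmt"
        "time"
    )

    // modTimeNotSupported is an assumed stand-in for fs.ModTimeNotSupported.
    const modTimeNotSupported = 9999 * time.Hour

    func roundModTime(modTime time.Time, precision time.Duration) time.Time {
        if precision == modTimeNotSupported {
            return modTime // nothing sensible to truncate by
        }
        return modTime.Truncate(precision)
    }

    func main() {
        t := time.Date(2023, 3, 8, 12, 34, 56, 789e6, time.UTC)
        fmt.Println(roundModTime(t, time.Second))         // nanoseconds dropped
        fmt.Println(roundModTime(t, modTimeNotSupported)) // returned unchanged
    }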
@@ -312,7 +322,9 @@ func (f *File) ModTime() (modTime time.Time) {
}
// Read the modtime from a dirty item if it exists
if f.d.vfs.Opt.CacheMode >= vfscommon.CacheModeMinimal {
- if item := f.d.vfs.cache.DirtyItem(f._path()); item != nil {
+ item := f.d.vfs.cache.ItemOrNil(f._path())
+ noModTime := f.d.f.Precision() == fs.ModTimeNotSupported
+ if item != nil && (item.IsDirty() || noModTime) {
modTime, err := item.GetModTime()
if err != nil {
fs.Errorf(f._path(), "ModTime: Item GetModTime failed: %v", err)

vfs/file_test.go

@@ -6,6 +6,7 @@ import (
"io"
"os"
"testing"
"unsafe"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
@@ -411,3 +412,7 @@ func TestFileRename(t *testing.T) {
})
}
}
+ func TestFileStructSize(t *testing.T) {
+ t.Logf("File struct has size %d bytes", unsafe.Sizeof(File{}))
+ }

vfs/vfscache/cache.go

@@ -294,14 +294,22 @@ func (c *Cache) put(name string, item *Item) (oldItem *Item) {
return oldItem
}
+ // ItemOrNil returns the Item if it exists in the cache, otherwise it
+ // returns nil.
+ //
+ // name should be a remote path not an osPath
+ func (c *Cache) ItemOrNil(name string) (item *Item) {
+ name = clean(name)
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.item[name]
+ }
// InUse returns whether the name is in use in the cache
//
// name should be a remote path not an osPath
func (c *Cache) InUse(name string) bool {
- name = clean(name)
- c.mu.Lock()
- item := c.item[name]
- c.mu.Unlock()
+ item := c.ItemOrNil(name)
if item == nil {
return false
}
@@ -313,10 +321,7 @@ func (c *Cache) InUse(name string) bool {
//
// name should be a remote path not an osPath
func (c *Cache) DirtyItem(name string) (item *Item) {
- name = clean(name)
- c.mu.Lock()
- defer c.mu.Unlock()
- item = c.item[name]
+ item = c.ItemOrNil(name)
if item != nil && !item.IsDirty() {
item = nil
}
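The shape of the refactor in miniature: both callers funnel through one locked lookup, so the mutex handling lives in a single place. A stripped-down sketch with a stand-in item type (note the real InUse also checks the item's usage state; this only shows the locking structure):

    package main

    import (
        "fmt"
        "sync"
    )

    type item struct{ dirty bool }

    type cache struct {
        mu    sync.Mutex
        items map[string]*item
    }

    // itemOrNil is the single locked accessor, as in the diff above.
    func (c *cache) itemOrNil(name string) *item {
        c.mu.Lock()
        defer c.mu.Unlock()
        return c.items[name]
    }

    func (c *cache) inUse(name string) bool {
        return c.itemOrNil(name) != nil
    }

    func (c *cache) dirtyItem(name string) *item {
        it := c.itemOrNil(name)
        if it != nil && !it.dirty {
            return nil
        }
        return it
    }

    func main() {
        c := &cache{items: map[string]*item{"a": {dirty: true}, "b": {}}}
        fmt.Println(c.inUse("a"), c.inUse("x")) // true false
        fmt.Println(c.dirtyItem("a") != nil)    // true
        fmt.Println(c.dirtyItem("b") != nil)    // false
    }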

vfs/vfscache/item.go

@@ -1223,7 +1223,7 @@ func (item *Item) setModTime(modTime time.Time) {
item.mu.Unlock()
}
- // GetModTime of the cache file
+ // GetModTime of the cache item
func (item *Item) GetModTime() (modTime time.Time, err error) {
// defer log.Trace(item.name, "modTime=%v", modTime)("")
item.mu.Lock()
@@ -1231,6 +1231,9 @@ func (item *Item) GetModTime() (modTime time.Time, err error) {
fi, err := item._stat()
if err == nil {
modTime = fi.ModTime()
+ item.info.ModTime = modTime
+ } else {
+ modTime = item.info.ModTime
}
return modTime, nil
}
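The added lines make the stat'd time sticky: it is written back into the item metadata, so a later failed stat (for instance after the cache file has been removed) returns the remembered value instead of a zero time. A self-contained sketch of that fallback, with itemInfo standing in for the cache item's metadata:

    package main

    import (
        "fmt"
        "os"
        "time"
    )

    type itemInfo struct{ ModTime time.Time }

    // getModTime mirrors the hunk above: prefer the on-disk time and
    // remember it; fall back to the remembered value when stat fails.
    func getModTime(path string, info *itemInfo) time.Time {
        fi, err := os.Stat(path)
        if err == nil {
            info.ModTime = fi.ModTime()
        }
        return info.ModTime
    }

    func main() {
        f, err := os.CreateTemp("", "item")
        if err != nil {
            panic(err)
        }
        name := f.Name()
        _ = f.Close()
        info := &itemInfo{}
        fmt.Println(getModTime(name, info)) // from stat, remembered in info
        _ = os.Remove(name)
        fmt.Println(getModTime(name, info)) // stat fails; remembered value returned
    }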