mirror of https://github.com/rclone/rclone.git synced 2026-01-03 00:53:43 +00:00

Compare commits


4 Commits

Author SHA1 Message Date
Anagh Kumar Baranwal
f26e41d1c5 fix: mount parsing for linux under the WSL layer which may contain mounts with spaces
Signed-off-by: Anagh Kumar Baranwal <6824881+darthShadow@users.noreply.github.com>
2023-07-18 17:39:25 +05:30
Benjamin
8a6bf35481 Add Leviia Object Storage on index.md 2023-07-18 09:52:05 +01:00
Benjamin
f7d27f4bf2 Add Object storage to Leviia on README.md 2023-07-18 09:52:05 +01:00
kapitainsky
378a2d21ee --max-duration - add new exit code (10)
This adds a dedicated exit code (10) for the --max-duration flag.

Rclone will exit with exit code 10 if the duration limit is reached.

It behaves in a similar fashion to --max-transfer and exit code 8.

discussed on the forum:

https://forum.rclone.org/t/max-duration-option-is-triggering-exit-with-error/39917/6
2023-07-18 09:51:31 +01:00
12 changed files with 80 additions and 142 deletions

View File

@@ -51,7 +51,7 @@ Rclone *("rsync for cloud storage")* is a command-line program to sync files and
 * IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
 * IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
 * Koofr [:page_facing_up:](https://rclone.org/koofr/)
-* Leviia [:page_facing_up:](https://rclone.org/s3/#leviia)
+* Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)
 * Liara Object Storage [:page_facing_up:](https://rclone.org/s3/#liara-object-storage)
 * Mail.ru Cloud [:page_facing_up:](https://rclone.org/mailru/)
 * Memset Memstore [:page_facing_up:](https://rclone.org/swift/)

View File

@@ -35,6 +35,7 @@ import (
 	fslog "github.com/rclone/rclone/fs/log"
 	"github.com/rclone/rclone/fs/rc/rcflags"
 	"github.com/rclone/rclone/fs/rc/rcserver"
+	fssync "github.com/rclone/rclone/fs/sync"
 	"github.com/rclone/rclone/lib/atexit"
 	"github.com/rclone/rclone/lib/buildinfo"
 	"github.com/rclone/rclone/lib/exitcode"
@@ -501,6 +502,8 @@ func resolveExitCode(err error) {
 		os.Exit(exitcode.UncategorizedError)
 	case errors.Is(err, accounting.ErrorMaxTransferLimitReached):
 		os.Exit(exitcode.TransferExceeded)
+	case errors.Is(err, fssync.ErrorMaxDurationReached):
+		os.Exit(exitcode.DurationExceeded)
 	case fserrors.ShouldRetry(err):
 		os.Exit(exitcode.RetryError)
 	case fserrors.IsNoRetryError(err), fserrors.IsNoLowLevelRetryError(err):
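The new case sits before the generic retry checks so the more specific exit code wins. A minimal, self-contained sketch of the pattern (illustrative sentinels, with codes hard-coded from the documented exit-code list — not rclone's actual source):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// Illustrative sentinels standing in for the rclone errors above.
var (
	errMaxTransferReached = errors.New("max transfer limit reached")
	errMaxDurationReached = errors.New("max duration limit reached")
)

// resolveExitCode maps an error to a process exit code, checking the
// most specific sentinels first, as in the switch above.
func resolveExitCode(err error) {
	switch {
	case err == nil:
		os.Exit(0)
	case errors.Is(err, errMaxTransferReached):
		os.Exit(8)
	case errors.Is(err, errMaxDurationReached):
		os.Exit(10)
	default:
		os.Exit(2)
	}
}

func main() {
	// errors.Is sees through fmt.Errorf %w wrapping, so a wrapped
	// sentinel still selects exit code 10.
	resolveExitCode(fmt.Errorf("sync: %w", errMaxDurationReached))
}
```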

View File

@@ -4,22 +4,20 @@
 package mountlib

 import (
-	"errors"
 	"fmt"
 	"path/filepath"
 	"strings"
 	"time"

-	"github.com/artyom/mtab"
+	"github.com/moby/sys/mountinfo"
 )

 const (
-	mtabPath     = "/proc/mounts"
 	pollInterval = 100 * time.Millisecond
 )

 // CheckMountEmpty checks if folder is not already a mountpoint.
-// On Linux we use the OS-specific /proc/mount API so the check won't access the path.
+// On Linux we use the OS-specific /proc/self/mountinfo API so the check won't access the path.
 // Directories marked as "mounted" by autofs are considered not mounted.
 func CheckMountEmpty(mountpoint string) error {
 	const msg = "directory already mounted, use --allow-non-empty to mount anyway: %s"
@@ -29,43 +27,48 @@ func CheckMountEmpty(mountpoint string) error {
return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
}
entries, err := mtab.Entries(mtabPath)
infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter(mountpointAbs))
if err != nil {
return fmt.Errorf("cannot read %s: %w", mtabPath, err)
return fmt.Errorf("cannot get mounts: %w", err)
}
foundAutofs := false
for _, entry := range entries {
if entry.Dir == mountpointAbs {
if entry.Type != "autofs" {
return fmt.Errorf(msg, mountpointAbs)
}
foundAutofs = true
for _, info := range infos {
if info.FSType != "autofs" {
return fmt.Errorf(msg, mountpointAbs)
}
foundAutofs = true
}
// It isn't safe to list an autofs in the middle of mounting
if foundAutofs {
return nil
}
return checkMountEmpty(mountpoint)
}
// CheckMountReady checks whether mountpoint is mounted by rclone.
// Only mounts with type "rclone" or "fuse.rclone" count.
func CheckMountReady(mountpoint string) error {
const msg = "mount not ready: %s"
mountpointAbs, err := filepath.Abs(mountpoint)
if err != nil {
return fmt.Errorf("cannot get absolute path: %s: %w", mountpoint, err)
}
entries, err := mtab.Entries(mtabPath)
infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter(mountpointAbs))
if err != nil {
return fmt.Errorf("cannot read %s: %w", mtabPath, err)
return fmt.Errorf("cannot get mounts: %w", err)
}
for _, entry := range entries {
if entry.Dir == mountpointAbs && strings.Contains(entry.Type, "rclone") {
for _, info := range infos {
if strings.Contains(info.FSType, "rclone") {
return nil
}
}
return errors.New("mount not ready")
return fmt.Errorf(msg, mountpointAbs)
}
// WaitMountReady waits until mountpoint is mounted by rclone.
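For context, here is a standalone sketch of the `github.com/moby/sys/mountinfo` calls the new code relies on; the mount point path is hypothetical. `/proc/self/mountinfo` encodes spaces in paths as octal escapes, which the library decodes — avoiding the whitespace-splitting problems the commit message describes for WSL mounts with spaces:

```go
package main

import (
	"fmt"

	"github.com/moby/sys/mountinfo"
)

func main() {
	// SingleEntryFilter matches only the entry mounted exactly at the
	// given path, so unrelated mounts (including ones with spaces in
	// their paths) never reach the caller.
	infos, err := mountinfo.GetMounts(mountinfo.SingleEntryFilter("/mnt/rclone"))
	if err != nil {
		fmt.Println("cannot get mounts:", err)
		return
	}
	for _, info := range infos {
		fmt.Printf("%s is mounted with filesystem type %s\n", info.Mountpoint, info.FSType)
	}
}
```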

View File

@@ -136,6 +136,7 @@ WebDAV or S3, that work out of the box.)
{{< provider name="IDrive e2" home="https://www.idrive.com/e2/?refer=rclone" config="/s3/#idrive-e2" >}}
{{< provider name="IONOS Cloud" home="https://cloud.ionos.com/storage/object-storage" config="/s3/#ionos" >}}
{{< provider name="Koofr" home="https://koofr.eu/" config="/koofr/" >}}
{{< provider name="Leviia Object Storage" home="https://www.leviia.com/object-storage" config="/s3/#leviia" >}}
{{< provider name="Liara Object Storage" home="https://liara.ir/landing/object-storage" config="/s3/#liara-object-storage" >}}
{{< provider name="Mail.ru Cloud" home="https://cloud.mail.ru/" config="/mailru/" >}}
{{< provider name="Memset Memstore" home="https://www.memset.com/cloud/storage/" config="/swift/" >}}

View File

@@ -1454,14 +1454,14 @@ what will happen.
 ### --max-duration=TIME ###

-Rclone will stop scheduling new transfers when it has run for the
+Rclone will stop transferring when it has run for the
 duration specified.

 Defaults to off.

-When the limit is reached any existing transfers will complete.
+When the limit is reached all transfers will stop immediately.
+Use `--cutoff-mode` to modify this behaviour.

-Rclone won't exit with an error if the transfer limit is reached.
+Rclone will exit with exit code 10 if the duration limit is reached.
### --max-transfer=SIZE ###
@@ -1469,9 +1469,24 @@ Rclone will stop transferring when it has reached the size specified.
 Defaults to off.

 When the limit is reached all transfers will stop immediately.
+Use `--cutoff-mode` to modify this behaviour.

 Rclone will exit with exit code 8 if the transfer limit is reached.

+### --cutoff-mode=hard|soft|cautious ###
+
+This modifies the behavior of `--max-transfer` and `--max-duration`.
+
+Defaults to `--cutoff-mode=hard`.
+
+Specifying `--cutoff-mode=hard` will stop transferring immediately
+when Rclone reaches the limit.
+
+Specifying `--cutoff-mode=soft` will stop starting new transfers
+when Rclone reaches the limit.
+
+Specifying `--cutoff-mode=cautious` will try to prevent Rclone
+from reaching the limit. Only applicable for `--max-transfer`.
## -M, --metadata
Setting this flag enables rclone to copy the metadata from the source
@@ -1484,20 +1499,6 @@ Add metadata `key` = `value` when uploading. This can be repeated as
 many times as required. See the [metadata section](#metadata) for more
 info.

-### --cutoff-mode=hard|soft|cautious ###
-
-This modifies the behavior of `--max-transfer`
-
-Defaults to `--cutoff-mode=hard`.
-
-Specifying `--cutoff-mode=hard` will stop transferring immediately
-when Rclone reaches the limit.
-
-Specifying `--cutoff-mode=soft` will stop starting new transfers
-when Rclone reaches the limit.
-
-Specifying `--cutoff-mode=cautious` will try to prevent Rclone
-from reaching the limit.

 ### --modify-window=TIME ###
When checking whether a file has been modified, this is the maximum
@@ -2561,6 +2562,7 @@ it will log a high priority message if the retry was successful.
 * `7` - Fatal error (one that more retries won't fix, like account suspended) (Fatal errors)
 * `8` - Transfer exceeded - limit set by --max-transfer reached
 * `9` - Operation successful, but no files transferred
+* `10` - Duration exceeded - limit set by --max-duration reached
Environment Variables
---------------------
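To show the documented exit codes in use, a small hedged sketch of a caller reacting to them; the remotes and limits here are hypothetical:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	// Stop scheduling new transfers after five minutes and let the
	// in-flight ones finish (soft cutoff).
	cmd := exec.Command("rclone", "sync", "src:data", "dst:data",
		"--max-duration", "5m", "--cutoff-mode", "soft")
	err := cmd.Run()

	var exitErr *exec.ExitError
	switch {
	case err == nil:
		fmt.Println("sync finished within the limits")
	case errors.As(err, &exitErr) && exitErr.ExitCode() == 10:
		fmt.Println("duration limit reached (--max-duration)")
	case errors.As(err, &exitErr) && exitErr.ExitCode() == 8:
		fmt.Println("transfer limit reached (--max-transfer)")
	default:
		fmt.Println("sync failed:", err)
	}
}
```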

View File

@@ -86,7 +86,7 @@ These flags are available for every command.
       --max-delete int                       When synchronizing, limit the number of deletes (default -1)
       --max-delete-size SizeSuffix           When synchronizing, limit the total size of deletes (default off)
       --max-depth int                        If set limits the recursion depth to this (default -1)
-      --max-duration Duration                Maximum duration rclone will transfer data for (default 0s)
+      --max-duration Duration                Maximum duration rclone will transfer data for (default off)
       --max-size SizeSuffix                  Only transfer files smaller than this in KiB or suffix B|K|M|G|T|P (default off)
       --max-stats-groups int                 Maximum number of stats groups to keep in memory, on max oldest is discarded (default 1000)
       --max-transfer SizeSuffix              Maximum size of data to transfer (default off)

View File

@@ -20,6 +20,14 @@ import (
"github.com/rclone/rclone/fs/operations"
)
// ErrorMaxDurationReached defines error when transfer duration is reached
// Used for checking on exit and matching to correct exit code.
var ErrorMaxDurationReached = errors.New("max transfer duration reached as set by --max-duration")
// ErrorMaxDurationReachedFatal is returned from when the max
// duration limit is reached.
var ErrorMaxDurationReachedFatal = fserrors.FatalError(ErrorMaxDurationReached)
type syncCopyMove struct {
// parameters
fdst fs.Fs
@@ -845,10 +853,6 @@ func (s *syncCopyMove) tryRename(src fs.Object) bool {
 	return true
 }

-// errorMaxDurationReached defines error when transfer duration is reached
-// Used for checking on exit and matching to correct exit code.
-var errorMaxDurationReached = fserrors.FatalError(errors.New("max transfer duration reached as set by --max-duration"))

// Syncs fsrc into fdst
//
// If Delete is true then it deletes any files in fdst that aren't in fsrc
@@ -945,8 +949,8 @@ func (s *syncCopyMove) run() error {
 	// If the duration was exceeded then add a Fatal Error so we don't retry
 	if !s.maxDurationEndTime.IsZero() && time.Since(s.maxDurationEndTime) > 0 {
-		fs.Errorf(s.fdst, "%v", errorMaxDurationReached)
-		s.processError(errorMaxDurationReached)
+		fs.Errorf(s.fdst, "%v", ErrorMaxDurationReachedFatal)
+		s.processError(ErrorMaxDurationReachedFatal)
 	}
// Print nothing to transfer message if there were no transfers and no errors
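The pattern above — an exported plain sentinel plus a fatal wrapper that is what actually gets returned — works because the wrapper unwraps to the sentinel, so `errors.Is` still matches it in `resolveExitCode` and in the test below. A minimal sketch with a home-made wrapper (not rclone's real `fserrors` implementation):

```go
package main

import (
	"errors"
	"fmt"
)

// Exported sentinel that callers match with errors.Is.
var ErrMaxDurationReached = errors.New("max transfer duration reached")

// fatalError marks an error as non-retryable while still unwrapping
// to the original sentinel.
type fatalError struct{ err error }

func (e fatalError) Error() string { return "fatal: " + e.err.Error() }
func (e fatalError) Unwrap() error { return e.err }

func main() {
	var err error = fatalError{err: ErrMaxDurationReached}
	// true: errors.Is follows Unwrap through the fatal wrapper.
	fmt.Println(errors.Is(err, ErrMaxDurationReached))
}
```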

View File

@@ -996,7 +996,7 @@ func testSyncWithMaxDuration(t *testing.T, cutoffMode fs.CutoffMode) {
 		accounting.GlobalStats().ResetCounters()
 		startTime := time.Now()
 		err := Sync(ctx, r.Fremote, r.Flocal, false)
-		require.True(t, errors.Is(err, errorMaxDurationReached))
+		require.True(t, errors.Is(err, ErrorMaxDurationReached))

 		if cutoffMode == fs.CutoffModeHard {
 			r.CheckRemoteItems(t, file1)

go.mod
View File

@@ -15,7 +15,6 @@ require (
 	github.com/abbot/go-http-auth v0.4.0
 	github.com/anacrolix/dms v1.5.0
 	github.com/anacrolix/log v0.13.1
-	github.com/artyom/mtab v1.0.0
 	github.com/atotto/clipboard v0.1.4
 	github.com/aws/aws-sdk-go v1.44.246
 	github.com/buengese/sgzip v0.1.1
@@ -39,6 +38,7 @@ require (
 	github.com/mattn/go-colorable v0.1.13
 	github.com/mattn/go-runewidth v0.0.14
 	github.com/mitchellh/go-homedir v1.1.0
+	github.com/moby/sys/mountinfo v0.6.2
 	github.com/ncw/go-acd v0.0.0-20201019170801-fe55f33415b1
 	github.com/ncw/swift/v2 v2.0.1
 	github.com/oracle/oci-go-sdk/v65 v65.34.0

go.sum
View File

@@ -82,8 +82,6 @@ github.com/anacrolix/log v0.13.1 h1:BmVwTdxHd5VcNrLylgKwph4P4wf+5VvPgOK4yi91fTY=
 github.com/anacrolix/log v0.13.1/go.mod h1:D4+CvN8SnruK6zIFS/xPoRJmtvtnxs+CSfDQ+BFxZ68=
 github.com/anacrolix/missinggo v1.1.0/go.mod h1:MBJu3Sk/k3ZfGYcS7z18gwfu72Ey/xopPFJJbTi5yIo=
 github.com/anacrolix/tagflag v0.0.0-20180109131632-2146c8d41bf0/go.mod h1:1m2U/K6ZT+JZG0+bdMK6qauP49QT4wE5pmhJXOKKCHw=
-github.com/artyom/mtab v1.0.0 h1:r7OSVo5Jeqi8+LotZ0rT2kzfPIBp9KCpEJP8RQqGmSE=
-github.com/artyom/mtab v1.0.0/go.mod h1:EHpkp5OmPfS1yZX+/DFTztlJ9di5UzdDLX1/XzWPXw8=
 github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
 github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
 github.com/aws/aws-sdk-go v1.44.246 h1:iLxPX6JU0bxAci9R6/bp8rX0kL871ByCTx0MZlQWv1U=

View File

@@ -22,4 +22,6 @@ const (
 	TransferExceeded
 	// NoFilesTransferred everything succeeded, but no transfer was made.
 	NoFilesTransferred
+	// DurationExceeded is returned when transfer duration exceeded the quota.
+	DurationExceeded
 )
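Because this is an `iota` block, appending `DurationExceeded` after `NoFilesTransferred` gives it the next value, 10, matching the documented exit-code list. A sketch of the numbering under that assumption (the names for the earlier codes follow the documented list, not necessarily the exact source):

```go
package exitcode

// Exit codes numbered by iota in declaration order, 0 through 10.
const (
	Success            = iota // 0 - everything went fine
	UsageError                // 1 - syntax or usage error
	UncategorizedError        // 2 - error not otherwise categorised
	DirNotFound               // 3 - directory not found
	FileNotFound              // 4 - file not found
	RetryError                // 5 - temporary error, retries might fix
	NoRetryError              // 6 - error that retries won't fix
	FatalError                // 7 - fatal error
	TransferExceeded          // 8 - limit set by --max-transfer reached
	NoFilesTransferred        // 9 - successful, but no files transferred
	DurationExceeded          // 10 - limit set by --max-duration reached
)
```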

View File

@@ -22,10 +22,9 @@ import (
 // Dir represents a directory entry
 type Dir struct {
-	vfs          *VFS        // read only
-	inode        uint64      // read only: inode number
-	f            fs.Fs       // read only
-	cleanupTimer *time.Timer // read only: timer to call cacheCleanup
+	vfs   *VFS   // read only
+	inode uint64 // read only: inode number
+	f     fs.Fs  // read only

 	mu     sync.RWMutex // protects the following
 	parent *Dir         // parent, nil for root
@@ -38,8 +37,6 @@ type Dir struct {
 	modTimeMu sync.Mutex // protects the following
 	modTime   time.Time
-
-	_childVirtuals atomic.Int32 // non zero if any children have virtual directory entries
 }
//go:generate stringer -type=vState
@@ -55,7 +52,7 @@ const (
 )

 func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
-	d := &Dir{
+	return &Dir{
 		vfs:    vfs,
 		f:      f,
 		parent: parent,
@@ -65,25 +62,6 @@ func newDir(vfs *VFS, f fs.Fs, parent *Dir, fsDir fs.Directory) *Dir {
 		inode:  newInode(),
 		items:  make(map[string]Node),
 	}
-	d.cleanupTimer = time.AfterFunc(vfs.Opt.DirCacheTime*2, d.cacheCleanup)
-	return d
 }
-
-func (d *Dir) cacheCleanup() {
-	defer func() {
-		// We should never panic here
-		_ = recover()
-	}()
-	when := time.Now()
-	d.mu.Lock()
-	_, stale := d._age(when)
-	d.mu.Unlock()
-	if stale {
-		d.ForgetAll()
-	}
-}
// String converts it to printable
@@ -196,81 +174,38 @@ func (d *Dir) Node() Node {
 	return d
 }

-// hasVirtuals returns whether the directory has virtual entries
-func (d *Dir) hasVirtuals() bool {
-	return d._childVirtuals.Load() != 0
-}
-
-// getVirtuals returns the number of virtual entries in this and children
-func (d *Dir) getVirtuals() int32 {
-	return d._childVirtuals.Load()
-}
-
-// addVirtuals increments or decrements the number of virtual
-// directories by the amount given in this and all the parent
-// directories.
-func (d *Dir) addVirtuals(inc int32) {
-	for {
-		d._childVirtuals.Add(inc)
-		d.mu.RLock()
-		parent := d.parent
-		d.mu.RUnlock()
-		if parent == nil {
-			break
-		}
-		d = parent
-	}
-}
-
-// _addVirtuals increments or decrements the number of virtual
-// directories by the amount given in this and all the parent
-// directories.
-//
-// The dir lock must be held to call this
-func (d *Dir) _addVirtuals(inc int32) {
-	d._childVirtuals.Add(inc)
-	if d.parent == nil {
-		return
-	}
-	d.parent.addVirtuals(inc)
-}
-
 // ForgetAll forgets directory entries for this directory and any children.
 //
 // It does not invalidate or clear the cache of the parent directory.
 //
-// Directories or parents of directories with virtual entries won't be
-// forgotten.
-func (d *Dir) ForgetAll() {
-	d.mu.RLock()
+// It returns true if the directory or any of its children had virtual entries
+// so could not be forgotten. Children which didn't have virtual entries and
+// children with virtual entries will be forgotten even if true is returned.
+func (d *Dir) ForgetAll() (hasVirtual bool) {
+	d.mu.Lock()
+	defer d.mu.Unlock()
 	fs.Debugf(d.path, "forgetting directory cache")
 	for _, node := range d.items {
 		if dir, ok := node.(*Dir); ok {
-			dir.ForgetAll()
+			if dir.ForgetAll() {
+				hasVirtual = true
+			}
 		}
 	}
-	d.mu.RUnlock()
-	d.mu.Lock()
-	defer d.mu.Unlock()
 	// Purge any unnecessary virtual entries
 	d._purgeVirtual()
 	d.read = time.Time{}
+	// Check if this dir has virtual entries
+	if len(d.virtual) != 0 {
+		hasVirtual = true
+	}
 	// Don't clear directory entries if there are virtual entries in this
 	// directory or any children
-	if d.hasVirtuals() {
-		d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
-		return
+	if !hasVirtual {
+		d.items = make(map[string]Node)
 	}
-	// Forget the items and stop the timer
-	d.items = make(map[string]Node)
-	d.cleanupTimer.Stop()
+	return hasVirtual
 }
// forgetDirPath clears the cache for itself and all subdirectories if
@@ -415,9 +350,6 @@ func (d *Dir) renameTree(dirPath string) {
 // reading everything again
 func (d *Dir) rename(newParent *Dir, fsDir fs.Directory) {
 	d.ForgetAll()
-	virtuals := d.getVirtuals()
-	d.addVirtuals(-virtuals)
-	newParent.addVirtuals(virtuals)

 	d.modTimeMu.Lock()
 	d.modTime = fsDir.ModTime(context.TODO())
@@ -454,7 +386,6 @@ func (d *Dir) addObject(node Node) {
 	d.items[leaf] = node
 	if d.virtual == nil {
 		d.virtual = make(map[string]vState)
-		d._addVirtuals(1)
 	}
 	vAdd := vAddFile
 	if node.IsDir() {
@@ -503,7 +434,6 @@ func (d *Dir) delObject(leaf string) {
 	delete(d.items, leaf)
 	if d.virtual == nil {
 		d.virtual = make(map[string]vState)
-		d._addVirtuals(1)
 	}
 	d.virtual[leaf] = vDel
 	fs.Debugf(d.path, "Added virtual directory entry %v: %q", vDel, leaf)
@@ -545,8 +475,6 @@ func (d *Dir) _readDir() error {
 	}
 	d.read = when
-	d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
-
 	return nil
 }
@@ -565,7 +493,6 @@ func (d *Dir) _deleteVirtual(name string) {
 	delete(d.virtual, name)
 	if len(d.virtual) == 0 {
 		d.virtual = nil
-		d._addVirtuals(-1)
 	}
 	fs.Debugf(d.path, "Removed virtual directory entry %v: %q", virtualState, name)
 }
@@ -727,7 +654,6 @@ func (d *Dir) _readDirFromEntries(entries fs.DirEntries, dirTree dirtree.DirTree
 			dir.read = time.Time{}
 		} else {
 			dir.read = when
-			dir.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
 		}
 		dir.mu.Unlock()
 		if err != nil {
@@ -765,7 +691,6 @@ func (d *Dir) readDirTree() error {
 	}
 	fs.Debugf(d.path, "Reading directory tree done in %s", time.Since(when))
 	d.read = when
-	d.cleanupTimer.Reset(d.vfs.Opt.DirCacheTime * 2)
 	return nil
 }
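The rework drops the per-directory cleanup timer and the `_childVirtuals` counter: `ForgetAll` now does a single recursive pass and reports upward whether virtual entries survived anywhere below. A self-contained sketch of that propagation pattern (a toy type, not the real `Dir`):

```go
package main

import "fmt"

// node is a toy stand-in for vfs.Dir: cached children plus "virtual"
// entries that must survive a cache flush.
type node struct {
	children map[string]*node
	virtual  map[string]bool
}

// forgetAll clears cached children unless this node or any descendant
// still holds virtual entries, and reports that fact to the caller,
// mirroring the new Dir.ForgetAll.
func (n *node) forgetAll() (hasVirtual bool) {
	for _, child := range n.children {
		if child.forgetAll() {
			hasVirtual = true
		}
	}
	if len(n.virtual) != 0 {
		hasVirtual = true
	}
	if !hasVirtual {
		n.children = map[string]*node{}
	}
	return hasVirtual
}

func main() {
	leaf := &node{virtual: map[string]bool{"newfile": true}}
	root := &node{children: map[string]*node{"dir": leaf}}
	fmt.Println(root.forgetAll())   // true: a child still has virtual entries
	fmt.Println(len(root.children)) // 1: the subtree was kept
}
```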