mirror of https://github.com/rclone/rclone.git synced 2026-01-28 07:13:39 +00:00

Compare commits

...

57 Commits

Author SHA1 Message Date
Nick Craig-Wood
f499c625bc docker: make docker volume accept all global options
Fixes #8401
2025-09-09 16:27:04 +01:00
Nick Craig-Wood
c5a3e86df8 operations: fix partial name collisions for non --inplace copies
In this commit:

c63f1865f3 operations: copy: generate stable partial suffix

We made the partial suffix for non --inplace copies stable. This was a
hash based on the file fingerprint.

However, given a directory of files which all have the same fingerprint,
the partial suffix collides. On some backends (e.g. the local backend)
the fingerprint is just the size and modification time, so files with
different contents can collide.

The effect of collisions was hash failures on copy when using
--transfers > 1. These copies invariably retried successfully, which
probably explains why this bug hasn't been reported.

This fixes the problem by adding the file name to the hash.

It also makes sure the hash is always represented as 8 hex digits for
consistency.
2025-09-05 16:09:46 +01:00
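A minimal sketch of the fixed scheme, with illustrative names and hash choice (the real code lives in fs/operations and may differ): hash the fingerprint together with the remote name, and render the result as exactly 8 hex digits.

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    // partialSuffix derives a stable suffix for a .partial file from the
    // file's fingerprint plus its name, so two files with identical
    // fingerprints no longer collide.
    func partialSuffix(fingerprint, remote string) string {
        sum := crc32.ChecksumIEEE([]byte(fingerprint + remote))
        return fmt.Sprintf(".%08x.partial", sum) // always 8 hex digits
    }

    func main() {
        fmt.Println(partialSuffix("12,2025-09-05T16:09:46", "a.txt"))
        fmt.Println(partialSuffix("12,2025-09-05T16:09:46", "b.txt")) // now differs
    }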
Ed Craig-Wood
4026e8db20 drive: docs: update making your own client ID instructions
Update the instructions to reflect the most recent changes to the Google Cloud console.
2025-09-05 15:30:52 +01:00
dougal
c9ce686231 swift: add ListP interface - #4788 2025-09-05 15:29:37 +01:00
dougal
b085598cbc memory: add ListP interface - #4788 2025-09-05 15:29:37 +01:00
dougal
bb47dccdeb oracleobjectstorage: add ListP interface - #4788 2025-09-05 15:29:37 +01:00
dougal
7a279d2789 B2: add ListP interface - #4788 2025-09-05 15:29:37 +01:00
dougal
9bd5df658a azureblob: add ListP interface - #4788 2025-09-05 15:29:37 +01:00
dougal
d512e4d566 googlecloudstorage: add ListP interface - Fixes #8763 2025-09-05 15:29:37 +01:00
dependabot[bot]
3dd68c824a build: bump actions/github-script from 7 to 8
Bumps [actions/github-script](https://github.com/actions/github-script) from 7 to 8.
- [Release notes](https://github.com/actions/github-script/releases)
- [Commits](https://github.com/actions/github-script/compare/v7...v8)

---
updated-dependencies:
- dependency-name: actions/github-script
  dependency-version: '8'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-05 08:14:32 +02:00
dependabot[bot]
fbe73c993b build: bump actions/setup-go from 5 to 6
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 5 to 6.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-09-05 08:12:38 +02:00
nielash
d915f75edf bisync: fix chunker integration tests
Before this change, TestChunkerS3: tests were failing because our use of
obj.Remove (for "modtime_write_test") created an unexpected extra transfer.

This is because chunker calls operations.Move for removes, which (per its
function comment) is supposed to be only accounted as a check. But because S3
can Copy but not Move, the move falls back to copy and ends up getting counted
as a transfer anyway.
99e8a63df2/fs/operations/operations.go (L506)
99e8a63df2/fs/operations/copy.go (L381)

This is probably a bug that deserves a proper fix in operations. But in
the meantime, we can work around it by giving our "modtime_write_test" its
own unique stats group.
2025-09-04 14:38:10 -04:00
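The workaround, visible in the test diff further down, is to run the probe under its own accounting stats group so its transfers don't count against the test's expectations. A sketch of the pattern, assuming the rclone source tree for the imports:

    package main

    import (
        "context"

        "github.com/rclone/rclone/fs/accounting"
        "github.com/rclone/rclone/lib/random"
    )

    // withIsolatedStats returns a context whose accounting goes to a fresh,
    // randomly named stats group, so transfers recorded under it do not
    // pollute the caller's counters.
    func withIsolatedStats(ctx context.Context) context.Context {
        return accounting.WithStatsGroup(ctx, random.String(8))
    }

    func main() {
        _ = withIsolatedStats(context.Background())
    }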
nielash
26b629f42f bisync: fix koofr integration tests
Before this change, koofr failed certain bisync tests because it can't set mod
time without deleting and re-uploading. This caused the "nothing to transfer" log
not to be printed where expected (as it is only printed when there are 0
transfers, but koofr requires extra transfers to set modtime).

This change fixes the issue by ignoring the absence of the "nothing to transfer"
log line on backends that return `fs.ErrorCantSetModTimeWithoutDelete` for
`obj.SetModTime`.
2025-09-04 14:38:10 -04:00
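In sketch form, the test probes obj.SetModTime and relaxes the golden-log expectations when the backend reports fs.ErrorCantSetModTimeWithoutDelete (the helper name here is illustrative; the actual check appears in the bisync test diff below):

    package main

    import (
        "fmt"

        "github.com/rclone/rclone/fs"
    )

    // ignoreNothingToTransfer reports whether the "nothing to transfer"
    // golden-log line should be ignored for this backend, given the error
    // returned by a probe call to obj.SetModTime.
    func ignoreNothingToTransfer(err error) bool {
        return err == fs.ErrorCantSetModTimeWithoutDelete
    }

    func main() {
        fmt.Println(ignoreNothingToTransfer(fs.ErrorCantSetModTimeWithoutDelete)) // true
    }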
Nick Craig-Wood
ceaac2194c internetarchive: fix server side copy files with spaces
In this commit we broke server side copy for files with spaces:

4c5764204d internetarchive: fix server side copy files with &

This fixes the problem by using rest.URLPathEscapeAll, which escapes
everything possible.

Fixes #8754
2025-09-04 10:37:27 +01:00
Nick Craig-Wood
1f14b6aa35 lib/rest: add URLPathEscapeAll to URL escape as many chars as possible 2025-09-04 10:37:27 +01:00
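The exact escape set is defined in lib/rest; below is a standalone sketch of the idea. Percent-encoding every byte except '/' and unreserved alphanumerics avoids url.QueryEscape's space-to-'+' behaviour (the old quotePath, removed in the internetarchive diff below, used QueryEscape per segment).

    package main

    import (
        "fmt"
        "strings"
    )

    // urlPathEscapeAll percent-encodes every byte of a path except '/'
    // and unreserved alphanumerics. Unlike url.QueryEscape, spaces become
    // "%20", not "+". The real rest.URLPathEscapeAll may keep a slightly
    // different set of bytes unescaped.
    func urlPathEscapeAll(s string) string {
        const hexDigits = "0123456789ABCDEF"
        var b strings.Builder
        for i := 0; i < len(s); i++ {
            c := s[i]
            switch {
            case c == '/',
                'A' <= c && c <= 'Z',
                'a' <= c && c <= 'z',
                '0' <= c && c <= '9':
                b.WriteByte(c)
            default:
                b.WriteByte('%')
                b.WriteByte(hexDigits[c>>4])
                b.WriteByte(hexDigits[c&0x0F])
            }
        }
        return b.String()
    }

    func main() {
        fmt.Println(urlPathEscapeAll("dir/file name & more.txt"))
        // dir/file%20name%20%26%20more%2Etxt
    }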
Nick Craig-Wood
dd75af6a18 Add alternate email for dougal to contributors 2025-09-04 10:37:27 +01:00
dougal
99e8a63df2 test speed: add command to test a specified remotes speed
Runs a speed test, trying to stay within a given time budget, by uploading
randomly created files to the remote and then downloading them again.

Fixes #3198
2025-09-03 12:37:52 +01:00
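Hedged usage sketch (exact flags may differ from the final command):

    rclone test speed remote: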
Nick Craig-Wood
0019e18ac3 docs: add link to MEGA S4 from MEGA page 2025-09-02 17:22:32 +01:00
Nick Craig-Wood
218c3bf6e9 Add Robin Rolf to contributors 2025-09-02 17:22:32 +01:00
Nick Craig-Wood
8f9702583d Add anon-pradip to contributors 2025-09-02 17:22:32 +01:00
Robin Rolf
e6578fb5a1 s3: Add Intercolo provider 2025-09-02 16:34:43 +01:00
albertony
fa1d7da272 gendocs: refactor and add logging of skipped command docs 2025-09-02 14:06:31 +02:00
albertony
813708c24d gendocs: ignore missing rclone_mount.md, rclone_nfsmount.md, rclone_serve_nfs.md on windows 2025-09-02 14:06:31 +02:00
nielash
fee4716343 bin: add bisync.md generator
This change adds a make_bisync_docs.go step to dynamically update the list of
ignored and failed tests in bisync.md.
2025-09-01 14:43:40 -04:00
nielash
6e9a675b3f fstest: refactor to decouple package from implementation 2025-09-01 14:43:40 -04:00
nielash
7f5a444350 gendocs: ignore missing rclone_mount.md on macOS 2025-09-01 14:43:40 -04:00
nielash
d2916ac5c7 bisync: ignore expected "nothing to transfer" differences on tests
The "There was nothing to transfer" log is only printed when the number of
transfers is exactly 0. However, there are a variety of reasons why the transfer
count would be expected to differ between backends. For example, if either side
lacks hashes, the sync may in fact need to transfer, where it would otherwise
skip based on hash or just update modtime. Transfer stats will also differ in
the "src and dst identical but can't set mod time without deleting and re-
uploading" scenario (because the re-upload is a transfer), and where --download-hash
is needed (because calculating the hash requires downloading the file, which is
a transfer).

Before this change, these expected differences would result in erroneous test
failures. This change fixes the issue by ignoring the absence of the "nothing to
transfer" log where it is expected.

Note that this issue did not occur before
9e200531b1
because the number of transfers was not getting reset between test steps,
sometimes resulting in an artificially inflated transfers count.
2025-09-01 14:05:00 -04:00
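A sketch of the mechanism, with illustrative names: the golden-log comparison drops any line matching a per-backend pattern before diffing, so an expected-but-absent line cannot fail the test (the actual code uses logReplacements/dropMe, visible in the bisync test diff below).

    package main

    import (
        "fmt"
        "regexp"
        "strings"
    )

    // dropMatching removes log lines matching any of the given patterns
    // before the golden-log comparison.
    func dropMatching(log string, patterns ...string) string {
        var out []string
    Line:
        for _, line := range strings.Split(log, "\n") {
            for _, p := range patterns {
                if regexp.MustCompile(p).MatchString(line) {
                    continue Line
                }
            }
            out = append(out, line)
        }
        return strings.Join(out, "\n")
    }

    func main() {
        log := "INFO : There was nothing to transfer\nINFO : Bisync successful"
        fmt.Println(dropMatching(log, `^.*There was nothing to transfer.*$`))
    }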
nielash
3369a15285 bisync: fix TestBisyncConcurrent ignoring -case
Before this change, TestBisyncConcurrent would still run the "basic" test case
if a non-blank -case arg was used to specify a case other than "basic". This
change fixes it by skipping in this scenario.
2025-09-01 14:05:00 -04:00
nielash
58aee30de7 bisync: make number of parallel tests configurable
Example usage:
go test ./cmd/bisync -remote local -race -pcount 10
2025-09-01 14:05:00 -04:00
anon-pradip
ef919241a6 docs: clarify subcommand description in rclone usage 2025-09-01 17:09:51 +01:00
albertony
d5386bb9a7 docs: fix description of regex syntax of name transform 2025-09-01 16:40:14 +01:00
albertony
bf46ea5611 docs: add some more details about supported regex syntax 2025-09-01 16:40:14 +01:00
nielash
b8a379c9c9 makefile: fix lib/transform docs not getting updated
As of
4280ec75cc
the lib/transform docs are generated with //go:generate and embedded with
//go:embed.

Before this change, however, they were not getting automatically updated with
subsequent changes (like
fe62a2bb4e)
because `go generate ./lib/transform` was not being run as part of the release
making process.

This change fixes that by running it in `make commanddocs`.
2025-09-01 16:39:20 +01:00
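The pattern in general form, with hypothetical file names: a //go:generate directive rewrites the docs file, and //go:embed compiles it in, so the embedded copy goes stale unless go generate is re-run (which make commanddocs now does).

    // Package transform sketches the generate+embed pattern.
    package transform

    import _ "embed"

    // Regenerated by `go generate ./lib/transform`.
    //go:generate go run gen.go

    // The embedded copy is whatever was on disk at build time; it goes
    // stale unless go generate has been re-run before building.
    //
    //go:embed transform.md
    var docs string

    var _ = docs // referenced so the sketch compiles cleanly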
Nick Craig-Wood
8c37a9c2ef lib/pool: fix flaky test which was causing timeouts
This puts a limit on the number of allocation failures in a row which
stops the test timing out as the exponential backoffs get very large.
2025-09-01 16:25:31 +01:00
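The shape of the fix, as a hedged sketch (names are illustrative): retry with exponential backoff, but cap the number of consecutive failures so the delays cannot grow without bound.

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // allocateWithBackoff retries fn with exponential backoff, but gives
    // up after maxFails consecutive failures instead of backing off
    // indefinitely, so a test cannot time out on ever-larger sleeps.
    func allocateWithBackoff(fn func() error, maxFails int) error {
        delay := time.Millisecond
        for fails := 0; fails < maxFails; fails++ {
            if err := fn(); err == nil {
                return nil
            }
            time.Sleep(delay)
            delay *= 2 // exponential backoff grows quickly
        }
        return errors.New("allocation failed too many times in a row")
    }

    func main() {
        err := allocateWithBackoff(func() error { return errors.New("ENOMEM") }, 5)
        fmt.Println(err)
    }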
Nick Craig-Wood
963a72ce01 Add dougal to contributors 2025-09-01 16:25:31 +01:00
dougal
a4962e21d1 vfs: fix SIGHUP killing serve instead of flushing directory caches
Before, rclone serve would crash when sent a SIGHUP, which contradicts
the documentation saying it should flush the directory caches.

Moved signal handling from the mount into the VFS layer, which now
handles SIGHUP on all uses of the VFS, including mount and serve.
Fixes #8607
2025-09-01 13:15:11 +01:00
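A minimal sketch of the documented behaviour, assuming a unix-like OS (function name is hypothetical): catch SIGHUP and call a flush callback instead of letting the default action terminate the process.

    //go:build !windows

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
    )

    // watchSIGHUP installs a handler that calls flush on each SIGHUP
    // instead of letting the signal kill the process.
    func watchSIGHUP(flush func()) {
        ch := make(chan os.Signal, 1)
        signal.Notify(ch, syscall.SIGHUP)
        go func() {
            for range ch {
                flush()
            }
        }()
    }

    func main() {
        watchSIGHUP(func() { fmt.Println("flushing directory caches") })
        select {} // block forever; send SIGHUP to trigger a flush
    }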
nielash
9e200531b1 bisync: use unique stats groups on tests 2025-08-30 17:46:33 +01:00
Nick Craig-Wood
04683f2032 fstest: stop errors in test cleanup changing the global stats
This was causing the concurrent bisync tests to fail every now and again.
2025-08-30 17:46:33 +01:00
Nick Craig-Wood
b41f7994da Add Motte to contributors 2025-08-30 17:46:33 +01:00
Nick Craig-Wood
13a5ffe391 Add Claudius Ellsel to contributors 2025-08-30 17:46:33 +01:00
Nick Craig-Wood
85deea82e4 build: add local markdown linting to make check 2025-08-28 16:56:40 +01:00
Motte
89a8ea7a91 lsf: add support for unix and unixnano time formats 2025-08-28 16:28:49 +01:00
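Hedged usage sketch (lsf already supports --time-format; unix and unixnano are the new values, and the format letters p/t stand for path and modtime):

    rclone lsf --format pt --time-format unix remote:path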
albertony
c8912eb6a0 docs: remove broken links from rc to commands 2025-08-28 11:52:18 +02:00
albertony
01674949a1 hashsum: changed output format when listing algorithms 2025-08-27 23:36:28 +02:00
Claudius Ellsel
98e1d3ee73 docs: add example of how to add date as suffix 2025-08-27 22:01:28 +02:00
Nick Craig-Wood
50d7a80331 box: fix about after change in API return - fixes #8776 2025-08-26 18:03:09 +01:00
Nick Craig-Wood
bc3e8e1abd Add skbeh to contributors 2025-08-26 18:03:09 +01:00
Nick Craig-Wood
30e80d0716 Add Tilman Vogel to contributors 2025-08-26 18:03:09 +01:00
albertony
f288920696 docs: fix incorrectly escaped windows path separators 2025-08-26 14:29:33 +02:00
albertony
fa2bbd705c build: restore error handling in gendocs 2025-08-26 14:28:05 +02:00
skbeh
43a794860f combine: propagate SlowHash feature 2025-08-26 12:39:32 +01:00
albertony
adfe6b3bad docs/oracleobjectstorage: add introduction before external links and remove broken link 2025-08-26 12:04:00 +02:00
albertony
091ccb649c docs: fix markdown lint issues in backend docs 2025-08-26 12:04:00 +02:00
albertony
2e02d49578 docs: fix markdown lint issues in command docs 2025-08-26 12:04:00 +02:00
albertony
514535ad46 docs: update markdown code block json indent size 2 2025-08-26 12:04:00 +02:00
Tilman Vogel
b010591c96 mount: do not log successful unmount as an error - fixes #8766 2025-08-23 16:30:33 +01:00
Nick Craig-Wood
1aaee9edce Start v1.72.0-DEV development 2025-08-22 17:42:25 +01:00
225 changed files with 5176 additions and 2824 deletions

View File

@@ -100,7 +100,7 @@ jobs:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: ${{ matrix.go }}
check-latest: true
@@ -222,7 +222,7 @@ jobs:
- name: Install Go
id: setup-go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: '>=1.24.0-rc.1'
check-latest: true
@@ -311,7 +311,7 @@ jobs:
# Upgrade together with NDK version
- name: Set up Go
uses: actions/setup-go@v5
uses: actions/setup-go@v6
with:
go-version: '>=1.25.0-rc.1'

View File

@@ -92,7 +92,7 @@ jobs:
# There's no way around this, because "ImageOS" is only available to
# processes, but the setup-go action uses it in its key.
id: imageos
uses: actions/github-script@v7
uses: actions/github-script@v8
with:
result-encoding: string
script: |

View File

@@ -100,6 +100,7 @@ compiletest:
check: rclone
@echo "-- START CODE QUALITY REPORT -------------------------------"
@golangci-lint run $(LINTTAGS) ./...
@bin/markdown-lint
@echo "-- END CODE QUALITY REPORT ---------------------------------"
# Get the build dependencies
@@ -144,9 +145,11 @@ MANUAL.txt: MANUAL.md
pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt
commanddocs: rclone
go generate ./lib/transform
-@rmdir -p '$$HOME/.config/rclone'
XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
@[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
go run bin/make_bisync_docs.go ./docs/content/
backenddocs: rclone bin/make_backend_docs.py
-@rmdir -p '$$HOME/.config/rclone'

View File

@@ -59,6 +59,7 @@ directories to and from different cloud storage providers.
- Internet Archive [:page_facing_up:](https://rclone.org/internetarchive/)
- Jottacloud [:page_facing_up:](https://rclone.org/jottacloud/)
- IBM COS S3 [:page_facing_up:](https://rclone.org/s3/#ibm-cos-s3)
- Intercolo Object Storage [:page_facing_up:](https://rclone.org/s3/#intercolo)
- IONOS Cloud [:page_facing_up:](https://rclone.org/s3/#ionos)
- Koofr [:page_facing_up:](https://rclone.org/koofr/)
- Leviia Object Storage [:page_facing_up:](https://rclone.org/s3/#leviia)

View File

@@ -1 +1 @@
v1.71.0
v1.72.0

View File

@@ -1338,9 +1338,9 @@ func (f *Fs) containerOK(container string) bool {
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
if !f.containerOK(containerName) {
return nil, fs.ErrorDirNotFound
return fs.ErrorDirNotFound
}
err = f.list(ctx, containerName, directory, prefix, addContainer, false, int32(f.opt.ListChunkSize), func(remote string, object *container.BlobItem, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -1348,16 +1348,16 @@ func (f *Fs) listDir(ctx context.Context, containerName, directory, prefix strin
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(containerName)
return entries, nil
return nil
}
// listContainers returns all the containers to out
@@ -1393,14 +1393,47 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
return f.listContainers(ctx)
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
}
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return list.Flush()
}
// ListR lists the objects and directories of the Fs starting
@@ -3156,6 +3189,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Purger = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}

View File

@@ -847,7 +847,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *api.File
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
last := ""
err = f.list(ctx, bucket, directory, prefix, f.rootBucket == "", false, 0, f.opt.Versions, false, func(remote string, object *api.File, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory, &last)
@@ -855,16 +855,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
return nil
}
// listBuckets returns all the buckets to out
@@ -890,14 +890,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}
// ListR lists the objects and directories of the Fs starting
@@ -2428,6 +2460,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.PublicLinker = &Fs{}
_ fs.OpenChunkWriter = &Fs{}
_ fs.Commander = &Fs{}

View File

@@ -125,10 +125,21 @@ type FolderItems struct {
Offset int `json:"offset"`
Limit int `json:"limit"`
NextMarker *string `json:"next_marker,omitempty"`
Order []struct {
By string `json:"by"`
Direction string `json:"direction"`
} `json:"order"`
// There is some confusion about how this is actually
// returned. The []struct has worked for many years, but in
// https://github.com/rclone/rclone/issues/8776 box was
// returning it not as a list. We don't actually use
// this, so comment it out.
//
// Order struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
//
// Order []struct {
// By string `json:"by"`
// Direction string `json:"direction"`
// } `json:"order"`
}
// Parent defined the ID of the parent directory

View File

@@ -241,18 +241,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
DirModTimeUpdatesOnWrite: true,
PartialUploads: true,
}).Fill(ctx, f)
canMove := true
canMove, slowHash := true, false
for _, u := range f.upstreams {
features = features.Mask(ctx, u.f) // Mask all upstream fs
if !operations.CanServerSideMove(u.f) {
canMove = false
}
slowHash = slowHash || u.f.Features().SlowHash
}
// We can move if all remotes support Move or Copy
if canMove {
features.Move = f.Move
}
// If any of upstreams are SlowHash, propagate it
features.SlowHash = slowHash
// Enable ListR when upstreams either support ListR or is local
// But not when all upstreams are local
if features.ListR == nil {

View File

@@ -760,7 +760,7 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *storage.
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
// List the objects
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, object *storage.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
@@ -768,16 +768,16 @@ func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addB
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
})
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, err
return err
}
// listBuckets lists the buckets
@@ -820,14 +820,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}
// ListR lists the objects and directories of the Fs starting
@@ -1462,6 +1494,7 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
return "", err
}
bucket, bucketPath := f.split(remote)
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
}
// Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
"x-archive-auto-make-bucket": "1",
"x-archive-queue-derive": "0",
"x-archive-keep-old-version": "0",
"x-amz-copy-source": quotePath(path.Join("/", srcBucket, srcPath)),
"x-amz-copy-source": rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
"x-amz-metadata-directive": "COPY",
"x-archive-filemeta-sha1": srcObj.sha1,
"x-archive-filemeta-md5": srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
// make a GET request to (frontend)/download/:item/:path
opts := rest.Opts{
Method: "GET",
Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
Options: optionsFixed,
}
err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}
// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
seg := strings.Split(s, "/")
newValues := []string{}
for _, v := range seg {
newValues = append(newValues, url.QueryEscape(v))
}
return strings.Join(newValues, "/")
}
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}

View File

@@ -325,13 +325,12 @@ func (f *Fs) list(ctx context.Context, bucket, directory, prefix string, addBuck
}
// listDir lists the bucket to the entries
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
// List the objects and directories
err = f.list(ctx, bucket, directory, prefix, addBucket, false, func(remote string, entry fs.DirEntry, isDirectory bool) error {
entries = append(entries, entry)
return nil
return callback(entry)
})
return entries, err
return err
}
// listBuckets lists the buckets to entries
@@ -354,15 +353,46 @@ func (f *Fs) listBuckets(ctx context.Context) (entries fs.DirEntries, err error)
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
// defer fslog.Trace(dir, "")("entries = %q, err = %v", &entries, &err)
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucket, directory := f.split(dir)
if bucket == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucket, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}
// ListR lists the objects and directories of the Fs starting
@@ -629,6 +659,7 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

View File

@@ -254,15 +254,47 @@ func (f *Fs) split(rootRelativePath string) (bucketName, bucketPath string) {
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
bucketName, directory := f.split(dir)
fs.Debugf(f, "listing: bucket : %v, directory: %v", bucketName, dir)
if bucketName == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listBuckets(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "", list.Add)
if err != nil {
return err
}
return f.listBuckets(ctx)
}
return f.listDir(ctx, bucketName, directory, f.rootDirectory, f.rootBucket == "")
return list.Flush()
}
// listFn is called from list to handle an object.
@@ -411,24 +443,24 @@ func (f *Fs) itemToDirEntry(ctx context.Context, remote string, object *objectst
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, bucket, directory, prefix string, addBucket bool, callback func(fs.DirEntry) error) (err error) {
fn := func(remote string, object *objectstorage.ObjectSummary, isDirectory bool) error {
entry, err := f.itemToDirEntry(ctx, remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
return callback(entry)
}
return nil
}
err = f.list(ctx, bucket, directory, prefix, addBucket, false, 0, fn)
if err != nil {
return nil, err
return err
}
// bucket must be present if listing succeeded
f.cache.MarkOK(bucket)
return entries, nil
return nil
}
// listBuckets returns all the buckets to out
@@ -765,6 +797,7 @@ var (
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Commander = &Fs{}
_ fs.CleanUpper = &Fs{}
_ fs.OpenChunkWriter = &Fs{}

View File

@@ -119,6 +119,9 @@ var providerOption = fs.Option{
}, {
Value: "IDrive",
Help: "IDrive e2",
}, {
Value: "Intercolo",
Help: "Intercolo Object Storage",
}, {
Value: "IONOS",
Help: "IONOS Cloud",
@@ -504,6 +507,14 @@ func init() {
Value: "us-east-1",
Help: "Indore, Madhya Pradesh, India",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
Provider: "Intercolo",
Examples: []fs.OptionExample{{
Value: "de-fra",
Help: "Frankfurt, Germany",
}},
}, {
Name: "region",
Help: "Region where your bucket will be created and your data stored.\n",
@@ -643,7 +654,7 @@ func init() {
}, {
Name: "region",
Help: "Region to connect to.\n\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Provider: "!AWS,Alibaba,ArvanCloud,ChinaMobile,Cloudflare,FlashBlade,Intercolo,IONOS,Petabox,Liara,Linode,Magalu,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,Storj,Synology,TencentCOS,HuaweiOBS,IDrive,Mega,Zata",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure.\nWill use v4 signatures and an empty region.",
@@ -954,6 +965,14 @@ func init() {
Value: "s3.private.sng01.cloud-object-storage.appdomain.cloud",
Help: "Singapore Single Site Private Endpoint",
}},
}, {
Name: "endpoint",
Help: "Endpoint for Intercolo Object Storage.",
Provider: "Intercolo",
Examples: []fs.OptionExample{{
Value: "de-fra.i3storage.com",
Help: "Frankfurt, Germany",
}},
}, {
Name: "endpoint",
Help: "Endpoint for IONOS S3 Object Storage.\n\nSpecify the endpoint from the same region.",
@@ -1532,7 +1551,7 @@ func init() {
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\n\nRequired when using an S3 clone.",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
Provider: "!AWS,ArvanCloud,IBMCOS,IDrive,Intercolo,IONOS,TencentCOS,HuaweiOBS,Alibaba,ChinaMobile,GCS,Liara,Linode,LyveCloud,Magalu,OVHcloud,Scaleway,Selectel,StackPath,Storj,Synology,RackCorp,Qiniu,Petabox,Zata",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
@@ -2067,7 +2086,7 @@ func init() {
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\n\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
Provider: "!AWS,Alibaba,ArvanCloud,HuaweiOBS,ChinaMobile,Cloudflare,FlashBlade,IBMCOS,IDrive,Intercolo,IONOS,Leviia,Liara,Linode,Magalu,Outscale,OVHcloud,Qiniu,RackCorp,Scaleway,Selectel,StackPath,Storj,TencentCOS,Petabox,Mega",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
@@ -3677,6 +3696,9 @@ func setQuirks(opt *Options) {
case "IDrive":
virtualHostStyle = false
useAlreadyExists = false // untested
case "Intercolo":
// no quirks
useUnsignedPayload = false // Intercolo has trailer support
case "IONOS":
// listObjectsV2 supported - https://api.ionos.com/docs/s3/#Basic-Operations-get-Bucket-list-type-2
virtualHostStyle = false

View File

@@ -773,21 +773,20 @@ func (f *Fs) list(ctx context.Context, container, directory, prefix string, addC
}
// listDir lists a single directory
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool) (entries fs.DirEntries, err error) {
func (f *Fs) listDir(ctx context.Context, container, directory, prefix string, addContainer bool, callback func(fs.DirEntry) error) (err error) {
if container == "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
// List the objects
err = f.list(ctx, container, directory, prefix, addContainer, false, false, func(entry fs.DirEntry) error {
entries = append(entries, entry)
return nil
return callback(entry)
})
if err != nil {
return nil, err
return err
}
// container must be present if listing succeeded
f.cache.MarkOK(container)
return entries, nil
return nil
}
// listContainers lists the containers
@@ -818,14 +817,46 @@ func (f *Fs) listContainers(ctx context.Context) (entries fs.DirEntries, err err
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(ctx context.Context, dir string) (entries fs.DirEntries, err error) {
return list.WithListP(ctx, dir, f)
}
// ListP lists the objects and directories of the Fs starting
// from dir non recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
func (f *Fs) ListP(ctx context.Context, dir string, callback fs.ListRCallback) error {
list := list.NewHelper(callback)
container, directory := f.split(dir)
if container == "" {
if directory != "" {
return nil, fs.ErrorListBucketRequired
return fs.ErrorListBucketRequired
}
entries, err := f.listContainers(ctx)
if err != nil {
return err
}
for _, entry := range entries {
err = list.Add(entry)
if err != nil {
return err
}
}
} else {
err := f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "", list.Add)
if err != nil {
return err
}
return f.listContainers(ctx)
}
return f.listDir(ctx, container, directory, f.rootDirectory, f.rootContainer == "")
return list.Flush()
}
// ListR lists the objects and directories of the Fs starting
@@ -1650,6 +1681,7 @@ var (
_ fs.PutStreamer = &Fs{}
_ fs.Copier = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.ListPer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)

bin/make_bisync_docs.go Normal file
View File

@@ -0,0 +1,159 @@
//go:build ignore
package main
import (
"bytes"
"cmp"
"context"
"encoding/json"
"flag"
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/operations"
"github.com/rclone/rclone/fstest/runs"
"github.com/stretchr/testify/assert/yaml"
)
var path = flag.String("path", "./docs/content/", "root path")
const (
configFile = "fstest/test_all/config.yaml"
startListIgnores = "<!--- start list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListIgnores = "<!--- end list_ignores - DO NOT EDIT THIS SECTION - use make commanddocs --->"
startListFailures = "<!--- start list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
endListFailures = "<!--- end list_failures - DO NOT EDIT THIS SECTION - use make commanddocs --->"
integrationTestsJSONURL = "https://pub.rclone.org/integration-tests/current/index.json"
integrationTestsHTMLURL = "https://pub.rclone.org/integration-tests/current/"
)
func main() {
err := replaceBetween(*path, startListIgnores, endListIgnores, getIgnores)
if err != nil {
fs.Errorf(*path, "error replacing ignores: %v", err)
}
err = replaceBetween(*path, startListFailures, endListFailures, getFailures)
if err != nil {
fs.Errorf(*path, "error replacing failures: %v", err)
}
}
// replaceBetween replaces the text between startSep and endSep with fn()
func replaceBetween(path, startSep, endSep string, fn func() (string, error)) error {
b, err := os.ReadFile(filepath.Join(path, "bisync.md"))
if err != nil {
return err
}
doc := string(b)
before, after, found := strings.Cut(doc, startSep)
if !found {
return fmt.Errorf("could not find: %v", startSep)
}
_, after, found = strings.Cut(after, endSep)
if !found {
return fmt.Errorf("could not find: %v", endSep)
}
replaceSection, err := fn()
if err != nil {
return err
}
newDoc := before + startSep + "\n" + strings.TrimSpace(replaceSection) + "\n" + endSep + after
err = os.WriteFile(filepath.Join(path, "bisync.md"), []byte(newDoc), 0777)
if err != nil {
return err
}
return nil
}
// getIgnores updates the list of ignores from config.yaml
func getIgnores() (string, error) {
config, err := parseConfig()
if err != nil {
return "", fmt.Errorf("failed to parse config: %v", err)
}
s := ""
slices.SortFunc(config.Backends, func(a, b runs.Backend) int {
return cmp.Compare(a.Remote, b.Remote)
})
for _, backend := range config.Backends {
include := false
if slices.Contains(backend.IgnoreTests, "cmd/bisync") {
include = true
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
for _, ignore := range backend.Ignore {
if strings.Contains(strings.ToLower(ignore), "bisync") {
if !include { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(backend.Remote, ":"), backend.Backend)
}
include = true
s += fmt.Sprintf(" - `%s`\n", ignore)
// TODO: might be neat to add a "reason" param displaying the reason the test is ignored
}
}
}
return s, nil
}
// getFailures updates the list of currently failing tests from the integration tests server
func getFailures() (string, error) {
var buf bytes.Buffer
err := operations.CopyURLToWriter(context.Background(), integrationTestsJSONURL, &buf)
if err != nil {
return "", err
}
r := runs.Report{}
err = json.Unmarshal(buf.Bytes(), &r)
if err != nil {
return "", fmt.Errorf("failed to unmarshal json: %v", err)
}
s := ""
for _, run := range r.Failed {
for i, t := range run.FailedTests {
if strings.Contains(strings.ToLower(t), "bisync") {
if i == 0 { // don't have header row yet
s += fmt.Sprintf("- `%s` (`%s`)\n", strings.TrimSuffix(run.Remote, ":"), run.Backend)
}
url := integrationTestsHTMLURL + run.TrialName
url = url[:len(url)-5] + "1.txt" // numbers higher than 1 could change from night to night
s += fmt.Sprintf(" - [`%s`](%v)\n", t, url)
if i == 4 && len(run.FailedTests) > 5 { // stop after 5
s += fmt.Sprintf(" - [%v more](%v)\n", len(run.FailedTests)-5, integrationTestsHTMLURL)
break
}
}
}
}
s += fmt.Sprintf("- Updated: %v", r.DateTime)
return s, nil
}
// parseConfig reads and parses the config.yaml file
func parseConfig() (*runs.Config, error) {
d, err := os.ReadFile(configFile)
if err != nil {
return nil, fmt.Errorf("failed to read config file: %w", err)
}
config := &runs.Config{}
err = yaml.Unmarshal(d, &config)
if err != nil {
return nil, fmt.Errorf("failed to parse config file: %w", err)
}
return config, nil
}

bin/markdown-lint Executable file
View File

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
#
# Run markdown linting locally
set -e
# Workflow
build=.github/workflows/build.yml
# Globs read from $build
globs=$(awk '/- name: Check Markdown format/{f=1;next} f && /globs:/{f=2;next} f==2 && NF{if($1=="-"){exit} print $0}' $build)
if [ -z "$globs" ]; then
echo "Error: No globs found in Check Markdown step in $build" >&2
exit 1
fi
docker run -v $PWD:/workdir --user $(id -u):$(id -g) davidanson/markdownlint-cli2 $globs

View File

@@ -51,47 +51,52 @@ output. The output is typically used, free, quota and trash contents.
E.g. Typical output from ` + "`rclone about remote:`" + ` is:
Total: 17 GiB
Used: 7.444 GiB
Free: 1.315 GiB
Trashed: 100.000 MiB
Other: 8.241 GiB
` + "```text" + `
Total: 17 GiB
Used: 7.444 GiB
Free: 1.315 GiB
Trashed: 100.000 MiB
Other: 8.241 GiB
` + "```" + `
Where the fields are:
* Total: Total size available.
* Used: Total size used.
* Free: Total space available to this user.
* Trashed: Total space used by trash.
* Other: Total amount in other storage (e.g. Gmail, Google Photos).
* Objects: Total number of objects in the storage.
- Total: Total size available.
- Used: Total size used.
- Free: Total space available to this user.
- Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage.
All sizes are in number of bytes.
Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.
Total: 18253611008
Used: 7993453766
Free: 1411001220
Trashed: 104857602
Other: 8849156022
` + "```text" + `
Total: 18253611008
Used: 7993453766
Free: 1411001220
Trashed: 104857602
Other: 8849156022
` + "```" + `
A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.
{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
` + "```json" + `
{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
` + "```" + `
Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted.
Some backends does not support the ` + "`rclone about`" + ` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).
`,
see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
// "groups": "",

View File

@@ -30,14 +30,16 @@ rclone from a machine with a browser - use as instructed by
rclone config.
The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service
Use --auth-no-open-browser to prevent rclone to open auth
link in default browser automatically.
Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Use --template to generate HTML output via a custom Go template. If a blank
string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
},

View File

@@ -37,26 +37,33 @@ see the backend docs for definitions.
You can discover what commands a backend implements by using
rclone backend help remote:
rclone backend help <backendname>
` + "```sh" + `
rclone backend help remote:
rclone backend help <backendname>
` + "```" + `
You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).
rclone backend features remote:
` + "```sh" + `
rclone backend features remote:
` + "```" + `
Pass options to the backend command with -o. This should be key=value or key, e.g.:
rclone backend stats remote:path stats -o format=json -o long
` + "```sh" + `
rclone backend stats remote:path stats -o format=json -o long
` + "```" + `
Pass arguments to the backend by placing them on the end of the line
rclone backend cleanup remote:path file1 file2 file3
` + "```sh" + `
rclone backend cleanup remote:path file1 file2 file3
` + "```" + `
Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.
`,
[backend/command](/rc/#backend-command) in the rc docs.`,
Annotations: map[string]string{
"versionIntroduced": "v1.52",
"groups": "Important",

View File

@@ -177,6 +177,7 @@ var (
// "src and dst identical but can't set mod time without deleting and re-uploading"
argRefreshTimes = flag.Bool("refresh-times", false, "Force refreshing the target modtime, useful for Dropbox (default: false)")
ignoreLogs = flag.Bool("ignore-logs", false, "skip comparing log lines but still compare listings")
argPCount = flag.Int("pcount", 2, "number of parallel subtests to run for TestBisyncConcurrent") // go test ./cmd/bisync -race -pcount 10
)
// bisyncTest keeps all test data in a single place
@@ -284,6 +285,15 @@ func TestBisyncConcurrent(t *testing.T) {
if !isLocal(*fstest.RemoteName) {
t.Skip("TestBisyncConcurrent is skipped on non-local")
}
if *argTestCase != "" && *argTestCase != "basic" {
t.Skip("TestBisyncConcurrent only tests 'basic'")
}
if *argPCount < 2 {
t.Skip("TestBisyncConcurrent is pointless with -pcount < 2")
}
if *argGolden {
t.Skip("skip TestBisyncConcurrent when goldenizing")
}
oldArgTestCase := argTestCase
*argTestCase = "basic"
*ignoreLogs = true // not useful to compare logs here because both runs will be logging at once
@@ -292,8 +302,9 @@ func TestBisyncConcurrent(t *testing.T) {
*ignoreLogs = false
})
t.Run("test1", testParallel)
t.Run("test2", testParallel)
for i := 0; i < *argPCount; i++ {
t.Run(fmt.Sprintf("test%v", i), testParallel)
}
}
func testParallel(t *testing.T) {
@@ -465,6 +476,7 @@ func (b *bisyncTest) runTestCase(ctx context.Context, t *testing.T, testCase str
// Prepare initial content
b.cleanupCase(ctx)
ctx = accounting.WithStatsGroup(ctx, random.String(8))
fstest.CheckListingWithPrecision(b.t, b.fs1, []fstest.Item{}, []string{}, b.fs1.Precision()) // verify starting from empty
fstest.CheckListingWithPrecision(b.t, b.fs2, []fstest.Item{}, []string{}, b.fs2.Precision())
initFs, err := cache.Get(ctx, b.initDir)
@@ -641,12 +653,11 @@ func (b *bisyncTest) cleanupCase(ctx context.Context) {
_ = operations.Purge(ctx, b.fs1, "")
_ = operations.Purge(ctx, b.fs2, "")
_ = os.RemoveAll(b.workDir)
accounting.Stats(ctx).ResetCounters()
}
func (b *bisyncTest) runTestStep(ctx context.Context, line string) (err error) {
var fsrc, fdst fs.Fs
accounting.Stats(ctx).ResetErrors()
ctx = accounting.WithStatsGroup(ctx, random.String(8))
b.logPrintf("%s %s", color(terminal.CyanFg, b.stepStr), color(terminal.BlueFg, line))
ci := fs.GetConfig(ctx)
@@ -1007,6 +1018,7 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
}
// test if modtimes are writeable
testSetModtime := func(f fs.Fs) {
ctx := accounting.WithStatsGroup(ctx, random.String(8)) // keep stats separate
in := bytes.NewBufferString("modtime_write_test")
objinfo := object.NewStaticObjectInfo("modtime_write_test", initDate, int64(len("modtime_write_test")), true, nil, nil)
obj, err := f.Put(ctx, in, objinfo)
@@ -1018,6 +1030,11 @@ func (b *bisyncTest) checkPreReqs(ctx context.Context, opt *bisync.Options) (con
if err == fs.ErrorCantSetModTime {
b.t.Skip("skipping test as at least one remote does not support setting modtime")
}
if err == fs.ErrorCantSetModTimeWithoutDelete { // transfers stats expected to differ on this backend
logReplacements = append(logReplacements, `^.*There was nothing to transfer.*$`, dropMe)
} else {
require.NoError(b.t, err)
}
if !f.Features().IsLocal {
time.Sleep(time.Second) // avoid GoogleCloudStorage Error 429 rateLimitExceeded
}
@@ -1619,6 +1636,14 @@ func (b *bisyncTest) mangleResult(dir, file string, golden bool) string {
`^.*not equal on recheck.*$`, dropMe,
)
}
if b.ignoreBlankHash || !b.fs1.Hashes().Contains(hash.MD5) || !b.fs2.Hashes().Contains(hash.MD5) {
// if either side lacks support for md5, need to ignore the "nothing to transfer" log,
// as sync may in fact need to transfer, where it would otherwise skip based on hash or just update modtime.
// transfer stats will also differ in fs.ErrorCantSetModTimeWithoutDelete scenario, and where --download-hash is needed.
logReplacements = append(logReplacements,
`^.*There was nothing to transfer.*$`, dropMe,
)
}
rep := logReplacements
if b.testCase == "dry_run" {
rep = append(rep, dryrunReplacements...)

View File

@@ -51,14 +51,15 @@ var longHelp = shortHelp + makeHelp(`
bidirectional cloud sync solution in rclone.
It retains the Path1 and Path2 filesystem listings from the prior run.
On each successive run it will:
- list files on Path1 and Path2, and check for changes on each side.
Changes include |New|, |Newer|, |Older|, and |Deleted| files.
- Propagate changes on Path1 to Path2, and vice-versa.
Bisync is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
before using, or data loss can result. Questions can be asked in the
[Rclone Forum](https://forum.rclone.org/).
See [full bisync description](https://rclone.org/bisync/) for details.
`)
See [full bisync description](https://rclone.org/bisync/) for details.`)

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -59,6 +61,7 @@ INFO : - Path1 Queue copy to Path2 - {
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -133,6 +136,7 @@ INFO : - Path1 Queue copy to Path2 - {
INFO : - Path1 Queue copy to Path2 - {path2/}file1.txt
INFO : - Path1 Queue copy to Path2 - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -87,6 +89,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

View File

@@ -21,7 +21,9 @@ INFO : Using filters file {workdir/}exclude-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}exclude-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -136,7 +138,9 @@ INFO : Using filters file {workdir/}include-other-filtersfile.txt
INFO : Storing filters file hash to {workdir/}include-other-filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -90,7 +92,9 @@ INFO : Copying Path2 files to Path1
INFO : Checking access health
INFO : Found 2 matching ".chk_file" files on both paths
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -102,7 +104,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -23,7 +23,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -80,7 +82,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path1 Queue copy to Path2 - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : subdir: Making directory
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -124,6 +126,7 @@ INFO : Path2: 1 changes:  0 new,  0 modified, 
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}RCLONE_TEST
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -148,7 +151,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -188,6 +193,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : subdir: Removing directory
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -27,7 +27,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}測試Русский ěáñ/" with Path2 "{path2/}測試Русский ěáñ/"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}測試Русский ěáñ/" vs Path2 "{path2/}測試Русский ěáñ/"
INFO : Bisync successful
@@ -84,7 +86,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -174,7 +178,9 @@ INFO : Using filters file {workdir/}測試_filtersfile.txt
INFO : Storing filters file hash to {workdir/}測試_filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -20,7 +20,9 @@ INFO : Using filters file {workdir/}filtersfile.flt
INFO : Storing filters file hash to {workdir/}filtersfile.flt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -81,7 +83,9 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Storing filters file hash to {workdir/}filtersfile.txt.{hashtype}
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -146,7 +150,9 @@ INFO : Using filters file {workdir/}filtersfile.txt
INFO : Skipped storing filters file hash to {workdir/}filtersfile.txt.{hashtype} as --dry-run is set
INFO : Copying Path2 files to Path1
NOTICE: - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
NOTICE: - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -33,7 +35,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -84,6 +86,7 @@ INFO : - Path2 Queue delete - {
INFO : - Path2 Queue delete - {path2/}file4.txt
INFO : - Path2 Queue delete - {path2/}file5.txt
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -84,6 +86,7 @@ INFO : - Path1 Queue delete - {
INFO : - Path1 Queue delete - {path1/}file4.txt
INFO : - Path1 Queue delete - {path1/}file5.txt
INFO : - Path2 Do queued copies to - Path1
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -17,7 +17,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -115,7 +117,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -154,6 +158,7 @@ INFO : Applying changes
INFO : - Path2 Queue copy to Path1 - {path1/}file2.txt
INFO : - Path2 Queue copy to Path1 - {path1/}subdir/file21.txt
INFO : - Path2 Do queued copies to - Path1
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -171,6 +176,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -39,6 +39,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

View File

@@ -22,6 +22,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
@@ -129,6 +130,7 @@ INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : file1.txt: Path1 is smaller. Path1: 33, Path2: 42, Difference: 9
INFO : Resync updating listings
@@ -158,6 +160,7 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"

View File

@@ -16,7 +16,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful
@@ -45,6 +47,7 @@ INFO : Path2 checking for diffs
INFO : Applying changes
INFO : - Path2 Queue delete - {path2/}subdir/file20.txt
INFO : - Path1 Do queued copies to - Path2
INFO : There was nothing to transfer
INFO : Updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -15,7 +15,9 @@ INFO : Bisyncing with Comparison Settings:
INFO : Synching Path1 "{path1/}" with Path2 "{path2/}"
INFO : Copying Path2 files to Path1
INFO : - Path2 Resync is copying files to - Path1
INFO : There was nothing to transfer
INFO : - Path1 Resync is copying files to - Path2
INFO : There was nothing to transfer
INFO : Resync updating listings
INFO : Validating listings for Path1 "{path1/}" vs Path2 "{path2/}"
INFO : Bisync successful

View File

@@ -43,15 +43,21 @@ var commandDefinition = &cobra.Command{
You can use it like this to output a single file
rclone cat remote:path/to/file
|||sh
rclone cat remote:path/to/file
|||
Or like this to output any file in dir or its subdirectories.
rclone cat remote:path/to/dir
|||sh
rclone cat remote:path/to/dir
|||
Or like this to output any .txt files in dir or its subdirectories.
rclone --include "*.txt" cat remote:path/to/dir
|||sh
rclone --include "*.txt" cat remote:path/to/dir
|||
Use the |--head| flag to print characters only at the start, |--tail| for
the end and |--offset| and |--count| to print a section in the middle.
@@ -62,14 +68,17 @@ Use the |--separator| flag to print a separator value between files. Be sure to
shell-escape special characters. For example, to print a newline between
files, use:
* bash:
- bash:
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||sh
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||
* powershell:
- powershell:
rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
`, "|", "`"),
|||powershell
rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
|||`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.33",
"groups": "Filter,Listing",

View File

@@ -74,8 +74,7 @@ you what happened to it. These are reminiscent of diff files.
- |! path| means there was an error reading or hashing the source or dest.
The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information.
`, "|", "`")
option for more information.`, "|", "`")
// GetCheckOpt gets the options corresponding to the check flags
func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {

View File

@@ -17,8 +17,7 @@ var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible.`,
Long: `Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
versions. Not supported by all remotes.`,
Annotations: map[string]string{
"versionIntroduced": "v1.31",
"groups": "Important",

View File

@@ -44,8 +44,7 @@ var configCommand = &cobra.Command{
Short: `Enter an interactive configuration session.`,
Long: `Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
password to protect your configuration.`,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
@@ -134,9 +133,7 @@ sensitive info with XXX.
This makes the config file suitable for posting online for support.
It should be double checked before posting as the redaction may not be perfect.
`,
It should be double checked before posting as the redaction may not be perfect.`,
Annotations: map[string]string{
"versionIntroduced": "v1.64",
},
@@ -178,8 +175,8 @@ var configProvidersCommand = &cobra.Command{
var updateRemoteOpt config.UpdateRemoteOpt
var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
var configPasswordHelp = strings.ReplaceAll(
`Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
@@ -205,29 +202,29 @@ it.
This will look something like (some irrelevant detail removed):
|||
|||json
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
|||
@@ -250,7 +247,9 @@ The keys of |Option| are used as follows:
If |Error| is set then it should be shown to the user at the same
time as the question.
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||sh
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||
Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
@@ -264,8 +263,7 @@ not just the post config questions. Any parameters are used as
defaults for questions as usual.
Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
as a readable demonstration.`, "|", "`")
var configCreateCommand = &cobra.Command{
Use: "create name type [key value]*",
Short: `Create a new remote with name, type and options.`,
@@ -275,13 +273,18 @@ should be passed in pairs of |key| |value| or as |key=value|.
For example, to make a swift remote of name myremote using auto config
you would do:
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||sh
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||
So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:
rclone config create mydrive drive config_is_local=false
|||sh
rclone config create mydrive drive config_is_local=false
|||
`, "|", "`") + configPasswordHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
@@ -344,13 +347,18 @@ pairs of |key| |value| or as |key=value|.
For example, to update the env_auth field of a remote of name myremote
you would do:
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||sh
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||
If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:
rclone config update myremote env_auth=true config_refresh_token=false
|||sh
rclone config update myremote env_auth=true config_refresh_token=false
|||
`, "|", "`") + configPasswordHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
@@ -388,12 +396,13 @@ The |password| should be passed in in clear (unobscured).
For example, to set password of a remote of name myremote you would do:
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||sh
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||
This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
both support obscuring passwords directly.`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
@@ -441,8 +450,7 @@ var configReconnectCommand = &cobra.Command{
To disconnect the remote use "rclone config disconnect".
This normally means going through the interactive oauth flow again.
`,
This normally means going through the interactive oauth flow again.`,
RunE: func(command *cobra.Command, args []string) error {
ctx := context.Background()
cmd.CheckArgs(1, 1, command, args)
@@ -461,8 +469,7 @@ var configDisconnectCommand = &cobra.Command{
This normally means revoking the oauth token.
To reconnect use "rclone config reconnect".
`,
To reconnect use "rclone config reconnect".`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
@@ -490,8 +497,7 @@ var configUserInfoCommand = &cobra.Command{
Use: "userinfo remote:",
Short: `Prints info about logged in user of remote.`,
Long: `This prints the details of the person logged in to the cloud storage
system.
`,
system.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
@@ -534,8 +540,7 @@ var configEncryptionCommand = &cobra.Command{
Use: "encryption",
Short: `set, remove and check the encryption for the config file`,
Long: `This command sets, clears and checks the encryption for the config file using
the subcommands below.
`,
the subcommands below.`,
}
var configEncryptionSetCommand = &cobra.Command{
@@ -559,8 +564,7 @@ variable to distinguish which password you must supply.
Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk
briefly.
`, "|", "`"),
briefly.`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
@@ -580,8 +584,7 @@ If |--password-command| is in use, this will be called to supply the old config
password.
If the config was not encrypted then no error will be returned and
this command will do nothing.
`, "|", "`"),
this command will do nothing.`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
@@ -600,8 +603,7 @@ It will attempt to decrypt the config using the password you supply.
If decryption fails it will return a non-zero exit code if using
|--password-command|, otherwise it will prompt again for the password.
If the config file is not encrypted it will return a non zero exit code.
`, "|", "`"),
If the config file is not encrypted it will return a non zero exit code.`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
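
As a hedged sketch of the non-interactive protocol described above (the remote name and answers are illustrative):

```sh
# Start a config run that asks no questions and reports its state as JSON
rclone config create mydrive drive config_is_local=false --non-interactive

# Feed the answer back and continue from the reported state
rclone config update mydrive --continue \
    --state "*oauth-islocal,teamdrive,," --result "true"
```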

View File

@@ -31,18 +31,27 @@ var commandDefinition = &cobra.Command{
Use: "convmv dest:path --name-transform XXX",
Short: `Convert file and directory names in place.`,
// Warning! "¡" will be replaced by backticks below
Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
Long: strings.ReplaceAll(`convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations.
`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`+transform.Help()+`The regex command generally accepts Perl-style regular expressions, the exact
syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/).
The replacement string may contain capturing group variables, referencing
capturing groups using the syntax ¡$name¡ or ¡${name}¡, where the name can
refer to a named capturing group or it can simply be the index as a number.
To insert a literal $, use $$.
Multiple transformations can be used in sequence, applied
in the order they are specified on the command line.
The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.
## Files vs Directories
### Files vs Directories
By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡.
By default ¡--name-transform¡ will only apply to file names. This means only the
leaf file name will be transformed. However some of the transforms would be
better applied to the whole path or just directories. To choose which
part of the file path is affected some tags can be added to the ¡--name-transform¡.
| Tag | Effect |
|------|------|
@@ -50,42 +59,58 @@ To choose which which part of the file path is affected some tags can be added t
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |
This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
This is used by adding the tag into the transform name like this:
¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.
For some conversions using all is more likely to be useful, for example
¡--name-transform all,nfc¡.
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name.
This will cause an error.
## Ordering and Conflicts
### Ordering and Conflicts
* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.
- Transformations will be applied in the order specified by the user.
- If the ¡file¡ tag is in use (the default) then only the leaf name of files
will be transformed.
- If the ¡dir¡ tag is in use then directories anywhere in the path will be
transformed
- If the ¡all¡ tag is in use then directories and files anywhere in the path
will be transformed
- Each transformation will be run one path segment at a time.
- If a transformation adds a ¡/¡ or ends up with an empty path segment then
that will be an error.
- It is up to the user to put the transformations in a sensible order.
- Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or
¡nfc¡ followed by ¡nfd¡, are possible.
- Instead of enforcing mutual exclusivity, transformations are applied in
sequence as specified by the user, allowing for intentional use cases
(e.g., trimming one prefix before adding another).
- Users should be aware that certain combinations may lead to unexpected
results and should verify transformations using ¡--dry-run¡ before execution.
## Race Conditions and Non-Deterministic Behavior
### Race Conditions and Non-Deterministic Behavior
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where
multiple source files map to the same destination name. This can lead to race
conditions when performing concurrent transfers. It is up to the user to
anticipate these.
- If two files from the source are transformed into the same name at the
destination, the final state may be non-deterministic.
- Running rclone check after a sync using such transformations may erroneously
report missing or differing files due to overwritten results.
To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.
`, "¡", "`"),
- Carefully review transformations that may introduce conflicts.
- Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind
that it won't show the effect of non-deterministic transformations).
- Avoid transformations that cause multiple distinct source files to map to the
same destination name.
- Consider disabling concurrency with ¡--transfers=1¡ if necessary.
- Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every
time they are used. Avoid these when using ¡bisync¡.`, "¡", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.70",
"groups": "Filter,Listing,Important,Copy",

View File

@@ -50,22 +50,30 @@ go there.
For example
rclone copy source:sourcepath dest:destpath
|||sh
rclone copy source:sourcepath dest:destpath
|||
Let's say there are two files in sourcepath
sourcepath/one.txt
sourcepath/two.txt
|||text
sourcepath/one.txt
sourcepath/two.txt
|||
This copies them to
destpath/one.txt
destpath/two.txt
|||text
destpath/one.txt
destpath/two.txt
|||
Not to
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||text
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||
If you are familiar with |rsync|, rclone always works as if you had
written a trailing |/| - meaning "copy the contents of this directory".
@@ -81,20 +89,22 @@ For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|||sh
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|||
Rclone will sync the modification times of files and directories if
the backend supports it. If metadata syncing is required then use the
|--metadata| flag.
Note that the modification time and metadata for the root directory
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652)
for more info.
**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.
**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without
copying anything.
`, "|", "`") + operationsflags.Help(),
Annotations: map[string]string{
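
As the copy help above notes, metadata syncing is opt-in; a hedged sketch combining it with the incremental-copy example (paths hypothetical):

```sh
# Copy only files changed in the last 24h and sync metadata where supported
rclone copy --max-age 24h --no-traverse --metadata /path/to/src remote:
```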

View File

@@ -35,26 +35,32 @@ name. If the source is a directory then it acts exactly like the
So
rclone copyto src dst
` + "```sh" + `
rclone copyto src dst
` + "```" + `
where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
where src and dst are rclone paths, either ` + "`remote:path`" + ` or
` + "`/path/to/local`" + ` or ` + "`C:\\windows\\path\\if\\on\\windows`" + `.
This will:
if src is file
copy it to dst, overwriting an existing file if it exists
if src is directory
copy it to dst, overwriting existing files if they exist
see copy command for full details
` + "```text" + `
if src is file
copy it to dst, overwriting an existing file if it exists
if src is directory
copy it to dst, overwriting existing files if they exist
see copy command for full details
` + "```" + `
This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. It doesn't delete files from
the destination.
*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
*If you are looking to copy just a byte range of a file, please see
` + "`rclone cat --offset X --count Y`" + `.*
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view
real-time transfer statistics.
` + operationsflags.Help(),
Annotations: map[string]string{
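
To make the rename-on-copy behaviour above concrete, a hedged example (paths hypothetical):

```sh
# dst is a file path, so the copy also renames the leaf
rclone copyto remote:dir/file.txt remote:dir/renamed.txt
```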

View File

@@ -48,7 +48,7 @@ set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.
Setting |--no-clobber| will prevent overwriting file on the
Setting |--no-clobber| will prevent overwriting a file on the
destination if there is one with the same name.
Setting |--stdout| or making the output file name |-|
@@ -62,9 +62,7 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
- |--bind 0.0.0.0| rclone will use IPv6 if available - try disabling it
- |--bind ::0| to disable IPv4
- |--user agent curl| - some sites have whitelists for curl's user-agent - try that
- Make sure the site works with |curl| directly
`, "|", "`"),
- Make sure the site works with |curl| directly`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.43",
"groups": "Important",

View File

@@ -37,14 +37,18 @@ checksum of the file it has just encrypted.
Use it like this
rclone cryptcheck /path/to/files encryptedremote:path
` + "```sh" + `
rclone cryptcheck /path/to/files encryptedremote:path
` + "```" + `
You can use it like this also, but that will involve downloading all
the files in remote:path.
the files in ` + "`remote:path`" + `.
rclone cryptcheck remote:path encryptedremote:path
` + "```sh" + `
rclone cryptcheck remote:path encryptedremote:path
` + "```" + `
After it has run it will log the status of the encryptedremote:.
After it has run it will log the status of the ` + "`encryptedremote:`" + `.
` + check.FlagsHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.36",

View File

@@ -33,13 +33,13 @@ If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file name
use it like this
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
` + "```sh" + `
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode --reverse encryptedremote: filename1 filename2
` + "```" + `
rclone cryptdecode --reverse encryptedremote: filename1 filename2
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
See the documentation on the [crypt](/crypt/) overlay for more info.
`,
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + `
command. See the documentation on the [crypt](/crypt/) overlay for more info.`,
Annotations: map[string]string{
"versionIntroduced": "v1.38",
},

View File

@@ -47,15 +47,15 @@ directories have been merged.
Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the ` +
"`dedupe`" + ` command will not be interactive.
confirmation. This means that for most duplicated files the
` + "`dedupe`" + ` command will not be interactive.
` + "`dedupe`" + ` considers files to be identical if they have the
same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.
same file path and the same hash. If the backend does not support
hashes (e.g. crypt wrapping Google Drive) then they will never be found
to be identical. If you use the ` + "`--size-only`" + ` flag then files
will be considered identical if they have the same size (any hash will be
ignored). This can be useful on crypt backends which do not support hashes.
Next rclone will resolve the remaining duplicates. Exactly which
action is taken depends on the dedupe mode. By default, rclone will
@@ -68,71 +68,82 @@ Here is an example run.
Before - with duplicates
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```" + `
Now the ` + "`dedupe`" + ` session
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```sh" + `
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```" + `
The result being
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```" + `
Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
Dedupe can be run non-interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag
or by using an extra parameter with the same value
* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
* ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
* ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
- ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
- ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
- ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
- ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
- ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
- ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
- ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
- ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
- ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
For example, to rename all the identically named photos in your Google Photos directory, do
For example, to rename all the identically named photos in your Google Photos
directory, do
rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```sh" + `
rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```" + `
Or
rclone dedupe rename "drive:Google Photos"
`,
` + "```sh" + `
rclone dedupe rename "drive:Google Photos"
` + "```",
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important",

View File

@@ -32,26 +32,29 @@ obeys include/exclude filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
the [purge](/commands/rclone_purge/) command.
If you supply the |--rmdirs| flag, it will remove all empty directories along with it.
You can also use the separate command [rmdir](/commands/rclone_rmdir/) or
[rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
If you supply the |--rmdirs| flag, it will remove all empty directories along
with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/)
or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||sh
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||
Then proceed with the actual delete:
rclone --min-size 100M delete remote:path
|||sh
rclone --min-size 100M delete remote:path
|||
That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.
**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.
`, "|", "`"),
|--dry-run| or the |--interactive|/|-i| flag.`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important,Filter,Listing",

View File

@@ -19,9 +19,8 @@ var commandDefinition = &cobra.Command{
Use: "deletefile remote:path",
Short: `Remove a single file from remote.`,
Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.
`,
remove a directory and it doesn't obey include/exclude filters - if the
specified file exists, it will always be removed.`,
Annotations: map[string]string{
"versionIntroduced": "v1.42",
"groups": "Important",

View File

@@ -14,8 +14,7 @@ var completionDefinition = &cobra.Command{
Use: "completion [shell]",
Short: `Output completion script for a given shell.`,
Long: `Generates a shell completion script for rclone.
Run with ` + "`--help`" + ` to list the supported shells.
`,
Run with ` + "`--help`" + ` to list the supported shells.`,
Annotations: map[string]string{
"versionIntroduced": "v1.33",
},

View File

@@ -18,17 +18,21 @@ var bashCommandDefinition = &cobra.Command{
Short: `Output bash completion script for rclone.`,
Long: `Generates a bash shell autocompletion script for rclone.
By default, when run without any arguments,
By default, when run without any arguments,
rclone completion bash
` + "```sh" + `
rclone completion bash
` + "```" + `
the generated script will be written to
/etc/bash_completion.d/rclone
` + "```sh" + `
/etc/bash_completion.d/rclone
` + "```" + `
and so rclone will probably need to be run as root, or with sudo.
If you supply a path to a file as the command line argument, then
If you supply a path to a file as the command line argument, then
the generated script will be written to that file, in which case
you should not need root privileges.
@@ -39,11 +43,12 @@ can logout and login again to use the autocompletion script.
Alternatively, you can source the script directly
. /path/to/my_bash_completion_scripts/rclone
` + "```sh" + `
. /path/to/my_bash_completion_scripts/rclone
` + "```" + `
and the autocompletion functionality will be added to your
current shell.
`,
current shell.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/bash_completion.d/rclone"

View File

@@ -21,18 +21,21 @@ var fishCommandDefinition = &cobra.Command{
This writes to /etc/fish/completions/rclone.fish by default so will
probably need to be run with sudo or as root, e.g.
sudo rclone completion fish
` + "```sh" + `
sudo rclone completion fish
` + "```" + `
Logout and login again to use the autocompletion scripts, or source
them directly
. /etc/fish/completions/rclone.fish
` + "```sh" + `
. /etc/fish/completions/rclone.fish
` + "```" + `
If you supply a command line argument the script will be written
there.
If output_file is "-", then the output will be written to stdout.
`,
If output_file is "-", then the output will be written to stdout.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/fish/completions/rclone.fish"

View File

@@ -20,13 +20,14 @@ var powershellCommandDefinition = &cobra.Command{
To load completions in your current shell session:
rclone completion powershell | Out-String | Invoke-Expression
` + "```sh" + `
rclone completion powershell | Out-String | Invoke-Expression
` + "```" + `
To load completions for every new session, add the output of the above command
to your powershell profile.
If output_file is "-" or missing, then the output will be written to stdout.
`,
If output_file is "-" or missing, then the output will be written to stdout.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
if len(args) == 0 || (len(args) > 0 && args[0] == "-") {

View File

@@ -21,18 +21,21 @@ var zshCommandDefinition = &cobra.Command{
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
probably need to be run with sudo or as root, e.g.
sudo rclone completion zsh
` + "```sh" + `
sudo rclone completion zsh
` + "```" + `
Logout and login again to use the autocompletion scripts, or source
them directly
autoload -U compinit && compinit
` + "```sh" + `
autoload -U compinit && compinit
` + "```" + `
If you supply a command line argument the script will be written
there.
If output_file is "-", then the output will be written to stdout.
`,
If output_file is "-", then the output will be written to stdout.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/usr/share/zsh/vendor-completions/_rclone"

View File

@@ -8,6 +8,7 @@ import (
"path"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"time"
@@ -91,7 +92,7 @@ rclone.org website.`,
Aliases []string
Annotations map[string]string
}
var commands = map[string]commandDetails{}
commands := map[string]commandDetails{}
var addCommandDetails func(root *cobra.Command, parentAliases []string)
addCommandDetails = func(root *cobra.Command, parentAliases []string) {
name := strings.ReplaceAll(root.CommandPath(), " ", "_") + ".md"
@@ -158,7 +159,7 @@ rclone.org website.`,
return err
}
var outdentTitle = regexp.MustCompile(`(?m)^#(#+)`)
outdentTitle := regexp.MustCompile(`(?m)^#(#+)`)
// Munge the files to add a link to the global flags page
err = filepath.Walk(out, func(path string, info os.FileInfo, err error) error {
@@ -169,6 +170,20 @@ rclone.org website.`,
name := filepath.Base(path)
cmd, ok := commands[name]
if !ok {
switch name {
case "rclone_mount.md":
switch runtime.GOOS {
case "darwin", "windows":
fs.Logf(nil, "Skipping docs for command not available without the cmount build tag: %v", name)
return nil
}
case "rclone_nfsmount.md", "rclone_serve_nfs.md":
switch runtime.GOOS {
case "windows":
fs.Logf(nil, "Skipping docs for command not supported on %v: %v", runtime.GOOS, name)
return nil
}
}
return fmt.Errorf("didn't find command for %q", name)
}
b, err := os.ReadFile(path)
@@ -184,7 +199,12 @@ rclone.org website.`,
return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
}
if endCut >= 0 {
doc = doc[:endCut] + "### See Also" + doc[endCut+12:]
doc = doc[:endCut] + `### See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
<!-- markdownlint-restore -->
`
}
} else {
var out strings.Builder
@@ -196,7 +216,7 @@ rclone.org website.`,
if group.Flags.HasFlags() {
_, _ = fmt.Fprintf(&out, "#### %s Options\n\n", group.Name)
_, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
_, _ = out.WriteString("```\n")
_, _ = out.WriteString("```text\n")
_, _ = out.WriteString(group.Flags.FlagUsages())
_, _ = out.WriteString("```\n\n")
}
@@ -204,7 +224,12 @@ rclone.org website.`,
} else {
_, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
}
doc = doc[:startCut] + out.String() + "### See Also" + doc[endCut+12:]
doc = doc[:startCut] + out.String() + `### See Also
<!-- markdownlint-capture -->
<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
<!-- markdownlint-restore -->
`
}
// outdent all the titles by one

View File

@@ -539,7 +539,7 @@ var command = &cobra.Command{
Aliases: []string{uniqueCommandName},
Use: subcommandName,
Short: "Speaks with git-annex over stdin/stdout.",
Long: gitannexHelp,
Long: strings.TrimSpace(gitannexHelp),
Annotations: map[string]string{
"versionIntroduced": "v1.67.0",
},

View File

@@ -4,8 +4,7 @@ users.
[git-annex]: https://git-annex.branchable.com/
Installation on Linux
---------------------
### Installation on Linux
1. Skip this step if your version of git-annex is [10.20240430] or newer.
Otherwise, you must create a symlink somewhere on your PATH with a particular

View File

@@ -103,14 +103,17 @@ as a relative path).
Run without a hash to see the list of all supported hashes, e.g.
$ rclone hashsum
` + hash.HelpString(4) + `
` + "```sh" + `
$ rclone hashsum
` + hash.HelpString(0) + "```" + `
Then
$ rclone hashsum MD5 remote:path
` + "```sh" + `
rclone hashsum MD5 remote:path
` + "```" + `
Note that hash names are case insensitive and values are output in lower case.
`,
Note that hash names are case insensitive and values are output in lower case.`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
"groups": "Filter,Listing",

View File

@@ -30,9 +30,7 @@ var Root = &cobra.Command{
mounting them, listing them in lots of different ways.
See the home page (https://rclone.org/) for installation, usage,
documentation, changelog and configuration walkthroughs.
`,
documentation, changelog and configuration walkthroughs.`,
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
atexit.Run()

View File

@@ -29,10 +29,12 @@ var commandDefinition = &cobra.Command{
Short: `Generate public link to file/folder.`,
Long: `Create, retrieve or remove a public link to the given file or folder.
rclone link remote:path/to/file
rclone link remote:path/to/folder/
rclone link --unlink remote:path/to/folder/
rclone link --expire 1d remote:path/to/file
` + "```sh" + `
rclone link remote:path/to/file
rclone link remote:path/to/folder/
rclone link --unlink remote:path/to/folder/
rclone link --expire 1d remote:path/to/file
` + "```" + `
If you supply the --expire flag, it will set the expiration time
otherwise it will use the default (100 years). **Note** not all
@@ -45,9 +47,8 @@ don't will just ignore it.
If successful, the last line of the output will contain the
link. Exact capabilities depend on the remote, but the link will
always by default be created with the least constraints e.g. no
expiry, no password protection, accessible without account.
`,
always by default be created with the least constraints - e.g. no
expiry, no password protection, accessible without account.`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
},

View File

@@ -114,8 +114,7 @@ func newLess(orderBy string) (less lessFn, err error) {
var commandDefinition = &cobra.Command{
Use: "listremotes [<filter>]",
Short: `List all the remotes in the config file and defined in environment variables.`,
Long: `
Lists all the available remotes from the config file, or the remotes matching
Long: `Lists all the available remotes from the config file, or the remotes matching
an optional filter.
Prints the result in human-readable format by default, and as a simple list of
@@ -126,8 +125,7 @@ the source (file or environment).
Result can be filtered by a filter argument which applies to all attributes,
and/or filter flags specific for each attribute. The values must be specified
according to regular rclone filtering pattern syntax.
`,
according to regular rclone filtering pattern syntax.`,
Annotations: map[string]string{
"versionIntroduced": "v1.34",
},

View File

@@ -21,13 +21,15 @@ var commandDefinition = &cobra.Command{
Long: `Lists the objects in the source path to standard output in a human
readable format with size and path. Recurses by default.
Eg
E.g.
$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
` + "```sh" + `
$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
` + "```" + `
` + lshelp.Help,
Annotations: map[string]string{

View File

@@ -7,16 +7,15 @@ import (
// Help describes the common help for all the list commands
// Warning! "|" will be replaced by backticks below
var Help = strings.ReplaceAll(`
Any of the filtering options can be applied to this command.
var Help = strings.ReplaceAll(`Any of the filtering options can be applied to this command.
There are several related list commands
* |ls| to list size and path of objects only
* |lsl| to list modification time, size and path of objects only
* |lsd| to list directories only
* |lsf| to list objects and directories in easy to parse format
* |lsjson| to list objects and directories in JSON format
- |ls| to list size and path of objects only
- |lsl| to list modification time, size and path of objects only
- |lsd| to list directories only
- |lsf| to list objects and directories in easy to parse format
- |lsjson| to list objects and directories in JSON format
|ls|,|lsl|,|lsd| are designed to be human-readable.
|lsf| is designed to be human and machine-readable.
@@ -24,9 +23,9 @@ There are several related list commands
Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the recursion.
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse.
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
use |-R| to make them recurse.
Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes).
`, "|", "`")
the bucket-based remotes).`, "|", "`")
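The `strings.ReplaceAll(..., "|", "`")` idiom above exists because a Go raw string literal cannot itself contain a backtick. A small self-contained example of the same pattern:

```go
package main

import (
	"fmt"
	"strings"
)

// "|" stands in for "`" inside the raw string and is swapped
// afterwards, exactly as lshelp.Help does above.
var help = strings.ReplaceAll(`Any of the filtering options can be applied.

- |ls| to list size and path of objects only
- |lsl| to list modification time, size and path of objects only`, "|", "`")

func main() {
	fmt.Println(help)
}
```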

View File

@@ -32,18 +32,22 @@ recurse by default. Use the ` + "`-R`" + ` flag to recurse.
This command lists the total size of the directory (if known, -1 if
not), the modification time (if known, the current time if not), the
number of objects in the directory (if known, -1 if not) and the name
of the directory, Eg
of the directory, E.g.
$ rclone lsd swift:
494000 2018-04-26 08:43:20 10000 10000files
65 2018-04-26 08:43:20 1 1File
` + "```sh" + `
$ rclone lsd swift:
494000 2018-04-26 08:43:20 10000 10000files
65 2018-04-26 08:43:20 1 1File
` + "```" + `
Or
$ rclone lsd drive:test
-1 2016-10-17 17:41:53 -1 1000files
-1 2017-01-03 14:40:54 -1 2500files
-1 2017-07-08 14:39:28 -1 4000files
` + "```sh" + `
$ rclone lsd drive:test
-1 2016-10-17 17:41:53 -1 1000files
-1 2017-01-03 14:40:54 -1 2500files
-1 2017-07-08 14:39:28 -1 4000files
` + "```" + `
If you just want the directory names use ` + "`rclone lsf --dirs-only`" + `.

View File

@@ -33,7 +33,7 @@ func init() {
cmd.Root.AddCommand(commandDefinition)
cmdFlags := commandDefinition.Flags()
flags.StringVarP(cmdFlags, &format, "format", "F", "p", "Output format - see help for details", "")
flags.StringVarP(cmdFlags, &timeFormat, "time-format", "t", "", "Specify a custom time format, or 'max' for max precision supported by remote (default: 2006-01-02 15:04:05)", "")
flags.StringVarP(cmdFlags, &timeFormat, "time-format", "t", "", "Specify a custom time format - see docs for details (default: 2006-01-02 15:04:05)", "")
flags.StringVarP(cmdFlags, &separator, "separator", "s", ";", "Separator for the items in the format", "")
flags.BoolVarP(cmdFlags, &dirSlash, "dir-slash", "d", true, "Append a slash to directory names", "")
flags.FVarP(cmdFlags, &hashType, "hash", "", "Use this hash when `h` is used in the format MD5|SHA-1|DropboxHash", "")
@@ -52,41 +52,47 @@ standard output in a form which is easy to parse by scripts. By
default this will just be the names of the objects and directories,
one per line. The directories will have a / suffix.
Eg
E.g.
$ rclone lsf swift:bucket
bevajer5jef
canole
diwogej7
ferejej3gux/
fubuwic
` + "```sh" + `
$ rclone lsf swift:bucket
bevajer5jef
canole
diwogej7
ferejej3gux/
fubuwic
` + "```" + `
Use the ` + "`--format`" + ` option to control what gets listed. By default this
is just the path, but you can use these parameters to control the
output:
p - path
s - size
t - modification time
h - hash
i - ID of object
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name
T - tier of storage if known, e.g. "Hot" or "Cool"
M - Metadata of object in JSON blob format, eg {"key":"value"}
` + "```text" + `
p - path
s - size
t - modification time
h - hash
i - ID of object
o - Original ID of underlying object
m - MimeType of object if known
e - encrypted name
T - tier of storage if known, e.g. "Hot" or "Cool"
M - Metadata of object in JSON blob format, eg {"key":"value"}
` + "```" + `
So if you wanted the path, size and modification time, you would use
` + "`--format \"pst\"`, or maybe `--format \"tsp\"`" + ` to put the path last.
Eg
E.g.
$ rclone lsf --format "tsp" swift:bucket
2016-06-25 18:55:41;60295;bevajer5jef
2016-06-25 18:55:43;90613;canole
2016-06-25 18:55:43;94467;diwogej7
2018-04-26 08:50:45;0;ferejej3gux/
2016-06-25 18:55:40;37600;fubuwic
` + "```sh" + `
$ rclone lsf --format "tsp" swift:bucket
2016-06-25 18:55:41;60295;bevajer5jef
2016-06-25 18:55:43;90613;canole
2016-06-25 18:55:43;94467;diwogej7
2018-04-26 08:50:45;0;ferejej3gux/
2016-06-25 18:55:40;37600;fubuwic
` + "```" + `
If you specify "h" in the format you will get the MD5 hash by default,
use the ` + "`--hash`" + ` flag to change which hash you want. Note that this
@@ -97,16 +103,20 @@ type.
For example, to emulate the md5sum command you can use
rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
` + "```sh" + `
rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
` + "```" + `
Eg
E.g.
$ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
7908e352297f0f530b84a756f188baa3 bevajer5jef
cd65ac234e6fea5925974a51cdd865cc canole
03b5341b4f234b9d984d03ad076bae91 diwogej7
8fd37c3810dd660778137ac3a66cc06d fubuwic
99713e14a4c4ff553acaf1930fad985b gixacuh7ku
` + "```sh" + `
$ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
7908e352297f0f530b84a756f188baa3 bevajer5jef
cd65ac234e6fea5925974a51cdd865cc canole
03b5341b4f234b9d984d03ad076bae91 diwogej7
8fd37c3810dd660778137ac3a66cc06d fubuwic
99713e14a4c4ff553acaf1930fad985b gixacuh7ku
` + "```" + `
(Though "rclone md5sum ." is an easier way of typing this.)
@@ -114,24 +124,28 @@ By default the separator is ";" this can be changed with the
` + "`--separator`" + ` flag. Note that separators aren't escaped in the path so
putting it last is a good strategy.
Eg
E.g.
$ rclone lsf --separator "," --format "tshp" swift:bucket
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
2018-04-26 08:52:53,0,,ferejej3gux/
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
` + "```sh" + `
$ rclone lsf --separator "," --format "tshp" swift:bucket
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
2018-04-26 08:52:53,0,,ferejej3gux/
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
` + "```" + `
You can output in CSV standard format. This will escape things in "
if they contain ,
if they contain,
Eg
E.g.
$ rclone lsf --csv --files-only --format ps remote:path
test.log,22355
test.sh,449
"this file contains a comma, in the file name.txt",6
` + "```sh" + `
$ rclone lsf --csv --files-only --format ps remote:path
test.log,22355
test.sh,449
"this file contains a comma, in the file name.txt",6
` + "```" + `
Note that the ` + "`--absolute`" + ` parameter is useful for making lists of files
to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
@@ -139,20 +153,27 @@ to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
For example, to find all the files modified within one day and copy
those only (without traversing the whole directory structure):
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
` + "```sh" + `
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
rclone copy --files-from-raw new_files /path/to/local remote:path
` + "```" + `
The default time format is ` + "`'2006-01-02 15:04:05'`" + `.
[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with the ` + "`--time-format`" + ` flag.
Examples:
[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with
the ` + "`--time-format`" + ` flag. Examples:
rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
rclone lsf remote:path --format pt --time-format RFC3339
rclone lsf remote:path --format pt --time-format DateOnly
rclone lsf remote:path --format pt --time-format max
` + "`--time-format max`" + ` will automatically truncate ` + "'`2006-01-02 15:04:05.000000000`'" + `
` + "```sh" + `
rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
rclone lsf remote:path --format pt --time-format RFC3339
rclone lsf remote:path --format pt --time-format DateOnly
rclone lsf remote:path --format pt --time-format max
rclone lsf remote:path --format pt --time-format unix
rclone lsf remote:path --format pt --time-format unixnano
` + "```" + `
` + "`--time-format max`" + ` will automatically truncate ` + "`2006-01-02 15:04:05.000000000`" + `
to the maximum precision supported by the remote.
` + lshelp.Help,
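The `--format` letters documented above map each rune to one field of the listing. rclone's real implementation lives elsewhere (in the operations package); the standalone sketch below, with hypothetical names throughout, just shows the rune-to-field dispatch the help text describes for `p`, `s` and `t`:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// entry is a hypothetical stand-in for a listed object.
type entry struct {
	path    string
	size    int64
	modTime time.Time
}

// formatEntry renders one entry according to the --format letters,
// joined by separator, as in --format "tsp" --separator ";".
func formatEntry(e entry, format, separator string) string {
	var fields []string
	for _, c := range format {
		switch c {
		case 'p':
			fields = append(fields, e.path)
		case 's':
			fields = append(fields, fmt.Sprint(e.size))
		case 't':
			fields = append(fields, e.modTime.Format("2006-01-02 15:04:05"))
		}
	}
	return strings.Join(fields, separator)
}

func main() {
	e := entry{"bevajer5jef", 60295, time.Date(2016, 6, 25, 18, 55, 41, 0, time.UTC)}
	fmt.Println(formatEntry(e, "tsp", ";"))
	// 2016-06-25 18:55:41;60295;bevajer5jef
}
```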

View File

@@ -43,25 +43,27 @@ var commandDefinition = &cobra.Command{
The output is an array of Items, where each Item looks like this:
{
"Hashes" : {
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
"MD5" : "b1946ac92492d2347c6235b4d2611184",
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
},
"ID": "y2djkhiujf83u33",
"OrigID": "UYOJVTUW00Q1RzTDA",
"IsBucket" : false,
"IsDir" : false,
"MimeType" : "application/octet-stream",
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
"Name" : "file.txt",
"Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
"EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338",
"Path" : "full/path/goes/here/file.txt",
"Size" : 6,
"Tier" : "hot",
}
` + "```json" + `
{
"Hashes" : {
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
"MD5" : "b1946ac92492d2347c6235b4d2611184",
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
},
"ID": "y2djkhiujf83u33",
"OrigID": "UYOJVTUW00Q1RzTDA",
"IsBucket" : false,
"IsDir" : false,
"MimeType" : "application/octet-stream",
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
"Name" : "file.txt",
"Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
"EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338",
"Path" : "full/path/goes/here/file.txt",
"Size" : 6,
"Tier" : "hot",
}
` + "```" + `
The exact set of properties included depends on the backend:
@@ -118,6 +120,7 @@ will be shown ("2017-05-31T16:15:57+01:00").
The whole output can be processed as a JSON blob, or alternatively it
can be processed line by line as each item is written on individual lines
(except with ` + "`--stat`" + `).
` + lshelp.Help,
Annotations: map[string]string{
"versionIntroduced": "v1.37",

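The lsjson help above notes the output can be consumed as one JSON blob or line by line. A hedged sketch of the line-by-line route, assuming `rclone lsjson remote:path` output is piped to stdin and that each item sits on its own line inside the surrounding array (paths ending in `[`, `]` or `,` would defeat the naive trimming here):

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
	"strings"
)

// item mirrors just the subset of lsjson fields this sketch uses.
type item struct {
	Path  string
	Size  int64
	IsDir bool
}

func main() {
	sc := bufio.NewScanner(os.Stdin)
	for sc.Scan() {
		// Strip the array punctuation so each line parses as a
		// bare JSON object.
		line := strings.Trim(strings.TrimSpace(sc.Text()), "[],")
		if line == "" {
			continue
		}
		var it item
		if err := json.Unmarshal([]byte(line), &it); err != nil {
			continue // skip lines that aren't items
		}
		fmt.Printf("%10d %s\n", it.Size, it.Path)
	}
}
```

Usage would be along the lines of `rclone lsjson remote:path | go run ./lsjson_sketch.go`.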
View File

@@ -21,13 +21,15 @@ var commandDefinition = &cobra.Command{
Long: `Lists the objects in the source path to standard output in a human
readable format with modification time, size and path. Recurses by default.
Eg
E.g.
$ rclone lsl swift:bucket
60295 2016-06-25 18:55:41.062626927 bevajer5jef
90613 2016-06-25 18:55:43.302607074 canole
94467 2016-06-25 18:55:43.046609333 diwogej7
37600 2016-06-25 18:55:40.814629136 fubuwic
` + "```sh" + `
$ rclone lsl swift:bucket
60295 2016-06-25 18:55:41.062626927 bevajer5jef
90613 2016-06-25 18:55:43.302607074 canole
94467 2016-06-25 18:55:43.046609333 diwogej7
37600 2016-06-25 18:55:40.814629136 fubuwic
` + "```" + `
` + lshelp.Help,
Annotations: map[string]string{

View File

@@ -35,8 +35,7 @@ to running ` + "`rclone hashsum MD5 remote:path`" + `.
This command can also hash data received on standard input (stdin),
by not passing a remote:path, or by passing a hyphen as remote:path
when there is data to read (if not, the hyphen will be treated literally,
as a relative path).
`,
as a relative path).`,
Annotations: map[string]string{
"versionIntroduced": "v1.02",
"groups": "Filter,Listing",

View File

@@ -273,7 +273,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
Use: commandName + " remote:path /path/to/mountpoint",
Hidden: hidden,
Short: `Mount the remote as file system on a mountpoint.`,
Long: help(commandName) + vfs.Help(),
Long: help(commandName) + strings.TrimSpace(vfs.Help()),
Annotations: map[string]string{
"versionIntroduced": "v1.33",
"groups": "Filter",
@@ -396,34 +396,14 @@ func (m *MountPoint) Wait() error {
if err := m.Unmount(); err != nil {
fs.Errorf(m.MountPoint, "Failed to unmount: %v", err)
} else {
fs.Errorf(m.MountPoint, "Unmounted rclone mount")
fs.Logf(m.MountPoint, "Unmounted rclone mount")
}
})
}
fnHandle := atexit.Register(finalise)
defer atexit.Unregister(fnHandle)
// Reload VFS cache on SIGHUP
sigHup := make(chan os.Signal, 1)
NotifyOnSigHup(sigHup)
var err error
waiting := true
for waiting {
select {
// umount triggered outside the app
case err = <-m.ErrChan:
waiting = false
// user sent SIGHUP to clear the cache
case <-sigHup:
root, err := m.VFS.Root()
if err != nil {
fs.Errorf(m.VFS.Fs(), "Error reading root: %v", err)
} else {
root.ForgetAll()
}
}
}
err := <-m.ErrChan
finalise()
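The hunk above drops the SIGHUP select loop in favour of a plain receive on the error channel, with `finalise` still responsible for the unmount. A minimal sketch of that shutdown shape (names hypothetical, not the mountlib API):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	errChan := make(chan error, 1)

	var once sync.Once
	finalise := func() {
		once.Do(func() { fmt.Println("unmounted") })
	}
	// The real code also registers finalise with atexit so a
	// signal-driven exit still unmounts; here it is only deferred.
	defer finalise()

	// Elsewhere the mount signals that it has ended (nil on a
	// clean unmount triggered outside the app).
	go func() { errChan <- nil }()

	// Wait now just blocks on the error channel - no select loop.
	if err := <-errChan; err != nil {
		fmt.Println("mount failed:", err)
	}
}
```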

View File

@@ -1,7 +1,7 @@
Rclone @ allows Linux, FreeBSD, macOS and Windows to
mount any of Rclone's cloud storage systems as a file system with FUSE.
First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
On Linux and macOS, you can run mount in either foreground or background (aka
daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag
@@ -16,7 +16,9 @@ mount, waits until success or timeout and exits with appropriate code
On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
is an **empty** **existing** directory:
rclone @ remote:path/to/files /path/to/local/mount
```sh
rclone @ remote:path/to/files /path/to/local/mount
```
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
for details. If foreground mount is used interactively from a console window,
@@ -26,26 +28,30 @@ used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
The following examples will mount to an automatically assigned drive,
to specific drive letter `X:`, to path `C:\path\parent\mount`
(where parent directory or drive must exist, and mount must **not** exist,
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
the last example will mount as network share `\\cloud\remote` and map it to an
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)),
and the last example will mount as network share `\\cloud\remote` and map it to an
automatically assigned drive:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files \\cloud\remote
```sh
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files \\cloud\remote
```
When the program ends while in foreground mode, either via Ctrl+C or receiving
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
When running in background mode the user will have to stop the mount manually:
# Linux
fusermount -u /path/to/local/mount
#... or on some systems
fusermount3 -u /path/to/local/mount
# OS X or Linux when using nfsmount
umount /path/to/local/mount
```sh
# Linux
fusermount -u /path/to/local/mount
#... or on some systems
fusermount3 -u /path/to/local/mount
# OS X or Linux when using nfsmount
umount /path/to/local/mount
```
The umount operation can fail, for example when the mountpoint is busy.
When that happens, it is the user's responsibility to stop the mount manually.
@@ -80,20 +86,22 @@ thumbnails for image and video files on network drives.
In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
However, you can also choose to mount it as a remote network drive, often described
as a network share. If you mount an rclone remote using the default, fixed drive mode
and experience unexpected program errors, freezes or other issues, consider mounting
as a network drive instead.
as a network share. If you mount an rclone remote using the default, fixed drive
mode and experience unexpected program errors, freezes or other issues, consider
mounting as a network drive instead.
When mounting as a fixed disk drive you can either mount to an unused drive letter,
or to a path representing a **nonexistent** subdirectory of an **existing** parent
directory or drive. Using the special value `*` will tell rclone to
automatically assign the next available drive letter, starting with Z: and moving backward.
Examples:
automatically assign the next available drive letter, starting with Z: and moving
backward. Examples:
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files X:
```sh
rclone @ remote:path/to/files *
rclone @ remote:path/to/files X:
rclone @ remote:path/to/files C:\path\parent\mount
rclone @ remote:path/to/files X:
```
Option `--volname` can be used to set a custom volume name for the mounted
file system. The default is to use the remote name and path.
@@ -103,24 +111,28 @@ to your @ command. Mounting to a directory path is not supported in
this mode, it is a limitation Windows imposes on junctions, so the remote must always
be mounted to a drive letter.
rclone @ remote:path/to/files X: --network-mode
```sh
rclone @ remote:path/to/files X: --network-mode
```
A volume name specified with `--volname` will be used to create the network share path.
A complete UNC path, such as `\\cloud\remote`, optionally with path
A volume name specified with `--volname` will be used to create the network share
path. A complete UNC path, such as `\\cloud\remote`, optionally with path
`\\cloud\remote\madeup\path`, will be used as is. Any other
string will be used as the share part, after a default prefix `\\server\`.
If no volume name is specified then `\\server\share` will be used.
You must make sure the volume name is unique when you are mounting more than one drive,
or else the mount command will fail. The share name will be treated as the volume label for
the mapped drive, shown in Windows Explorer etc, while the complete
You must make sure the volume name is unique when you are mounting more than one
drive, or else the mount command will fail. The share name will be treated as the
volume label for the mapped drive, shown in Windows Explorer etc, while the complete
`\\server\share` will be reported as the remote UNC path by
`net use` etc, just like a normal network drive mapping.
If you specify a full network share UNC path with `--volname`, this will implicitly
set the `--network-mode` option, so the following two examples have same result:
rclone @ remote:path/to/files X: --network-mode
rclone @ remote:path/to/files X: --volname \\server\share
```sh
rclone @ remote:path/to/files X: --network-mode
rclone @ remote:path/to/files X: --volname \\server\share
```
You may also specify the network share UNC path as the mountpoint itself. Then rclone
will automatically assign a drive letter, same as with `*` and use that as
@@ -128,15 +140,16 @@ mountpoint, and instead use the UNC path specified as the volume name, as if it
specified with the `--volname` option. This will also implicitly set
the `--network-mode` option. This means the following two examples have same result:
rclone @ remote:path/to/files \\cloud\remote
rclone @ remote:path/to/files * --volname \\cloud\remote
```sh
rclone @ remote:path/to/files \\cloud\remote
rclone @ remote:path/to/files * --volname \\cloud\remote
```
There is yet another way to enable network mode, and to set the share path,
and that is to pass the "native" libfuse/WinFsp option directly:
`--fuse-flag --VolumePrefix=\server\share`. Note that the path
must be with just a single backslash prefix in this case.
*Note:* In previous versions of rclone this was the only supported method.
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
@@ -149,11 +162,11 @@ The FUSE emulation layer on Windows must convert between the POSIX-based
permission model used in FUSE, and the permission model used in Windows,
based on access-control lists (ACL).
The mounted filesystem will normally get three entries in its access-control list (ACL),
representing permissions for the POSIX permission scopes: Owner, group and others.
By default, the owner and group will be taken from the current user, and the built-in
group "Everyone" will be used to represent others. The user/group can be customized
with FUSE options "UserName" and "GroupName",
The mounted filesystem will normally get three entries in its access-control list
(ACL), representing permissions for the POSIX permission scopes: Owner, group and
others. By default, the owner and group will be taken from the current user, and
the built-in group "Everyone" will be used to represent others. The user/group can
be customized with FUSE options "UserName" and "GroupName",
e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
The permissions on each entry will be set according to [options](#options)
`--dir-perms` and `--file-perms`, which takes a value in traditional Unix
@@ -253,58 +266,63 @@ does not suffer from the same limitations.
### Mounting on macOS
Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), [macFUSE](https://osxfuse.github.io/)
(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
which "mounts" via an NFSv4 local server.
Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/),
[macFUSE](https://osxfuse.github.io/) (also known as osxfuse) or
[FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional FUSE driver utilizing
a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which
"mounts" via an NFSv4 local server.
##### Unicode Normalization
#### Unicode Normalization
It is highly recommended to keep the default of `--no-unicode-normalization=false`
for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).
#### NFS mount
This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) command and mounts
it to the specified mountpoint. If you run this in background mode using |--daemon|, you will need to
send SIGTERM signal to the rclone process using |kill| command to stop the mount.
This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/)
command and mounts it to the specified mountpoint. If you run this in background
mode using |--daemon|, you will need to send SIGTERM signal to the rclone process
using |kill| command to stop the mount.
Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
Note that `--nfs-cache-handle-limit` controls the maximum number of cached file
handles stored by the `nfsmount` caching handler. This should not be set too low
or you may experience errors when trying to access files. The default is 1000000,
but consider lowering this limit if the server's system resource usage causes problems.
#### macFUSE Notes
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
the website, rclone will locate the macFUSE libraries without any further intervention.
If however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
the following additional steps are required.
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases)
from the website, rclone will locate the macFUSE libraries without any further intervention.
If however, macFUSE is installed using the [macports](https://www.macports.org/)
package manager, the following additional steps are required.
sudo mkdir /usr/local/lib
cd /usr/local/lib
sudo ln -s /opt/local/lib/libfuse.2.dylib
```sh
sudo mkdir /usr/local/lib
cd /usr/local/lib
sudo ln -s /opt/local/lib/libfuse.2.dylib
```
#### FUSE-T Limitations, Caveats, and Notes
There are some limitations, caveats, and notes about how it works. These are current as
of FUSE-T version 1.0.14.
There are some limitations, caveats, and notes about how it works. These are
current as of FUSE-T version 1.0.14.
##### ModTime update on read
As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
> File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with
> File access and modification times cannot be set separately as it seems to be an
> issue with the NFS client which always modifies both. Can be reproduced with
> 'touch -m' and 'touch -a' commands
This means that viewing files with various tools, notably macOS Finder, will cause rclone
to update the modification time of the file. This may make rclone upload a full new copy
of the file.
This means that viewing files with various tools, notably macOS Finder, will cause
rclone to update the modification time of the file. This may make rclone upload a
full new copy of the file.
##### Read Only mounts
When mounting with `--read-only`, attempts to write to files will fail *silently* as
opposed to with a clear warning as in macFUSE.
When mounting with `--read-only`, attempts to write to files will fail *silently*
as opposed to with a clear warning as in macFUSE.
### Limitations
@@ -405,12 +423,14 @@ helper you should symlink rclone binary to `/sbin/mount.rclone` and optionally
rclone will detect it and translate command-line arguments appropriately.
Now you can run classic mounts like this:
```
```sh
mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
```
or create systemd mount units:
```
```ini
# /etc/systemd/system/mnt-data.mount
[Unit]
Description=Mount for /mnt/data
@@ -422,7 +442,8 @@ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone
```
optionally accompanied by systemd automount unit
```
```ini
# /etc/systemd/system/mnt-data.automount
[Unit]
Description=AutoMount for /mnt/data
@@ -434,7 +455,8 @@ WantedBy=multi-user.target
```
or add in `/etc/fstab` a line like
```
```sh
sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
```

View File

@@ -65,14 +65,18 @@ This takes the following parameters:
Example:
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
` + "```sh" + `
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
` + "```" + `
The vfsOpt are as described in options/get and can be seen in the
"vfs" section when running the command below, and the mountOpt can be seen in the "mount" section:
rclone rc options/get
` + "```sh" + `
rclone rc options/get
` + "```" + `
`,
})
}

View File

@@ -64,7 +64,7 @@ the backend supports it. If metadata syncing is required then use the
|--metadata| flag.
Note that the modification time and metadata for the root directory
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
will **not** be synced. See <https://github.com/rclone/rclone/issues/7652>
for more info.
**Important**: Since this can cause data loss, test first with the

View File

@@ -35,18 +35,22 @@ like the [move](/commands/rclone_move/) command.
So
rclone moveto src dst
` + "```sh" + `
rclone moveto src dst
` + "```" + `
where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
This will:
if src is file
move it to dst, overwriting an existing file if it exists
if src is directory
move it to dst, overwriting existing files if they exist
see move command for full details
` + "```text" + `
if src is file
move it to dst, overwriting an existing file if it exists
if src is directory
move it to dst, overwriting existing files if they exist
see move command for full details
` + "```" + `
This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. src will be deleted on

View File

@@ -47,22 +47,26 @@ structure as it goes along.
You can interact with the user interface using key presses,
press '?' to toggle the help on and off. The supported keys are:
` + strings.Join(helpText()[1:], "\n ") + `
` + "```text" + `
` + strings.Join(helpText()[1:], "\n") + `
` + "```" + `
Listed files/directories may be prefixed by a one-character flag,
some of them combined with a description in brackets at end of line.
These flags have the following meaning:
e means this is an empty directory, i.e. contains no files (but
may contain empty subdirectories)
~ means this is a directory where some of the files (possibly in
subdirectories) have unknown size, and therefore the directory
size may be underestimated (and average size inaccurate, as it
is average of the files with known sizes).
. means an error occurred while reading a subdirectory, and
therefore the directory size may be underestimated (and average
size inaccurate)
! means an error occurred while reading this directory
` + "```text" + `
e means this is an empty directory, i.e. contains no files (but
may contain empty subdirectories)
~ means this is a directory where some of the files (possibly in
subdirectories) have unknown size, and therefore the directory
size may be underestimated (and average size inaccurate, as it
is average of the files with known sizes).
. means an error occurred while reading a subdirectory, and
therefore the directory size may be underestimated (and average
size inaccurate)
! means an error occurred while reading this directory
` + "```" + `
This is an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
rclone remotes. It is missing lots of features at the moment
@@ -73,8 +77,7 @@ UI won't respond in the meantime since the deletion is done synchronously.
For a non-interactive listing of the remote, see the
[tree](/commands/rclone_tree/) command. To just get the total size of
the remote you can also use the [size](/commands/rclone_size/) command.
`,
the remote you can also use the [size](/commands/rclone_size/) command.`,
Annotations: map[string]string{
"versionIntroduced": "v1.37",
"groups": "Filter,Listing",

View File

@@ -22,9 +22,8 @@ var commandDefinition = &cobra.Command{
Long: `In the rclone config file, human-readable passwords are
obscured. Obscuring them is done by encrypting them and writing them
out in base64. This is **not** a secure way of encrypting these
passwords as rclone can decrypt them - it is to prevent "eyedropping"
- namely someone seeing a password in the rclone config file by
accident.
passwords as rclone can decrypt them - it is to prevent "eyedropping" -
namely someone seeing a password in the rclone config file by accident.
Many equally important things (like access tokens) are not obscured in
the config file. However it is very hard to shoulder surf a 64
@@ -34,7 +33,9 @@ This command can also accept a password through STDIN instead of an
argument by passing a hyphen as an argument. This will use the first
line of STDIN as the password not including the trailing newline.
echo "secretpassword" | rclone obscure -
` + "```sh" + `
echo "secretpassword" | rclone obscure -
` + "```" + `
If there is no data on STDIN to read, rclone obscure will default to
obfuscating the hyphen itself.
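The help above describes obscuring as reversible encryption written out in base64. The sketch below is a hedged illustration of that idea only, assuming AES-CTR with a static key and URL-safe base64; it is not rclone's exact implementation. It also shows why obscuring is not secure: anyone holding the key (i.e. the binary) can reverse it.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"fmt"
)

// A static key makes obscuring reversible by anyone with the binary -
// which is the point: it only defeats casual eyedropping.
var key = []byte("0123456789abcdef0123456789abcdef")

func obscure(password string) (string, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return "", err
	}
	// Random IV followed by the CTR-encrypted password.
	buf := make([]byte, aes.BlockSize+len(password))
	iv := buf[:aes.BlockSize]
	if _, err := rand.Read(iv); err != nil {
		return "", err
	}
	cipher.NewCTR(block, iv).XORKeyStream(buf[aes.BlockSize:], []byte(password))
	return base64.RawURLEncoding.EncodeToString(buf), nil
}

func main() {
	out, err := obscure("secretpassword")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```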

View File

@@ -24,12 +24,12 @@ include/exclude filters - everything will be removed. Use the
delete files. To delete empty directories only, use command
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).
The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.
The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag.
However, some backends will implement this command directly, in which
case ` + "`--checkers`" + ` will be ignored.
**Important**: Since this can cause data loss, test first with the
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
`,
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.`,
Annotations: map[string]string{
"groups": "Important",
},

View File

@@ -53,8 +53,8 @@ var commandDefinition = &cobra.Command{
Short: `Run a command against a running rclone.`,
Long: strings.ReplaceAll(`This runs a command against a running rclone. Use the |--url| flag to
specify an non default URL to connect on. This can be either a
":port" which is taken to mean "http://localhost:port" or a
"host:port" which is taken to mean "http://host:port"
":port" which is taken to mean <http://localhost:port> or a
"host:port" which is taken to mean <http://host:port>.
A username and password can be passed in with |--user| and |--pass|.
@@ -63,10 +63,12 @@ Note that |--rc-addr|, |--rc-user|, |--rc-pass| will be read also for
The |--unix-socket| flag can be used to connect over a unix socket like this
# start server on /tmp/my.socket
rclone rcd --rc-addr unix:///tmp/my.socket
# Connect to it
rclone rc --unix-socket /tmp/my.socket core/stats
|||sh
# start server on /tmp/my.socket
rclone rcd --rc-addr unix:///tmp/my.socket
# Connect to it
rclone rc --unix-socket /tmp/my.socket core/stats
|||
Arguments should be passed in as parameter=value.
@@ -81,29 +83,38 @@ options in the form |-o key=value| or |-o key|. It can be repeated as
many times as required. This is useful for rc commands which take the
"opt" parameter which by convention is a dictionary of strings.
-o key=value -o key2
|||text
-o key=value -o key2
|||
Will place this in the "opt" value
{"key":"value", "key2":""}
|||json
{"key":"value", "key2":""}
|||
The |-a|/|--arg| option can be used to set strings in the "arg" value. It
can be repeated as many times as required. This is useful for rc
commands which take the "arg" parameter which by convention is a list
of strings.
-a value -a value2
|||text
-a value -a value2
|||
Will place this in the "arg" value
["value", "value2"]
|||json
["value", "value2"]
|||
Use |--loopback| to connect to the rclone instance running |rclone rc|.
This is very useful for testing commands without having to run an
rclone rc server, e.g.:
rclone rc --loopback operations/about fs=/
|||sh
rclone rc --loopback operations/about fs=/
|||
Use |rclone rc| to see a list of all possible commands.`, "|", "`"),
Annotations: map[string]string{

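The `-o key=value -o key` convention above builds the "opt" dictionary of strings. A small sketch of that parsing (the function is hypothetical, not rclone's flag code):

```go
package main

import (
	"fmt"
	"strings"
)

// parseOpts turns repeated -o key=value / -o key arguments into the
// "opt" dictionary described above.
func parseOpts(args []string) map[string]string {
	opt := make(map[string]string)
	for _, a := range args {
		if k, v, found := strings.Cut(a, "="); found {
			opt[k] = v
		} else {
			opt[a] = "" // bare "-o key" maps to an empty value
		}
	}
	return opt
}

func main() {
	fmt.Println(parseOpts([]string{"key=value", "key2"}))
	// map[key:value key2:]
}
```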
View File

@@ -28,8 +28,10 @@ var commandDefinition = &cobra.Command{
Short: `Copies standard input to file on remote.`,
Long: `Reads from standard input (stdin) and copies it to a single remote file.
echo "hello world" | rclone rcat remote:path/to/file
ffmpeg - | rclone rcat remote:path/to/file
` + "```sh" + `
echo "hello world" | rclone rcat remote:path/to/file
ffmpeg - | rclone rcat remote:path/to/file
` + "```" + `
If the remote file already exists, it will be overwritten.
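At its core, rcat's behaviour is a stream copy from stdin to a destination writer. A minimal sketch of that shape, not rclone's actual implementation - here the remote object is stood in for by a local file:

```go
package main

import (
	"io"
	"log"
	"os"
)

func main() {
	// Stand-in for the remote object writer rcat would open; this
	// sketch streams stdin into a local file, overwriting it.
	dst, err := os.Create("out.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()
	if _, err := io.Copy(dst, os.Stdin); err != nil {
		log.Fatal(err)
	}
}
```

Usage mirrors the help text: `echo "hello world" | go run ./rcat_sketch.go`.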

View File

@@ -3,6 +3,7 @@ package rcd
import (
"context"
"strings"
"github.com/rclone/rclone/cmd"
"github.com/rclone/rclone/fs"
@@ -31,7 +32,7 @@ the browser when rclone is run.
See the [rc documentation](/rc/) for more info on the rc flags.
` + libhttp.Help(rcflags.FlagPrefix) + libhttp.TemplateHelp(rcflags.FlagPrefix) + libhttp.AuthHelp(rcflags.FlagPrefix),
` + strings.TrimSpace(libhttp.Help(rcflags.FlagPrefix)+libhttp.TemplateHelp(rcflags.FlagPrefix)+libhttp.AuthHelp(rcflags.FlagPrefix)),
Annotations: map[string]string{
"versionIntroduced": "v1.45",
"groups": "RC",

View File

@@ -21,8 +21,7 @@ has any objects in it, not even empty subdirectories. Use
command [rmdirs](/commands/rclone_rmdirs/) (or [delete](/commands/rclone_delete/)
with option ` + "`--rmdirs`" + `) to do that.
To delete a path and any objects in it, use [purge](/commands/rclone_purge/) command.
`,
To delete a path and any objects in it, use [purge](/commands/rclone_purge/) command.`,
Annotations: map[string]string{
"groups": "Important",
},

View File

@@ -38,8 +38,7 @@ This will delete ` + "`--checkers`" + ` directories concurrently so
if you have thousands of empty directories consider increasing this number.
To delete a path and any objects in it, use the [purge](/commands/rclone_purge/)
command.
`,
command.`,
Annotations: map[string]string{
"versionIntroduced": "v1.35",
"groups": "Important",

View File

@@ -65,7 +65,7 @@ var cmdSelfUpdate = &cobra.Command{
Use: "selfupdate",
Aliases: []string{"self-update"},
Short: `Update the rclone binary.`,
Long: selfUpdateHelp,
Long: strings.TrimSpace(selfUpdateHelp),
Annotations: map[string]string{
"versionIntroduced": "v1.55",
},

View File

@@ -43,5 +43,5 @@ command will rename the old executable to 'rclone.old.exe' upon success.
Please note that this command was not available before rclone version 1.55.
If it fails for you with the message `unknown command "selfupdate"` then
you will need to update manually following the install instructions located
at https://rclone.org/install/
you will need to update manually following the
[install documentation](https://rclone.org/install/).

View File

@@ -123,7 +123,7 @@ default "rclone (hostname)".
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
logging of all UPNP traffic.
` + vfs.Help(),
` + strings.TrimSpace(vfs.Help()),
Annotations: map[string]string{
"versionIntroduced": "v1.46",
"groups": "Filter",

View File

@@ -59,7 +59,7 @@ func init() {
var Command = &cobra.Command{
Use: "docker",
Short: `Serve any remote on docker's volume plugin API.`,
Long: help() + vfs.Help(),
Long: help() + strings.TrimSpace(vfs.Help()),
Annotations: map[string]string{
"versionIntroduced": "v1.56",
"groups": "Filter",

View File

@@ -8,7 +8,8 @@ docker daemon and runs the corresponding code when necessary.
Docker plugins can run as a managed plugin under control of the docker daemon
or as an independent native service. For testing, you can just run it directly
from the command line, for example:
```
```sh
sudo rclone serve docker --base-dir /tmp/rclone-volumes --socket-addr localhost:8787 -vv
```

View File

@@ -76,7 +76,6 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
// start mount monitoring
drv.hupChan = make(chan os.Signal, 1)
drv.monChan = make(chan bool, 1)
mountlib.NotifyOnSigHup(drv.hupChan)
go drv.monitor()
// unmount all volumes on exit

View File

@@ -1,6 +1,7 @@
package docker
import (
"context"
"fmt"
"strings"
@@ -112,6 +113,7 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
}
mntMap := configmap.Simple{}
vfsMap := configmap.Simple{}
globalMap := configmap.Simple{}
for key := range opt {
var ok bool
var err error
@@ -144,6 +146,13 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
ok = true
}
}
if !ok {
// try as a global option in globalMap
if fs.ConfigOptionsInfo.Get(underscoreKey) != nil {
globalMap[underscoreKey] = vol.Options[key]
ok = true
}
}
if !ok {
// try as a backend option in fsOpt (backends use "_" instead of "-")
@@ -172,6 +181,20 @@ func (vol *Volume) applyOptions(volOpt VolOpts) error {
return fmt.Errorf("cannot parse mount options: %w", err)
}
// Parse Global options
if len(globalMap) > 0 {
ctx := context.Background()
ci := fs.GetConfig(ctx)
err = configstruct.Set(globalMap, ci)
if err != nil {
return fmt.Errorf("cannot parse global options: %w", err)
}
err = ci.Reload(ctx)
if err != nil {
return fmt.Errorf("failed to reload global options: %w", err)
}
}
// build remote string from fsName, fsType, fsOpt, fsPath
colon := ":"
comma := ","

Some files were not shown because too many files have changed in this diff.