Mirror of https://github.com/rclone/rclone.git, synced 2025-12-06 00:03:32 +00:00

Compare commits: 30 commits, dependabot ... v1.71.1
| Author | SHA1 | Date |
|---|---|---|
| | 1a98c36a73 | |
| | 931ab7c4db | |
| | 2b531ada34 | |
| | 45f45c987c | |
| | d2351e60b6 | |
| | f8de6b48f1 | |
| | 616a280aac | |
| | e1833f4090 | |
| | 135d89d0f9 | |
| | ea54bddbd5 | |
| | baf6167930 | |
| | 1d91618d9e | |
| | 4a7e62b79c | |
| | dcca477f39 | |
| | 7304ecaf18 | |
| | c3932ecde1 | |
| | c9df7b1cd7 | |
| | 3985496e5d | |
| | 0d2ef2eb20 | |
| | 836e19243d | |
| | 0a6cce1bc1 | |
| | cffb6732a4 | |
| | 236f247c59 | |
| | 3b07f9d34d | |
| | bad77c642f | |
| | 41eef6608b | |
| | fc6bd9ff79 | |
| | ee83cd214c | |
| | 2c2642a927 | |
| | 32eed8dd36 | |
MANUAL.html (generated, 8200): file diff suppressed because it is too large
MANUAL.txt (generated, 2043): file diff suppressed because it is too large
Makefile (1)
@@ -144,6 +144,7 @@ MANUAL.txt: MANUAL.md
    pandoc -s --from markdown-smart --to plain MANUAL.md -o MANUAL.txt

commanddocs: rclone
    go generate ./lib/transform
    -@rmdir -p '$$HOME/.config/rclone'
    XDG_CACHE_HOME="" XDG_CONFIG_HOME="" HOME="\$$HOME" USER="\$$USER" rclone gendocs --config=/notfound docs/content/
    @[ ! -e '$$HOME' ] || (echo 'Error: created unwanted directory named $$HOME' && exit 1)
@@ -2765,8 +2765,6 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {
        blockList blockblob.GetBlockListResponse
        properties *blob.GetPropertiesResponse
        options *blockblob.CommitBlockListOptions
        // Use temporary pacer as this can be called recursively which can cause a deadlock with --max-connections
        pacer = fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant)))
    )

    properties, err = o.readMetaDataAlways(ctx)
@@ -2778,7 +2776,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

    if objectExists {
        // Get the committed block list
        err = pacer.Call(func() (bool, error) {
        err = o.fs.pacer.Call(func() (bool, error) {
            blockList, err = blockBlobSVC.GetBlockList(ctx, blockblob.BlockListTypeAll, nil)
            return o.fs.shouldRetry(ctx, err)
        })
@@ -2820,7 +2818,7 @@ func (o *Object) clearUncommittedBlocks(ctx context.Context) (err error) {

    // Commit only the committed blocks
    fs.Debugf(o, "Committing %d blocks to remove uncommitted blocks", len(blockIDs))
    err = pacer.Call(func() (bool, error) {
    err = o.fs.pacer.Call(func() (bool, error) {
        _, err := blockBlobSVC.CommitBlockList(ctx, blockIDs, options)
        return o.fs.shouldRetry(ctx, err)
    })
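The second and third hunks above switch a call between the shared `o.fs.pacer` and the function-local `pacer` declared in the first hunk, in line with the comment about recursive calls deadlocking under `--max-connections`. The following standalone sketch shows the local-pacer pattern using the same `fs.NewPacer`/`lib/pacer` API that appears in the hunk; the sleep constants and the paced operation are placeholders rather than the backend's real values.

```go
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/rclone/rclone/fs"
    "github.com/rclone/rclone/lib/pacer"
)

func main() {
    ctx := context.Background()
    // A function-local pacer can be used while the shared per-Fs pacer is
    // already held, which is the point of the change described above.
    p := fs.NewPacer(ctx, pacer.NewS3(
        pacer.MinSleep(10*time.Millisecond), // placeholder tuning values
        pacer.MaxSleep(2*time.Second),
        pacer.DecayConstant(2),
    ))
    err := p.Call(func() (bool, error) {
        fmt.Println("one paced API call")
        return false, nil // false means "do not retry"
    })
    if err != nil {
        fmt.Println("error:", err)
    }
}
```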
@@ -125,10 +125,21 @@ type FolderItems struct {
    Offset int `json:"offset"`
    Limit int `json:"limit"`
    NextMarker *string `json:"next_marker,omitempty"`
    Order []struct {
        By string `json:"by"`
        Direction string `json:"direction"`
    } `json:"order"`
    // There is some confusion about how this is actually
    // returned. The []struct has worked for many years, but in
    // https://github.com/rclone/rclone/issues/8776 box was
    // returning it returned not as a list. We don't actually use
    // this so comment it out.
    //
    //  Order struct {
    //      By string `json:"by"`
    //      Direction string `json:"direction"`
    //  } `json:"order"`
    //
    //  Order []struct {
    //      By string `json:"by"`
    //      Direction string `json:"direction"`
    //  } `json:"order"`
}

// Parent defined the ID of the parent directory
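The change above simply comments the `Order` field out because rclone never reads it. If a field with this object-or-list ambiguity did need to be parsed, a tolerant unmarshaller is one option. This is only an illustrative sketch with invented type names, not part of the diff.

```go
package main

import (
    "encoding/json"
    "fmt"
)

// orderEntry is one order item as Box usually returns it.
type orderEntry struct {
    By        string `json:"by"`
    Direction string `json:"direction"`
}

// orderField accepts either a single object or a list of objects.
type orderField []orderEntry

func (o *orderField) UnmarshalJSON(data []byte) error {
    var list []orderEntry
    if err := json.Unmarshal(data, &list); err == nil {
        *o = list
        return nil
    }
    var single orderEntry
    if err := json.Unmarshal(data, &single); err != nil {
        return err
    }
    *o = orderField{single}
    return nil
}

func main() {
    for _, in := range []string{
        `[{"by":"type","direction":"ASC"}]`, // list form
        `{"by":"type","direction":"ASC"}`,   // single-object form
    } {
        var o orderField
        if err := json.Unmarshal([]byte(in), &o); err != nil {
            fmt.Println("error:", err)
            continue
        }
        fmt.Printf("%+v\n", o)
    }
}
```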
@@ -241,18 +241,22 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (outFs fs
        DirModTimeUpdatesOnWrite: true,
        PartialUploads: true,
    }).Fill(ctx, f)
    canMove := true
    canMove, slowHash := true, false
    for _, u := range f.upstreams {
        features = features.Mask(ctx, u.f) // Mask all upstream fs
        if !operations.CanServerSideMove(u.f) {
            canMove = false
        }
        slowHash = slowHash || u.f.Features().SlowHash
    }
    // We can move if all remotes support Move or Copy
    if canMove {
        features.Move = f.Move
    }

    // If any of upstreams are SlowHash, propagate it
    features.SlowHash = slowHash

    // Enable ListR when upstreams either support ListR or is local
    // But not when all upstreams are local
    if features.ListR == nil {
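The new `slowHash` accumulator just ORs the upstream feature flags together before assigning `features.SlowHash`. Pulled out of `NewFs` as a standalone sketch (the helper name and wiring are invented; `Features().SlowHash` is the real field used above):

```go
// anySlowHash reports whether any upstream declares SlowHash, mirroring the
// loop added in the hunk above. fs is "github.com/rclone/rclone/fs".
func anySlowHash(upstreams []fs.Fs) bool {
    slow := false
    for _, u := range upstreams {
        slow = slow || u.Features().SlowHash
    }
    return slow
}
```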
@@ -590,7 +590,7 @@ func (f *Fs) PublicLink(ctx context.Context, remote string, expire fs.Duration,
        return "", err
    }
    bucket, bucketPath := f.split(remote)
    return path.Join(f.opt.FrontEndpoint, "/download/", bucket, quotePath(bucketPath)), nil
    return path.Join(f.opt.FrontEndpoint, "/download/", bucket, rest.URLPathEscapeAll(bucketPath)), nil
}

// Copy src to this remote using server-side copy operations.
@@ -622,7 +622,7 @@ func (f *Fs) Copy(ctx context.Context, src fs.Object, remote string) (_ fs.Objec
        "x-archive-auto-make-bucket": "1",
        "x-archive-queue-derive": "0",
        "x-archive-keep-old-version": "0",
        "x-amz-copy-source": quotePath(path.Join("/", srcBucket, srcPath)),
        "x-amz-copy-source": rest.URLPathEscapeAll(path.Join("/", srcBucket, srcPath)),
        "x-amz-metadata-directive": "COPY",
        "x-archive-filemeta-sha1": srcObj.sha1,
        "x-archive-filemeta-md5": srcObj.md5,
@@ -778,7 +778,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
    // make a GET request to (frontend)/download/:item/:path
    opts := rest.Opts{
        Method: "GET",
        Path: path.Join("/download/", o.fs.root, quotePath(o.fs.opt.Enc.FromStandardPath(o.remote))),
        Path: path.Join("/download/", o.fs.root, rest.URLPathEscapeAll(o.fs.opt.Enc.FromStandardPath(o.remote))),
        Options: optionsFixed,
    }
    err = o.fs.pacer.Call(func() (bool, error) {
@@ -1334,16 +1334,6 @@ func trimPathPrefix(s, prefix string, enc encoder.MultiEncoder) string {
    return enc.ToStandardPath(strings.TrimPrefix(s, prefix+"/"))
}

// mimics urllib.parse.quote() on Python; exclude / from url.PathEscape
func quotePath(s string) string {
    seg := strings.Split(s, "/")
    newValues := []string{}
    for _, v := range seg {
        newValues = append(newValues, url.QueryEscape(v))
    }
    return strings.Join(newValues, "/")
}

var (
    _ fs.Fs = &Fs{}
    _ fs.Copier = &Fs{}
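The removed `quotePath` helper (last hunk above) escaped each path segment with `url.QueryEscape` while keeping the `/` separators. The standalone sketch below shows what that produces; note that `QueryEscape` applies query-string rules, so spaces become `+`, which is one visible difference from a dedicated path escaper such as the `rest.URLPathEscapeAll` the diff switches to (its exact behaviour is not shown here, so that comparison is an assumption).

```go
package main

import (
    "fmt"
    "net/url"
    "strings"
)

// quotePath is the helper being removed above: it mimics Python's
// urllib.parse.quote by escaping each path segment but not the "/" separators.
func quotePath(s string) string {
    seg := strings.Split(s, "/")
    newValues := []string{}
    for _, v := range seg {
        newValues = append(newValues, url.QueryEscape(v))
    }
    return strings.Join(newValues, "/")
}

func main() {
    // Prints "dir+one/file+name%2Bx.txt": spaces become "+" under QueryEscape.
    fmt.Println(quotePath("dir one/file name+x.txt"))
}
```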
@@ -7,6 +7,7 @@ import (
    "errors"
    "fmt"
    "io"
    iofs "io/fs"
    "os"
    "path"
    "path/filepath"
@@ -841,7 +842,13 @@ func (f *Fs) Rmdir(ctx context.Context, dir string) error {
    } else if !fi.IsDir() {
        return fs.ErrorIsFile
    }
    return os.Remove(localPath)
    err := os.Remove(localPath)
    if runtime.GOOS == "windows" && errors.Is(err, iofs.ErrPermission) { // https://github.com/golang/go/issues/26295
        if os.Chmod(localPath, 0o600) == nil {
            err = os.Remove(localPath)
        }
    }
    return err
}

// Precision of the file system
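The retry added above (clear the read-only attribute, then call `os.Remove` again) can be exercised on its own. A minimal sketch, with the helper name and the test path invented:

```go
package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
    "runtime"
)

// removeRetryReadonly mirrors the Rmdir change above: on Windows, os.Remove
// returns a permission error for read-only items (golang/go#26295), so clear
// the attribute with Chmod and try once more.
func removeRetryReadonly(path string) error {
    err := os.Remove(path)
    if runtime.GOOS == "windows" && errors.Is(err, fs.ErrPermission) {
        if os.Chmod(path, 0o600) == nil {
            err = os.Remove(path)
        }
    }
    return err
}

func main() {
    if err := os.Mkdir("testdir", 0o755); err != nil {
        fmt.Println("mkdir:", err)
        return
    }
    fmt.Println("remove:", removeRetryReadonly("testdir"))
}
```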
backend/local/local_internal_windows_test.go (new file, 40 lines)

@@ -0,0 +1,40 @@
//go:build windows

package local

import (
    "context"
    "path/filepath"
    "runtime"
    "syscall"
    "testing"

    "github.com/rclone/rclone/fs/operations"
    "github.com/rclone/rclone/fstest"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

// TestRmdirWindows tests that FILE_ATTRIBUTE_READONLY does not block Rmdir on windows.
// Microsoft docs indicate that "This attribute is not honored on directories."
// See https://learn.microsoft.com/en-us/windows/win32/fileio/file-attribute-constants#file_attribute_readonly
// and https://github.com/golang/go/issues/26295
func TestRmdirWindows(t *testing.T) {
    if runtime.GOOS != "windows" {
        t.Skipf("windows only")
    }
    r := fstest.NewRun(t)
    defer r.Finalise()

    err := operations.Mkdir(context.Background(), r.Flocal, "testdir")
    require.NoError(t, err)

    ptr, err := syscall.UTF16PtrFromString(filepath.Join(r.Flocal.Root(), "testdir"))
    require.NoError(t, err)

    err = syscall.SetFileAttributes(ptr, uint32(syscall.FILE_ATTRIBUTE_DIRECTORY+syscall.FILE_ATTRIBUTE_READONLY))
    require.NoError(t, err)

    err = operations.Rmdir(context.Background(), r.Flocal, "testdir")
    assert.NoError(t, err)
}
@@ -51,47 +51,52 @@ output. The output is typically used, free, quota and trash contents.

E.g. Typical output from ` + "`rclone about remote:`" + ` is:

Total:   17 GiB
Used:    7.444 GiB
Free:    1.315 GiB
Trashed: 100.000 MiB
Other:   8.241 GiB
` + "```text" + `
Total:   17 GiB
Used:    7.444 GiB
Free:    1.315 GiB
Trashed: 100.000 MiB
Other:   8.241 GiB
` + "```" + `

Where the fields are:

* Total: Total size available.
* Used: Total size used.
* Free: Total space available to this user.
* Trashed: Total space used by trash.
* Other: Total amount in other storage (e.g. Gmail, Google Photos).
* Objects: Total number of objects in the storage.
- Total: Total size available.
- Used: Total size used.
- Free: Total space available to this user.
- Trashed: Total space used by trash.
- Other: Total amount in other storage (e.g. Gmail, Google Photos).
- Objects: Total number of objects in the storage.

All sizes are in number of bytes.

Applying a ` + "`--full`" + ` flag to the command prints the bytes in full, e.g.

Total:   18253611008
Used:    7993453766
Free:    1411001220
Trashed: 104857602
Other:   8849156022
` + "```text" + `
Total:   18253611008
Used:    7993453766
Free:    1411001220
Trashed: 104857602
Other:   8849156022
` + "```" + `

A ` + "`--json`" + ` flag generates conveniently machine-readable output, e.g.

{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
` + "```json" + `
{
"total": 18253611008,
"used": 7993453766,
"trashed": 104857602,
"other": 8849156022,
"free": 1411001220
}
` + "```" + `

Not all backends print all fields. Information is not included if it is not
provided by a backend. Where the value is unlimited it is omitted.

Some backends does not support the ` + "`rclone about`" + ` command at all,
see complete list in [documentation](https://rclone.org/overview/#optional-features).
`,
see complete list in [documentation](https://rclone.org/overview/#optional-features).`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
// "groups": "",
@@ -30,14 +30,16 @@ rclone from a machine with a browser - use as instructed by
rclone config.

The command requires 1-3 arguments:
- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

- fs name (e.g., "drive", "s3", etc.)
- Either a base64 encoded JSON blob obtained from a previous rclone config session
- Or a client_id and client_secret pair obtained from the remote service

Use --auth-no-open-browser to prevent rclone to open auth
link in default browser automatically.

Use --template to generate HTML output via a custom Go template. If a blank string is provided as an argument to this flag, the default template is used.`,
Use --template to generate HTML output via a custom Go template. If a blank
string is provided as an argument to this flag, the default template is used.`,
Annotations: map[string]string{
"versionIntroduced": "v1.27",
},
@@ -37,26 +37,33 @@ see the backend docs for definitions.

You can discover what commands a backend implements by using

rclone backend help remote:
rclone backend help <backendname>
` + "```sh" + `
rclone backend help remote:
rclone backend help <backendname>
` + "```" + `

You can also discover information about the backend using (see
[operations/fsinfo](/rc/#operations-fsinfo) in the remote control docs
for more info).

rclone backend features remote:
` + "```sh" + `
rclone backend features remote:
` + "```" + `

Pass options to the backend command with -o. This should be key=value or key, e.g.:

rclone backend stats remote:path stats -o format=json -o long
` + "```sh" + `
rclone backend stats remote:path stats -o format=json -o long
` + "```" + `

Pass arguments to the backend by placing them on the end of the line

rclone backend cleanup remote:path file1 file2 file3
` + "```sh" + `
rclone backend cleanup remote:path file1 file2 file3
` + "```" + `

Note to run these commands on a running backend then see
[backend/command](/rc/#backend-command) in the rc docs.
`,
[backend/command](/rc/#backend-command) in the rc docs.`,
Annotations: map[string]string{
"versionIntroduced": "v1.52",
"groups": "Important",
@@ -51,14 +51,15 @@ var longHelp = shortHelp + makeHelp(`
bidirectional cloud sync solution in rclone.
It retains the Path1 and Path2 filesystem listings from the prior run.
On each successive run it will:

- list files on Path1 and Path2, and check for changes on each side.
Changes include |New|, |Newer|, |Older|, and |Deleted| files.
- Propagate changes on Path1 to Path2, and vice-versa.

Bisync is considered an **advanced command**, so use with care.
Make sure you have read and understood the entire [manual](https://rclone.org/bisync)
(especially the [Limitations](https://rclone.org/bisync/#limitations) section) before using,
or data loss can result. Questions can be asked in the [Rclone Forum](https://forum.rclone.org/).
(especially the [Limitations](https://rclone.org/bisync/#limitations) section)
before using, or data loss can result. Questions can be asked in the
[Rclone Forum](https://forum.rclone.org/).

See [full bisync description](https://rclone.org/bisync/) for details.
`)
See [full bisync description](https://rclone.org/bisync/) for details.`)
@@ -434,7 +434,6 @@ func (b *bisyncRun) listDirsOnly(listingNum int) (*fileList, error) {
    }

    fulllisting, err = b.loadListingNum(listingNum)

    if err != nil {
        b.critical = true
        b.retryable = true
@@ -610,6 +609,11 @@ func (b *bisyncRun) modifyListing(ctx context.Context, src fs.Fs, dst fs.Fs, res
        }
    }
    if srcNewName != "" { // if it was renamed and not deleted
        if new == nil { // should not happen. log error and debug info
            b.handleErr(b.renames, "internal error", fmt.Errorf("missing info for %q. Please report a bug at https://github.com/rclone/rclone/issues", srcNewName), true, true)
            fs.PrettyPrint(srcList, "srcList for debugging", fs.LogLevelNotice)
            continue
        }
        srcList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
        dstList.put(srcNewName, new.size, new.time, new.hash, new.id, new.flags)
    }
@@ -43,15 +43,21 @@ var commandDefinition = &cobra.Command{

You can use it like this to output a single file

rclone cat remote:path/to/file
|||sh
rclone cat remote:path/to/file
|||

Or like this to output any file in dir or its subdirectories.

rclone cat remote:path/to/dir
|||sh
rclone cat remote:path/to/dir
|||

Or like this to output any .txt files in dir or its subdirectories.

rclone --include "*.txt" cat remote:path/to/dir
|||sh
rclone --include "*.txt" cat remote:path/to/dir
|||

Use the |--head| flag to print characters only at the start, |--tail| for
the end and |--offset| and |--count| to print a section in the middle.
@@ -62,14 +68,17 @@ Use the |--separator| flag to print a separator value between files. Be sure to
shell-escape special characters. For example, to print a newline between
files, use:

* bash:
- bash:

rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||sh
rclone --include "*.txt" --separator $'\n' cat remote:path/to/dir
|||

* powershell:
- powershell:

rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
`, "|", "`"),
|||powershell
rclone --include "*.txt" --separator "|n" cat remote:path/to/dir
|||`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.33",
"groups": "Filter,Listing",
@@ -74,8 +74,7 @@ you what happened to it. These are reminiscent of diff files.
- |! path| means there was an error reading or hashing the source or dest.

The default number of parallel checks is 8. See the [--checkers](/docs/#checkers-int)
option for more information.
`, "|", "`")
option for more information.`, "|", "`")

// GetCheckOpt gets the options corresponding to the check flags
func GetCheckOpt(fsrc, fdst fs.Fs) (opt *operations.CheckOpt, close func(), err error) {
@@ -17,8 +17,7 @@ var commandDefinition = &cobra.Command{
Use: "cleanup remote:path",
Short: `Clean up the remote if possible.`,
Long: `Clean up the remote if possible. Empty the trash or delete old file
versions. Not supported by all remotes.
`,
versions. Not supported by all remotes.`,
Annotations: map[string]string{
"versionIntroduced": "v1.31",
"groups": "Important",
@@ -44,8 +44,7 @@ var configCommand = &cobra.Command{
Short: `Enter an interactive configuration session.`,
Long: `Enter an interactive configuration session where you can setup new
remotes and manage existing ones. You may also set or remove a
password to protect your configuration.
`,
password to protect your configuration.`,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
@@ -134,9 +133,7 @@ sensitive info with XXX.

This makes the config file suitable for posting online for support.

It should be double checked before posting as the redaction may not be perfect.

`,
It should be double checked before posting as the redaction may not be perfect.`,
Annotations: map[string]string{
"versionIntroduced": "v1.64",
},
@@ -178,8 +175,8 @@ var configProvidersCommand = &cobra.Command{

var updateRemoteOpt config.UpdateRemoteOpt

var configPasswordHelp = strings.ReplaceAll(`
Note that if the config process would normally ask a question the
var configPasswordHelp = strings.ReplaceAll(
`Note that if the config process would normally ask a question the
default is taken (unless |--non-interactive| is used). Each time
that happens rclone will print or DEBUG a message saying how to
affect the value taken.
@@ -205,29 +202,29 @@ it.

This will look something like (some irrelevant detail removed):

|||
|||json
{
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
"State": "*oauth-islocal,teamdrive,,",
"Option": {
"Name": "config_is_local",
"Help": "Use web browser to automatically authenticate rclone with remote?\n * Say Y if the machine running rclone has a web browser you can use\n * Say N if running rclone on a (remote) machine without web browser access\nIf not sure try Y. If Y failed, try N.\n",
"Default": true,
"Examples": [
{
"Value": "true",
"Help": "Yes"
},
{
"Value": "false",
"Help": "No"
}
],
"Required": false,
"IsPassword": false,
"Type": "bool",
"Exclusive": true,
},
"Error": "",
}
|||

@@ -250,7 +247,9 @@ The keys of |Option| are used as follows:
If |Error| is set then it should be shown to the user at the same
time as the question.

rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||sh
rclone config update name --continue --state "*oauth-islocal,teamdrive,," --result "true"
|||

Note that when using |--continue| all passwords should be passed in
the clear (not obscured). Any default config values should be passed
@@ -264,8 +263,7 @@ not just the post config questions. Any parameters are used as
defaults for questions as usual.

Note that |bin/config.py| in the rclone source implements this protocol
as a readable demonstration.
`, "|", "`")
as a readable demonstration.`, "|", "`")
var configCreateCommand = &cobra.Command{
Use: "create name type [key value]*",
Short: `Create a new remote with name, type and options.`,
@@ -275,13 +273,18 @@ should be passed in pairs of |key| |value| or as |key=value|.
For example, to make a swift remote of name myremote using auto config
you would do:

rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||sh
rclone config create myremote swift env_auth true
rclone config create myremote swift env_auth=true
|||

So for example if you wanted to configure a Google Drive remote but
using remote authorization you would do this:

rclone config create mydrive drive config_is_local=false
|||sh
rclone config create mydrive drive config_is_local=false
|||

`, "|", "`") + configPasswordHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
@@ -344,13 +347,18 @@ pairs of |key| |value| or as |key=value|.
For example, to update the env_auth field of a remote of name myremote
you would do:

rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||sh
rclone config update myremote env_auth true
rclone config update myremote env_auth=true
|||

If the remote uses OAuth the token will be updated, if you don't
require this add an extra parameter thus:

rclone config update myremote env_auth=true config_refresh_token=false
|||sh
rclone config update myremote env_auth=true config_refresh_token=false
|||

`, "|", "`") + configPasswordHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.39",
@@ -388,12 +396,13 @@ The |password| should be passed in in clear (unobscured).

For example, to set password of a remote of name myremote you would do:

rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||sh
rclone config password myremote fieldname mypassword
rclone config password myremote fieldname=mypassword
|||

This command is obsolete now that "config update" and "config create"
both support obscuring passwords directly.
`, "|", "`"),
both support obscuring passwords directly.`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.39",
},
@@ -441,8 +450,7 @@ var configReconnectCommand = &cobra.Command{

To disconnect the remote use "rclone config disconnect".

This normally means going through the interactive oauth flow again.
`,
This normally means going through the interactive oauth flow again.`,
RunE: func(command *cobra.Command, args []string) error {
ctx := context.Background()
cmd.CheckArgs(1, 1, command, args)
@@ -461,8 +469,7 @@ var configDisconnectCommand = &cobra.Command{

This normally means revoking the oauth token.

To reconnect use "rclone config reconnect".
`,
To reconnect use "rclone config reconnect".`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
@@ -490,8 +497,7 @@ var configUserInfoCommand = &cobra.Command{
Use: "userinfo remote:",
Short: `Prints info about logged in user of remote.`,
Long: `This prints the details of the person logged in to the cloud storage
system.
`,
system.`,
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(1, 1, command, args)
f := cmd.NewFsSrc(args)
@@ -534,8 +540,7 @@ var configEncryptionCommand = &cobra.Command{
Use: "encryption",
Short: `set, remove and check the encryption for the config file`,
Long: `This command sets, clears and checks the encryption for the config file using
the subcommands below.
`,
the subcommands below.`,
}

var configEncryptionSetCommand = &cobra.Command{
@@ -559,8 +564,7 @@ variable to distinguish which password you must supply.
Alternatively you can remove the password first (with |rclone config
encryption remove|), then set it again with this command which may be
easier if you don't mind the unencrypted config file being on the disk
briefly.
`, "|", "`"),
briefly.`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
@@ -580,8 +584,7 @@ If |--password-command| is in use, this will be called to supply the old config
password.

If the config was not encrypted then no error will be returned and
this command will do nothing.
`, "|", "`"),
this command will do nothing.`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
@@ -600,8 +603,7 @@ It will attempt to decrypt the config using the password you supply.
If decryption fails it will return a non-zero exit code if using
|--password-command|, otherwise it will prompt again for the password.

If the config file is not encrypted it will return a non zero exit code.
`, "|", "`"),
If the config file is not encrypted it will return a non zero exit code.`, "|", "`"),
RunE: func(command *cobra.Command, args []string) error {
cmd.CheckArgs(0, 0, command, args)
config.LoadedData()
@@ -31,18 +31,27 @@ var commandDefinition = &cobra.Command{
Use: "convmv dest:path --name-transform XXX",
Short: `Convert file and directory names in place.`,
// Warning¡ "¡" will be replaced by backticks below
Long: strings.ReplaceAll(`
convmv supports advanced path name transformations for converting and renaming files and directories by applying prefixes, suffixes, and other alterations.
Long: strings.ReplaceAll(`convmv supports advanced path name transformations for converting and renaming
files and directories by applying prefixes, suffixes, and other alterations.

`+transform.Help()+`Multiple transformations can be used in sequence, applied in the order they are specified on the command line.
`+transform.Help()+`The regex command generally accepts Perl-style regular expressions, the exact
syntax is defined in the [Go regular expression reference](https://golang.org/pkg/regexp/syntax/).
The replacement string may contain capturing group variables, referencing
capturing groups using the syntax ¡$name¡ or ¡${name}¡, where the name can
refer to a named capturing group or it can simply be the index as a number.
To insert a literal $, use $$.

Multiple transformations can be used in sequence, applied
in the order they are specified on the command line.

The ¡--name-transform¡ flag is also available in ¡sync¡, ¡copy¡, and ¡move¡.

## Files vs Directories
### Files vs Directories

By default ¡--name-transform¡ will only apply to file names. The means only the leaf file name will be transformed.
However some of the transforms would be better applied to the whole path or just directories.
To choose which which part of the file path is affected some tags can be added to the ¡--name-transform¡.
By default ¡--name-transform¡ will only apply to file names. The means only the
leaf file name will be transformed. However some of the transforms would be
better applied to the whole path or just directories. To choose which which
part of the file path is affected some tags can be added to the ¡--name-transform¡.

| Tag | Effect |
|------|------|
@@ -50,42 +59,58 @@ To choose which which part of the file path is affected some tags can be added t
| ¡dir¡ | Only transform name of directories - these may appear anywhere in the path |
| ¡all¡ | Transform the entire path for files and directories |

This is used by adding the tag into the transform name like this: ¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.
This is used by adding the tag into the transform name like this:
¡--name-transform file,prefix=ABC¡ or ¡--name-transform dir,prefix=DEF¡.

For some conversions using all is more likely to be useful, for example ¡--name-transform all,nfc¡.
For some conversions using all is more likely to be useful, for example
¡--name-transform all,nfc¡.

Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name. This will cause an error.
Note that ¡--name-transform¡ may not add path separators ¡/¡ to the name.
This will cause an error.

## Ordering and Conflicts
### Ordering and Conflicts

* Transformations will be applied in the order specified by the user.
* If the ¡file¡ tag is in use (the default) then only the leaf name of files will be transformed.
* If the ¡dir¡ tag is in use then directories anywhere in the path will be transformed
* If the ¡all¡ tag is in use then directories and files anywhere in the path will be transformed
* Each transformation will be run one path segment at a time.
* If a transformation adds a ¡/¡ or ends up with an empty path segment then that will be an error.
* It is up to the user to put the transformations in a sensible order.
* Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or ¡nfc¡ followed by ¡nfd¡, are possible.
* Instead of enforcing mutual exclusivity, transformations are applied in sequence as specified by the
user, allowing for intentional use cases (e.g., trimming one prefix before adding another).
* Users should be aware that certain combinations may lead to unexpected results and should verify
transformations using ¡--dry-run¡ before execution.
- Transformations will be applied in the order specified by the user.
- If the ¡file¡ tag is in use (the default) then only the leaf name of files
will be transformed.
- If the ¡dir¡ tag is in use then directories anywhere in the path will be
transformed
- If the ¡all¡ tag is in use then directories and files anywhere in the path
will be transformed
- Each transformation will be run one path segment at a time.
- If a transformation adds a ¡/¡ or ends up with an empty path segment then
that will be an error.
- It is up to the user to put the transformations in a sensible order.
- Conflicting transformations, such as ¡prefix¡ followed by ¡trimprefix¡ or
¡nfc¡ followed by ¡nfd¡, are possible.
- Instead of enforcing mutual exclusivity, transformations are applied in
sequence as specified by the user, allowing for intentional use cases
(e.g., trimming one prefix before adding another).
- Users should be aware that certain combinations may lead to unexpected
results and should verify transformations using ¡--dry-run¡ before execution.

## Race Conditions and Non-Deterministic Behavior
### Race Conditions and Non-Deterministic Behavior

Some transformations, such as ¡replace=old:new¡, may introduce conflicts where multiple source files map to the same destination name.
This can lead to race conditions when performing concurrent transfers. It is up to the user to anticipate these.
* If two files from the source are transformed into the same name at the destination, the final state may be non-deterministic.
* Running rclone check after a sync using such transformations may erroneously report missing or differing files due to overwritten results.
Some transformations, such as ¡replace=old:new¡, may introduce conflicts where
multiple source files map to the same destination name. This can lead to race
conditions when performing concurrent transfers. It is up to the user to
anticipate these.

- If two files from the source are transformed into the same name at the
destination, the final state may be non-deterministic.
- Running rclone check after a sync using such transformations may erroneously
report missing or differing files due to overwritten results.

To minimize risks, users should:
* Carefully review transformations that may introduce conflicts.
* Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind that it won't show the effect of non-deterministic transformations).
* Avoid transformations that cause multiple distinct source files to map to the same destination name.
* Consider disabling concurrency with ¡--transfers=1¡ if necessary.
* Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every time they are used. Avoid these when using ¡bisync¡.

`, "¡", "`"),
- Carefully review transformations that may introduce conflicts.
- Use ¡--dry-run¡ to inspect changes before executing a sync (but keep in mind
that it won't show the effect of non-deterministic transformations).
- Avoid transformations that cause multiple distinct source files to map to the
same destination name.
- Consider disabling concurrency with ¡--transfers=1¡ if necessary.
- Certain transformations (e.g. ¡prefix¡) will have a multiplying effect every
time they are used. Avoid these when using ¡bisync¡.`, "¡", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.70",
"groups": "Filter,Listing,Important,Copy",
@@ -50,22 +50,30 @@ go there.

For example

rclone copy source:sourcepath dest:destpath
|||sh
rclone copy source:sourcepath dest:destpath
|||

Let's say there are two files in sourcepath

sourcepath/one.txt
sourcepath/two.txt
|||text
sourcepath/one.txt
sourcepath/two.txt
|||

This copies them to

destpath/one.txt
destpath/two.txt
|||text
destpath/one.txt
destpath/two.txt
|||

Not to

destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||text
destpath/sourcepath/one.txt
destpath/sourcepath/two.txt
|||

If you are familiar with |rsync|, rclone always works as if you had
written a trailing |/| - meaning "copy the contents of this directory".
@@ -81,20 +89,22 @@ For example, if you have many files in /path/to/src but only a few of
them change every day, you can copy all the files which have changed
recently very efficiently like this:

rclone copy --max-age 24h --no-traverse /path/to/src remote:

|||sh
rclone copy --max-age 24h --no-traverse /path/to/src remote:
|||

Rclone will sync the modification times of files and directories if
the backend supports it. If metadata syncing is required then use the
|--metadata| flag.

Note that the modification time and metadata for the root directory
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
will **not** be synced. See [issue #7652](https://github.com/rclone/rclone/issues/7652)
for more info.

**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics.

**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without copying anything.
**Note**: Use the |--dry-run| or the |--interactive|/|-i| flag to test without
copying anything.

`, "|", "`") + operationsflags.Help(),
Annotations: map[string]string{
@@ -35,26 +35,32 @@ name. If the source is a directory then it acts exactly like the

So

rclone copyto src dst
` + "```sh" + `
rclone copyto src dst
` + "```" + `

where src and dst are rclone paths, either remote:path or
/path/to/local or C:\windows\path\if\on\windows.
where src and dst are rclone paths, either ` + "`remote:path`" + ` or
` + "`/path/to/local`" + ` or ` + "`C:\\windows\\path\\if\\on\\windows`" + `.

This will:

if src is file
copy it to dst, overwriting an existing file if it exists
if src is directory
copy it to dst, overwriting existing files if they exist
see copy command for full details
` + "```text" + `
if src is file
copy it to dst, overwriting an existing file if it exists
if src is directory
copy it to dst, overwriting existing files if they exist
see copy command for full details
` + "```" + `

This doesn't transfer files that are identical on src and dst, testing
by size and modification time or MD5SUM. It doesn't delete files from
the destination.

*If you are looking to copy just a byte range of a file, please see 'rclone cat --offset X --count Y'*
*If you are looking to copy just a byte range of a file, please see
` + "`rclone cat --offset X --count Y`" + `.*

**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view real-time transfer statistics
**Note**: Use the ` + "`-P`" + `/` + "`--progress`" + ` flag to view
real-time transfer statistics.

` + operationsflags.Help(),
Annotations: map[string]string{
@@ -48,7 +48,7 @@ set in HTTP headers, it will be used instead of the name from the URL.
With |--print-filename| in addition, the resulting file name will be
printed.

Setting |--no-clobber| will prevent overwriting file on the
Setting |--no-clobber| will prevent overwriting file on the
destination if there is one with the same name.

Setting |--stdout| or making the output file name |-|
@@ -62,9 +62,7 @@ If you can't get |rclone copyurl| to work then here are some things you can try:
- |--bind 0.0.0.0| rclone will use IPv6 if available - try disabling it
- |--bind ::0| to disable IPv4
- |--user agent curl| - some sites have whitelists for curl's user-agent - try that
- Make sure the site works with |curl| directly

`, "|", "`"),
- Make sure the site works with |curl| directly`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.43",
"groups": "Important",
@@ -37,14 +37,18 @@ checksum of the file it has just encrypted.

Use it like this

rclone cryptcheck /path/to/files encryptedremote:path
` + "```sh" + `
rclone cryptcheck /path/to/files encryptedremote:path
` + "```" + `

You can use it like this also, but that will involve downloading all
the files in remote:path.
the files in ` + "`remote:path`" + `.

rclone cryptcheck remote:path encryptedremote:path
` + "```sh" + `
rclone cryptcheck remote:path encryptedremote:path
` + "```" + `

After it has run it will log the status of the encryptedremote:.
After it has run it will log the status of the ` + "`encryptedremote:`" + `.
` + check.FlagsHelp,
Annotations: map[string]string{
"versionIntroduced": "v1.36",
@@ -33,13 +33,13 @@ If you supply the ` + "`--reverse`" + ` flag, it will return encrypted file name

use it like this

rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
` + "```sh" + `
rclone cryptdecode encryptedremote: encryptedfilename1 encryptedfilename2
rclone cryptdecode --reverse encryptedremote: filename1 filename2
` + "```" + `

rclone cryptdecode --reverse encryptedremote: filename1 filename2

Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + ` command.
See the documentation on the [crypt](/crypt/) overlay for more info.
`,
Another way to accomplish this is by using the ` + "`rclone backend encode` (or `decode`)" + `
command. See the documentation on the [crypt](/crypt/) overlay for more info.`,
Annotations: map[string]string{
"versionIntroduced": "v1.38",
},
@@ -47,15 +47,15 @@ directories have been merged.

Next, if deduping by name, for every group of duplicate file names /
hashes, it will delete all but one identical file it finds without
confirmation. This means that for most duplicated files the ` +
"`dedupe`" + ` command will not be interactive.
confirmation. This means that for most duplicated files the
` + "`dedupe`" + ` command will not be interactive.

` + "`dedupe`" + ` considers files to be identical if they have the
same file path and the same hash. If the backend does not support hashes (e.g. crypt wrapping
Google Drive) then they will never be found to be identical. If you
use the ` + "`--size-only`" + ` flag then files will be considered
identical if they have the same size (any hash will be ignored). This
can be useful on crypt backends which do not support hashes.
same file path and the same hash. If the backend does not support
hashes (e.g. crypt wrapping Google Drive) then they will never be found
to be identical. If you use the ` + "`--size-only`" + ` flag then files
will be considered identical if they have the same size (any hash will be
ignored). This can be useful on crypt backends which do not support hashes.

Next rclone will resolve the remaining duplicates. Exactly which
action is taken depends on the dedupe mode. By default, rclone will
@@ -68,71 +68,82 @@ Here is an example run.

Before - with duplicates

$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
6048320 2016-03-05 16:23:11.775000000 one.txt
564374 2016-03-05 16:23:06.731000000 one.txt
6048320 2016-03-05 16:18:26.092000000 one.txt
6048320 2016-03-05 16:22:46.185000000 two.txt
1744073 2016-03-05 16:22:38.104000000 two.txt
564374 2016-03-05 16:22:52.118000000 two.txt
` + "```" + `

Now the ` + "`dedupe`" + ` session

$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```sh" + `
$ rclone dedupe drive:dupes
2016/03/05 16:24:37 Google drive root 'dupes': Looking for duplicates using interactive mode.
one.txt: Found 4 files with duplicate names
one.txt: Deleting 2/3 identical duplicates (MD5 "1eedaa9fe86fd4b8632e2ac549403b36")
one.txt: 2 duplicates remain
1: 6048320 bytes, 2016-03-05 16:23:16.798000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
2: 564374 bytes, 2016-03-05 16:23:06.731000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> k
Enter the number of the file to keep> 1
one.txt: Deleted 1 extra copies
two.txt: Found 3 files with duplicate names
two.txt: 3 duplicates remain
1: 564374 bytes, 2016-03-05 16:22:52.118000000, MD5 7594e7dc9fc28f727c42ee3e0749de81
2: 6048320 bytes, 2016-03-05 16:22:46.185000000, MD5 1eedaa9fe86fd4b8632e2ac549403b36
3: 1744073 bytes, 2016-03-05 16:22:38.104000000, MD5 851957f7fb6f0bc4ce76be966d336802
s) Skip and do nothing
k) Keep just one (choose which in next step)
r) Rename all to be different (by changing file.jpg to file-1.jpg)
s/k/r> r
two-1.txt: renamed from: two.txt
two-2.txt: renamed from: two.txt
two-3.txt: renamed from: two.txt
` + "```" + `

The result being

$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```sh" + `
$ rclone lsl drive:dupes
6048320 2016-03-05 16:23:16.798000000 one.txt
564374 2016-03-05 16:22:52.118000000 two-1.txt
6048320 2016-03-05 16:22:46.185000000 two-2.txt
1744073 2016-03-05 16:22:38.104000000 two-3.txt
` + "```" + `

Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag or by using an extra parameter with the same value
Dedupe can be run non interactively using the ` + "`" + `--dedupe-mode` + "`" + ` flag
or by using an extra parameter with the same value

* ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
* ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
* ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
* ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
* ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
* ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
* ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
* ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
* ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.
- ` + "`" + `--dedupe-mode interactive` + "`" + ` - interactive as above.
- ` + "`" + `--dedupe-mode skip` + "`" + ` - removes identical files then skips anything left.
- ` + "`" + `--dedupe-mode first` + "`" + ` - removes identical files then keeps the first one.
- ` + "`" + `--dedupe-mode newest` + "`" + ` - removes identical files then keeps the newest one.
- ` + "`" + `--dedupe-mode oldest` + "`" + ` - removes identical files then keeps the oldest one.
- ` + "`" + `--dedupe-mode largest` + "`" + ` - removes identical files then keeps the largest one.
- ` + "`" + `--dedupe-mode smallest` + "`" + ` - removes identical files then keeps the smallest one.
- ` + "`" + `--dedupe-mode rename` + "`" + ` - removes identical files then renames the rest to be different.
- ` + "`" + `--dedupe-mode list` + "`" + ` - lists duplicate dirs and files only and changes nothing.

For example, to rename all the identically named photos in your Google Photos directory, do
For example, to rename all the identically named photos in your Google Photos
directory, do

rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```sh" + `
rclone dedupe --dedupe-mode rename "drive:Google Photos"
` + "```" + `

Or

rclone dedupe rename "drive:Google Photos"
`,
` + "```sh" + `
rclone dedupe rename "drive:Google Photos"
` + "```",
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important",
@@ -32,26 +32,29 @@ obeys include/exclude filters so can be used to selectively delete files.
alone. If you want to delete a directory and all of its contents use
the [purge](/commands/rclone_purge/) command.

If you supply the |--rmdirs| flag, it will remove all empty directories along with it.
You can also use the separate command [rmdir](/commands/rclone_rmdir/) or
[rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.
If you supply the |--rmdirs| flag, it will remove all empty directories along
with it. You can also use the separate command [rmdir](/commands/rclone_rmdir/)
or [rmdirs](/commands/rclone_rmdirs/) to delete empty directories only.

For example, to delete all files bigger than 100 MiB, you may first want to
check what would be deleted (use either):

rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||sh
rclone --min-size 100M lsl remote:path
rclone --dry-run --min-size 100M delete remote:path
|||

Then proceed with the actual delete:

rclone --min-size 100M delete remote:path
|||sh
rclone --min-size 100M delete remote:path
|||

That reads "delete everything with a minimum size of 100 MiB", hence
delete all files bigger than 100 MiB.

**Important**: Since this can cause data loss, test first with the
|--dry-run| or the |--interactive|/|-i| flag.
`, "|", "`"),
|--dry-run| or the |--interactive|/|-i| flag.`, "|", "`"),
Annotations: map[string]string{
"versionIntroduced": "v1.27",
"groups": "Important,Filter,Listing",
@@ -19,9 +19,8 @@ var commandDefinition = &cobra.Command{
Use: "deletefile remote:path",
Short: `Remove a single file from remote.`,
Long: `Remove a single file from remote. Unlike ` + "`" + `delete` + "`" + ` it cannot be used to
remove a directory and it doesn't obey include/exclude filters - if the specified file exists,
it will always be removed.
`,
remove a directory and it doesn't obey include/exclude filters - if the
specified file exists, it will always be removed.`,
Annotations: map[string]string{
"versionIntroduced": "v1.42",
"groups": "Important",
@@ -14,8 +14,7 @@ var completionDefinition = &cobra.Command{
Use: "completion [shell]",
Short: `Output completion script for a given shell.`,
Long: `Generates a shell completion script for rclone.
Run with ` + "`--help`" + ` to list the supported shells.
`,
Run with ` + "`--help`" + ` to list the supported shells.`,
Annotations: map[string]string{
"versionIntroduced": "v1.33",
},
@@ -18,17 +18,21 @@ var bashCommandDefinition = &cobra.Command{
Short: `Output bash completion script for rclone.`,
Long: `Generates a bash shell autocompletion script for rclone.

By default, when run without any arguments,
By default, when run without any arguments,

rclone completion bash
` + "```sh" + `
rclone completion bash
` + "```" + `

the generated script will be written to

/etc/bash_completion.d/rclone
` + "```sh" + `
/etc/bash_completion.d/rclone
` + "```" + `

and so rclone will probably need to be run as root, or with sudo.

If you supply a path to a file as the command line argument, then
If you supply a path to a file as the command line argument, then
the generated script will be written to that file, in which case
you should not need root privileges.

@@ -39,11 +43,12 @@ can logout and login again to use the autocompletion script.

Alternatively, you can source the script directly

. /path/to/my_bash_completion_scripts/rclone
` + "```sh" + `
. /path/to/my_bash_completion_scripts/rclone
` + "```" + `

and the autocompletion functionality will be added to your
current shell.
`,
current shell.`,
Run: func(command *cobra.Command, args []string) {
cmd.CheckArgs(0, 1, command, args)
out := "/etc/bash_completion.d/rclone"
@@ -21,18 +21,21 @@ var fishCommandDefinition = &cobra.Command{
|
||||
This writes to /etc/fish/completions/rclone.fish by default so will
|
||||
probably need to be run with sudo or as root, e.g.
|
||||
|
||||
sudo rclone completion fish
|
||||
` + "```sh" + `
|
||||
sudo rclone completion fish
|
||||
` + "```" + `
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
. /etc/fish/completions/rclone.fish
|
||||
` + "```sh" + `
|
||||
. /etc/fish/completions/rclone.fish
|
||||
` + "```" + `
|
||||
|
||||
If you supply a command line argument the script will be written
|
||||
there.
|
||||
|
||||
If output_file is "-", then the output will be written to stdout.
|
||||
`,
|
||||
If output_file is "-", then the output will be written to stdout.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
out := "/etc/fish/completions/rclone.fish"
|
||||
|
||||
@@ -20,13 +20,14 @@ var powershellCommandDefinition = &cobra.Command{
|
||||
|
||||
To load completions in your current shell session:
|
||||
|
||||
rclone completion powershell | Out-String | Invoke-Expression
|
||||
` + "```sh" + `
|
||||
rclone completion powershell | Out-String | Invoke-Expression
|
||||
` + "```" + `
|
||||
|
||||
To load completions for every new session, add the output of the above command
|
||||
to your powershell profile.
|
||||
|
||||
If output_file is "-" or missing, then the output will be written to stdout.
|
||||
`,
|
||||
If output_file is "-" or missing, then the output will be written to stdout.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
if len(args) == 0 || (len(args) > 0 && args[0] == "-") {
|
||||
|
||||
@@ -21,18 +21,21 @@ var zshCommandDefinition = &cobra.Command{
|
||||
This writes to /usr/share/zsh/vendor-completions/_rclone by default so will
|
||||
probably need to be run with sudo or as root, e.g.
|
||||
|
||||
sudo rclone completion zsh
|
||||
` + "```sh" + `
|
||||
sudo rclone completion zsh
|
||||
` + "```" + `
|
||||
|
||||
Logout and login again to use the autocompletion scripts, or source
|
||||
them directly
|
||||
|
||||
autoload -U compinit && compinit
|
||||
` + "```sh" + `
|
||||
autoload -U compinit && compinit
|
||||
` + "```" + `
|
||||
|
||||
If you supply a command line argument the script will be written
|
||||
there.
|
||||
|
||||
If output_file is "-", then the output will be written to stdout.
|
||||
`,
|
||||
If output_file is "-", then the output will be written to stdout.`,
|
||||
Run: func(command *cobra.Command, args []string) {
|
||||
cmd.CheckArgs(0, 1, command, args)
|
||||
out := "/usr/share/zsh/vendor-completions/_rclone"
|
||||
|
||||
@@ -184,7 +184,12 @@ rclone.org website.`,
|
||||
return fmt.Errorf("internal error: failed to find cut points: startCut = %d, endCut = %d", startCut, endCut)
|
||||
}
|
||||
if endCut >= 0 {
|
||||
doc = doc[:endCut] + "### See Also" + doc[endCut+12:]
|
||||
doc = doc[:endCut] + `### See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
|
||||
<!-- markdownlint-restore -->
|
||||
`
|
||||
}
|
||||
} else {
|
||||
var out strings.Builder
|
||||
@@ -196,7 +201,7 @@ rclone.org website.`,
|
||||
if group.Flags.HasFlags() {
|
||||
_, _ = fmt.Fprintf(&out, "#### %s Options\n\n", group.Name)
|
||||
_, _ = fmt.Fprintf(&out, "%s\n\n", group.Help)
|
||||
_, _ = out.WriteString("```\n")
|
||||
_, _ = out.WriteString("```text\n")
|
||||
_, _ = out.WriteString(group.Flags.FlagUsages())
|
||||
_, _ = out.WriteString("```\n\n")
|
||||
}
|
||||
@@ -204,7 +209,12 @@ rclone.org website.`,
|
||||
} else {
|
||||
_, _ = out.WriteString("See the [global flags page](/flags/) for global options not listed here.\n\n")
|
||||
}
|
||||
doc = doc[:startCut] + out.String() + "### See Also" + doc[endCut+12:]
|
||||
doc = doc[:startCut] + out.String() + `### See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->` + doc[endCut+12:] + `
|
||||
<!-- markdownlint-restore -->
|
||||
`
|
||||
}
|
||||
|
||||
// outdent all the titles by one
|
||||
|
||||
@@ -539,7 +539,7 @@ var command = &cobra.Command{
Aliases: []string{uniqueCommandName},
Use: subcommandName,
Short: "Speaks with git-annex over stdin/stdout.",
Long: gitannexHelp,
Long: strings.TrimSpace(gitannexHelp),
Annotations: map[string]string{
"versionIntroduced": "v1.67.0",
},

@@ -4,8 +4,7 @@ users.

[git-annex]: https://git-annex.branchable.com/

Installation on Linux
---------------------
### Installation on Linux

1. Skip this step if your version of git-annex is [10.20240430] or newer.
Otherwise, you must create a symlink somewhere on your PATH with a particular

@@ -103,14 +103,17 @@ as a relative path).

Run without a hash to see the list of all supported hashes, e.g.

$ rclone hashsum
` + hash.HelpString(4) + `
` + "```sh" + `
$ rclone hashsum
` + hash.HelpString(0) + "```" + `

Then

$ rclone hashsum MD5 remote:path
` + "```sh" + `
rclone hashsum MD5 remote:path
` + "```" + `

Note that hash names are case insensitive and values are output in lower case.
`,
Note that hash names are case insensitive and values are output in lower case.`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
"groups": "Filter,Listing",

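In the hashsum hunk above, the argument to hash.HelpString drops from 4 to 0 because the listing moves out of an indented markdown code block and into a fenced one, where leading spaces would show up literally. The following standalone sketch is illustration only; helpString is an invented stand-in, not the real hash.HelpString.

```go
package main

import (
	"fmt"
	"strings"
)

// helpString mimics the indent parameter: each line of an abbreviated,
// invented hash list is prefixed with `indent` spaces.
func helpString(indent int) string {
	pad := strings.Repeat(" ", indent)
	var b strings.Builder
	for _, name := range []string{"md5", "sha1", "crc32"} {
		b.WriteString(pad + "* " + name + "\n")
	}
	return b.String()
}

func main() {
	// Old style: a four-space indent makes the list an indented code block.
	fmt.Print(helpString(4))
	// New style: inside a fenced sh block the lines are used verbatim.
	fmt.Print(helpString(0))
}
```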
@@ -30,9 +30,7 @@ var Root = &cobra.Command{
mounting them, listing them in lots of different ways.

See the home page (https://rclone.org/) for installation, usage,
documentation, changelog and configuration walkthroughs.

`,
documentation, changelog and configuration walkthroughs.`,
PersistentPostRun: func(cmd *cobra.Command, args []string) {
fs.Debugf("rclone", "Version %q finishing with parameters %q", fs.Version, os.Args)
atexit.Run()

@@ -29,10 +29,12 @@ var commandDefinition = &cobra.Command{
Short: `Generate public link to file/folder.`,
Long: `Create, retrieve or remove a public link to the given file or folder.

rclone link remote:path/to/file
rclone link remote:path/to/folder/
rclone link --unlink remote:path/to/folder/
rclone link --expire 1d remote:path/to/file
` + "```sh" + `
rclone link remote:path/to/file
rclone link remote:path/to/folder/
rclone link --unlink remote:path/to/folder/
rclone link --expire 1d remote:path/to/file
` + "```" + `

If you supply the --expire flag, it will set the expiration time
otherwise it will use the default (100 years). **Note** not all
@@ -45,9 +47,8 @@ don't will just ignore it.

If successful, the last line of the output will contain the
link. Exact capabilities depend on the remote, but the link will
always by default be created with the least constraints – e.g. no
expiry, no password protection, accessible without account.
`,
always by default be created with the least constraints - e.g. no
expiry, no password protection, accessible without account.`,
Annotations: map[string]string{
"versionIntroduced": "v1.41",
},

@@ -114,8 +114,7 @@ func newLess(orderBy string) (less lessFn, err error) {
var commandDefinition = &cobra.Command{
Use: "listremotes [<filter>]",
Short: `List all the remotes in the config file and defined in environment variables.`,
Long: `
Lists all the available remotes from the config file, or the remotes matching
Long: `Lists all the available remotes from the config file, or the remotes matching
an optional filter.

Prints the result in human-readable format by default, and as a simple list of
@@ -126,8 +125,7 @@ the source (file or environment).

Result can be filtered by a filter argument which applies to all attributes,
and/or filter flags specific for each attribute. The values must be specified
according to regular rclone filtering pattern syntax.
`,
according to regular rclone filtering pattern syntax.`,
Annotations: map[string]string{
"versionIntroduced": "v1.34",
},

14
cmd/ls/ls.go
@@ -21,13 +21,15 @@ var commandDefinition = &cobra.Command{
Long: `Lists the objects in the source path to standard output in a human
readable format with size and path. Recurses by default.

Eg
E.g.

$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
` + "```sh" + `
$ rclone ls swift:bucket
60295 bevajer5jef
90613 canole
94467 diwogej7
37600 fubuwic
` + "```" + `

` + lshelp.Help,
Annotations: map[string]string{

@@ -7,16 +7,15 @@ import (

// Help describes the common help for all the list commands
// Warning! "|" will be replaced by backticks below
var Help = strings.ReplaceAll(`
Any of the filtering options can be applied to this command.
var Help = strings.ReplaceAll(`Any of the filtering options can be applied to this command.

There are several related list commands

* |ls| to list size and path of objects only
* |lsl| to list modification time, size and path of objects only
* |lsd| to list directories only
* |lsf| to list objects and directories in easy to parse format
* |lsjson| to list objects and directories in JSON format
- |ls| to list size and path of objects only
- |lsl| to list modification time, size and path of objects only
- |lsd| to list directories only
- |lsf| to list objects and directories in easy to parse format
- |lsjson| to list objects and directories in JSON format

|ls|,|lsl|,|lsd| are designed to be human-readable.
|lsf| is designed to be human and machine-readable.
@@ -24,9 +23,9 @@ There are several related list commands

Note that |ls| and |lsl| recurse by default - use |--max-depth 1| to stop the recursion.

The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default - use |-R| to make them recurse.
The other list commands |lsd|,|lsf|,|lsjson| do not recurse by default -
use |-R| to make them recurse.

Listing a nonexistent directory will produce an error except for
remotes which can't have empty directories (e.g. s3, swift, or gcs -
the bucket-based remotes).
`, "|", "`")
the bucket-based remotes).`, "|", "`")

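The lshelp hunk relies on a convention that recurs throughout these Long strings and is worth spelling out: the help text is written with "|" wherever a markdown backtick is wanted (including "|||sh" for fenced blocks) and converted once with strings.ReplaceAll, so no literal backtick ever has to appear inside the Go raw string. A minimal standalone sketch, not taken from the rclone source:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Pipes stand in for backticks; "|||sh" becomes a fenced sh block
	// after the single ReplaceAll pass.
	help := strings.ReplaceAll(`Use |-R| to recurse.

|||sh
rclone lsf -R remote:path
|||
`, "|", "`")
	fmt.Print(help)
}
```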
@@ -32,18 +32,22 @@ recurse by default. Use the ` + "`-R`" + ` flag to recurse.
|
||||
This command lists the total size of the directory (if known, -1 if
|
||||
not), the modification time (if known, the current time if not), the
|
||||
number of objects in the directory (if known, -1 if not) and the name
|
||||
of the directory, Eg
|
||||
of the directory, E.g.
|
||||
|
||||
$ rclone lsd swift:
|
||||
494000 2018-04-26 08:43:20 10000 10000files
|
||||
65 2018-04-26 08:43:20 1 1File
|
||||
` + "```sh" + `
|
||||
$ rclone lsd swift:
|
||||
494000 2018-04-26 08:43:20 10000 10000files
|
||||
65 2018-04-26 08:43:20 1 1File
|
||||
` + "```" + `
|
||||
|
||||
Or
|
||||
|
||||
$ rclone lsd drive:test
|
||||
-1 2016-10-17 17:41:53 -1 1000files
|
||||
-1 2017-01-03 14:40:54 -1 2500files
|
||||
-1 2017-07-08 14:39:28 -1 4000files
|
||||
` + "```sh" + `
|
||||
$ rclone lsd drive:test
|
||||
-1 2016-10-17 17:41:53 -1 1000files
|
||||
-1 2017-01-03 14:40:54 -1 2500files
|
||||
-1 2017-07-08 14:39:28 -1 4000files
|
||||
` + "```" + `
|
||||
|
||||
If you just want the directory names use ` + "`rclone lsf --dirs-only`" + `.
|
||||
|
||||
|
||||
131
cmd/lsf/lsf.go
@@ -52,41 +52,47 @@ standard output in a form which is easy to parse by scripts. By
|
||||
default this will just be the names of the objects and directories,
|
||||
one per line. The directories will have a / suffix.
|
||||
|
||||
Eg
|
||||
E.g.
|
||||
|
||||
$ rclone lsf swift:bucket
|
||||
bevajer5jef
|
||||
canole
|
||||
diwogej7
|
||||
ferejej3gux/
|
||||
fubuwic
|
||||
` + "```sh" + `
|
||||
$ rclone lsf swift:bucket
|
||||
bevajer5jef
|
||||
canole
|
||||
diwogej7
|
||||
ferejej3gux/
|
||||
fubuwic
|
||||
` + "```" + `
|
||||
|
||||
Use the ` + "`--format`" + ` option to control what gets listed. By default this
|
||||
is just the path, but you can use these parameters to control the
|
||||
output:
|
||||
|
||||
p - path
|
||||
s - size
|
||||
t - modification time
|
||||
h - hash
|
||||
i - ID of object
|
||||
o - Original ID of underlying object
|
||||
m - MimeType of object if known
|
||||
e - encrypted name
|
||||
T - tier of storage if known, e.g. "Hot" or "Cool"
|
||||
M - Metadata of object in JSON blob format, eg {"key":"value"}
|
||||
` + "```text" + `
|
||||
p - path
|
||||
s - size
|
||||
t - modification time
|
||||
h - hash
|
||||
i - ID of object
|
||||
o - Original ID of underlying object
|
||||
m - MimeType of object if known
|
||||
e - encrypted name
|
||||
T - tier of storage if known, e.g. "Hot" or "Cool"
|
||||
M - Metadata of object in JSON blob format, eg {"key":"value"}
|
||||
` + "```" + `
|
||||
|
||||
So if you wanted the path, size and modification time, you would use
|
||||
` + "`--format \"pst\"`, or maybe `--format \"tsp\"`" + ` to put the path last.
|
||||
|
||||
Eg
|
||||
E.g.
|
||||
|
||||
$ rclone lsf --format "tsp" swift:bucket
|
||||
2016-06-25 18:55:41;60295;bevajer5jef
|
||||
2016-06-25 18:55:43;90613;canole
|
||||
2016-06-25 18:55:43;94467;diwogej7
|
||||
2018-04-26 08:50:45;0;ferejej3gux/
|
||||
2016-06-25 18:55:40;37600;fubuwic
|
||||
` + "```sh" + `
|
||||
$ rclone lsf --format "tsp" swift:bucket
|
||||
2016-06-25 18:55:41;60295;bevajer5jef
|
||||
2016-06-25 18:55:43;90613;canole
|
||||
2016-06-25 18:55:43;94467;diwogej7
|
||||
2018-04-26 08:50:45;0;ferejej3gux/
|
||||
2016-06-25 18:55:40;37600;fubuwic
|
||||
` + "```" + `
|
||||
|
||||
If you specify "h" in the format you will get the MD5 hash by default,
|
||||
use the ` + "`--hash`" + ` flag to change which hash you want. Note that this
|
||||
@@ -97,16 +103,20 @@ type.
|
||||
|
||||
For example, to emulate the md5sum command you can use
|
||||
|
||||
rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
|
||||
` + "```sh" + `
|
||||
rclone lsf -R --hash MD5 --format hp --separator " " --files-only .
|
||||
` + "```" + `
|
||||
|
||||
Eg
|
||||
E.g.
|
||||
|
||||
$ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
|
||||
7908e352297f0f530b84a756f188baa3 bevajer5jef
|
||||
cd65ac234e6fea5925974a51cdd865cc canole
|
||||
03b5341b4f234b9d984d03ad076bae91 diwogej7
|
||||
8fd37c3810dd660778137ac3a66cc06d fubuwic
|
||||
99713e14a4c4ff553acaf1930fad985b gixacuh7ku
|
||||
` + "```sh" + `
|
||||
$ rclone lsf -R --hash MD5 --format hp --separator " " --files-only swift:bucket
|
||||
7908e352297f0f530b84a756f188baa3 bevajer5jef
|
||||
cd65ac234e6fea5925974a51cdd865cc canole
|
||||
03b5341b4f234b9d984d03ad076bae91 diwogej7
|
||||
8fd37c3810dd660778137ac3a66cc06d fubuwic
|
||||
99713e14a4c4ff553acaf1930fad985b gixacuh7ku
|
||||
` + "```" + `
|
||||
|
||||
(Though "rclone md5sum ." is an easier way of typing this.)
|
||||
|
||||
@@ -114,24 +124,28 @@ By default the separator is ";" this can be changed with the
|
||||
` + "`--separator`" + ` flag. Note that separators aren't escaped in the path so
|
||||
putting it last is a good strategy.
|
||||
|
||||
Eg
|
||||
E.g.
|
||||
|
||||
$ rclone lsf --separator "," --format "tshp" swift:bucket
|
||||
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
|
||||
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
|
||||
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
|
||||
2018-04-26 08:52:53,0,,ferejej3gux/
|
||||
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
|
||||
` + "```sh" + `
|
||||
$ rclone lsf --separator "," --format "tshp" swift:bucket
|
||||
2016-06-25 18:55:41,60295,7908e352297f0f530b84a756f188baa3,bevajer5jef
|
||||
2016-06-25 18:55:43,90613,cd65ac234e6fea5925974a51cdd865cc,canole
|
||||
2016-06-25 18:55:43,94467,03b5341b4f234b9d984d03ad076bae91,diwogej7
|
||||
2018-04-26 08:52:53,0,,ferejej3gux/
|
||||
2016-06-25 18:55:40,37600,8fd37c3810dd660778137ac3a66cc06d,fubuwic
|
||||
` + "```" + `
|
||||
|
||||
You can output in CSV standard format. This will escape things in "
|
||||
if they contain ,
|
||||
if they contain,
|
||||
|
||||
Eg
|
||||
E.g.
|
||||
|
||||
$ rclone lsf --csv --files-only --format ps remote:path
|
||||
test.log,22355
|
||||
test.sh,449
|
||||
"this file contains a comma, in the file name.txt",6
|
||||
` + "```sh" + `
|
||||
$ rclone lsf --csv --files-only --format ps remote:path
|
||||
test.log,22355
|
||||
test.sh,449
|
||||
"this file contains a comma, in the file name.txt",6
|
||||
` + "```" + `
|
||||
|
||||
Note that the ` + "`--absolute`" + ` parameter is useful for making lists of files
|
||||
to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
|
||||
@@ -139,20 +153,25 @@ to pass to an rclone copy with the ` + "`--files-from-raw`" + ` flag.
|
||||
For example, to find all the files modified within one day and copy
|
||||
those only (without traversing the whole directory structure):
|
||||
|
||||
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
|
||||
rclone copy --files-from-raw new_files /path/to/local remote:path
|
||||
` + "```sh" + `
|
||||
rclone lsf --absolute --files-only --max-age 1d /path/to/local > new_files
|
||||
rclone copy --files-from-raw new_files /path/to/local remote:path
|
||||
` + "```" + `
|
||||
|
||||
The default time format is ` + "`'2006-01-02 15:04:05'`" + `.
|
||||
[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with the ` + "`--time-format`" + ` flag.
|
||||
Examples:
|
||||
[Other formats](https://pkg.go.dev/time#pkg-constants) can be specified with
|
||||
the ` + "`--time-format`" + ` flag. Examples:
|
||||
|
||||
rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
|
||||
rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
|
||||
rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
|
||||
rclone lsf remote:path --format pt --time-format RFC3339
|
||||
rclone lsf remote:path --format pt --time-format DateOnly
|
||||
rclone lsf remote:path --format pt --time-format max
|
||||
` + "`--time-format max`" + ` will automatically truncate ` + "'`2006-01-02 15:04:05.000000000`'" + `
|
||||
` + "```sh" + `
|
||||
rclone lsf remote:path --format pt --time-format 'Jan 2, 2006 at 3:04pm (MST)'
|
||||
rclone lsf remote:path --format pt --time-format '2006-01-02 15:04:05.000000000'
|
||||
rclone lsf remote:path --format pt --time-format '2006-01-02T15:04:05.999999999Z07:00'
|
||||
rclone lsf remote:path --format pt --time-format RFC3339
|
||||
rclone lsf remote:path --format pt --time-format DateOnly
|
||||
rclone lsf remote:path --format pt --time-format max
|
||||
` + "```" + `
|
||||
|
||||
` + "`--time-format max`" + ` will automatically truncate ` + "`2006-01-02 15:04:05.000000000`" + `
|
||||
to the maximum precision supported by the remote.
|
||||
|
||||
` + lshelp.Help,
|
||||
|
||||
@@ -43,25 +43,27 @@ var commandDefinition = &cobra.Command{
|
||||
|
||||
The output is an array of Items, where each Item looks like this:
|
||||
|
||||
{
|
||||
"Hashes" : {
|
||||
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
|
||||
"MD5" : "b1946ac92492d2347c6235b4d2611184",
|
||||
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
|
||||
},
|
||||
"ID": "y2djkhiujf83u33",
|
||||
"OrigID": "UYOJVTUW00Q1RzTDA",
|
||||
"IsBucket" : false,
|
||||
"IsDir" : false,
|
||||
"MimeType" : "application/octet-stream",
|
||||
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
|
||||
"Name" : "file.txt",
|
||||
"Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
|
||||
"EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338",
|
||||
"Path" : "full/path/goes/here/file.txt",
|
||||
"Size" : 6,
|
||||
"Tier" : "hot",
|
||||
}
|
||||
` + "```json" + `
|
||||
{
|
||||
"Hashes" : {
|
||||
"SHA-1" : "f572d396fae9206628714fb2ce00f72e94f2258f",
|
||||
"MD5" : "b1946ac92492d2347c6235b4d2611184",
|
||||
"DropboxHash" : "ecb65bb98f9d905b70458986c39fcbad7715e5f2fcc3b1f07767d7c83e2438cc"
|
||||
},
|
||||
"ID": "y2djkhiujf83u33",
|
||||
"OrigID": "UYOJVTUW00Q1RzTDA",
|
||||
"IsBucket" : false,
|
||||
"IsDir" : false,
|
||||
"MimeType" : "application/octet-stream",
|
||||
"ModTime" : "2017-05-31T16:15:57.034468261+01:00",
|
||||
"Name" : "file.txt",
|
||||
"Encrypted" : "v0qpsdq8anpci8n929v3uu9338",
|
||||
"EncryptedPath" : "kja9098349023498/v0qpsdq8anpci8n929v3uu9338",
|
||||
"Path" : "full/path/goes/here/file.txt",
|
||||
"Size" : 6,
|
||||
"Tier" : "hot",
|
||||
}
|
||||
` + "```" + `
|
||||
|
||||
The exact set of properties included depends on the backend:
|
||||
|
||||
@@ -118,6 +120,7 @@ will be shown ("2017-05-31T16:15:57+01:00").
|
||||
The whole output can be processed as a JSON blob, or alternatively it
|
||||
can be processed line by line as each item is written on individual lines
|
||||
(except with ` + "`--stat`" + `).
|
||||
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.37",
|
||||
|
||||
@@ -21,13 +21,15 @@ var commandDefinition = &cobra.Command{
|
||||
Long: `Lists the objects in the source path to standard output in a human
|
||||
readable format with modification time, size and path. Recurses by default.
|
||||
|
||||
Eg
|
||||
E.g.
|
||||
|
||||
$ rclone lsl swift:bucket
|
||||
60295 2016-06-25 18:55:41.062626927 bevajer5jef
|
||||
90613 2016-06-25 18:55:43.302607074 canole
|
||||
94467 2016-06-25 18:55:43.046609333 diwogej7
|
||||
37600 2016-06-25 18:55:40.814629136 fubuwic
|
||||
` + "```sh" + `
|
||||
$ rclone lsl swift:bucket
|
||||
60295 2016-06-25 18:55:41.062626927 bevajer5jef
|
||||
90613 2016-06-25 18:55:43.302607074 canole
|
||||
94467 2016-06-25 18:55:43.046609333 diwogej7
|
||||
37600 2016-06-25 18:55:40.814629136 fubuwic
|
||||
` + "```" + `
|
||||
|
||||
` + lshelp.Help,
|
||||
Annotations: map[string]string{
|
||||
|
||||
@@ -35,8 +35,7 @@ to running ` + "`rclone hashsum MD5 remote:path`" + `.
|
||||
This command can also hash data received on standard input (stdin),
|
||||
by not passing a remote:path, or by passing a hyphen as remote:path
|
||||
when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
`,
|
||||
as a relative path).`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.02",
|
||||
"groups": "Filter,Listing",
|
||||
|
||||
@@ -273,7 +273,7 @@ func NewMountCommand(commandName string, hidden bool, mount MountFn) *cobra.Comm
|
||||
Use: commandName + " remote:path /path/to/mountpoint",
|
||||
Hidden: hidden,
|
||||
Short: `Mount the remote as file system on a mountpoint.`,
|
||||
Long: help(commandName) + vfs.Help(),
|
||||
Long: help(commandName) + strings.TrimSpace(vfs.Help()),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
"groups": "Filter",
|
||||
@@ -396,34 +396,14 @@ func (m *MountPoint) Wait() error {
|
||||
if err := m.Unmount(); err != nil {
|
||||
fs.Errorf(m.MountPoint, "Failed to unmount: %v", err)
|
||||
} else {
|
||||
fs.Errorf(m.MountPoint, "Unmounted rclone mount")
|
||||
fs.Logf(m.MountPoint, "Unmounted rclone mount")
|
||||
}
|
||||
})
|
||||
}
|
||||
fnHandle := atexit.Register(finalise)
|
||||
defer atexit.Unregister(fnHandle)
|
||||
|
||||
// Reload VFS cache on SIGHUP
|
||||
sigHup := make(chan os.Signal, 1)
|
||||
NotifyOnSigHup(sigHup)
|
||||
var err error
|
||||
|
||||
waiting := true
|
||||
for waiting {
|
||||
select {
|
||||
// umount triggered outside the app
|
||||
case err = <-m.ErrChan:
|
||||
waiting = false
|
||||
// user sent SIGHUP to clear the cache
|
||||
case <-sigHup:
|
||||
root, err := m.VFS.Root()
|
||||
if err != nil {
|
||||
fs.Errorf(m.VFS.Fs(), "Error reading root: %v", err)
|
||||
} else {
|
||||
root.ForgetAll()
|
||||
}
|
||||
}
|
||||
}
|
||||
err := <-m.ErrChan
|
||||
|
||||
finalise()
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
Rclone @ allows Linux, FreeBSD, macOS and Windows to
|
||||
mount any of Rclone's cloud storage systems as a file system with FUSE.
|
||||
|
||||
First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
|
||||
First set up your remote using `rclone config`. Check it works with `rclone ls` etc.
|
||||
|
||||
On Linux and macOS, you can run mount in either foreground or background (aka
|
||||
daemon) mode. Mount runs in foreground mode by default. Use the `--daemon` flag
|
||||
@@ -16,7 +16,9 @@ mount, waits until success or timeout and exits with appropriate code
|
||||
On Linux/macOS/FreeBSD start the mount like this, where `/path/to/local/mount`
|
||||
is an **empty** **existing** directory:
|
||||
|
||||
rclone @ remote:path/to/files /path/to/local/mount
|
||||
```sh
|
||||
rclone @ remote:path/to/files /path/to/local/mount
|
||||
```
|
||||
|
||||
On Windows you can start a mount in different ways. See [below](#mounting-modes-on-windows)
|
||||
for details. If foreground mount is used interactively from a console window,
|
||||
@@ -26,26 +28,30 @@ used to work with the mount until rclone is interrupted e.g. by pressing Ctrl-C.
|
||||
The following examples will mount to an automatically assigned drive,
|
||||
to specific drive letter `X:`, to path `C:\path\parent\mount`
|
||||
(where parent directory or drive must exist, and mount must **not** exist,
|
||||
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)), and
|
||||
the last example will mount as network share `\\cloud\remote` and map it to an
|
||||
and is not supported when [mounting as a network drive](#mounting-modes-on-windows)),
|
||||
and the last example will mount as network share `\\cloud\remote` and map it to an
|
||||
automatically assigned drive:
|
||||
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\parent\mount
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
```sh
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\parent\mount
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
```
|
||||
|
||||
When the program ends while in foreground mode, either via Ctrl+C or receiving
|
||||
a SIGINT or SIGTERM signal, the mount should be automatically stopped.
|
||||
|
||||
When running in background mode the user will have to stop the mount manually:
|
||||
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
umount /path/to/local/mount
|
||||
```sh
|
||||
# Linux
|
||||
fusermount -u /path/to/local/mount
|
||||
#... or on some systems
|
||||
fusermount3 -u /path/to/local/mount
|
||||
# OS X or Linux when using nfsmount
|
||||
umount /path/to/local/mount
|
||||
```
|
||||
|
||||
The umount operation can fail, for example when the mountpoint is busy.
|
||||
When that happens, it is the user's responsibility to stop the mount manually.
|
||||
@@ -80,20 +86,22 @@ thumbnails for image and video files on network drives.
|
||||
|
||||
In most cases, rclone will mount the remote as a normal, fixed disk drive by default.
|
||||
However, you can also choose to mount it as a remote network drive, often described
|
||||
as a network share. If you mount an rclone remote using the default, fixed drive mode
|
||||
and experience unexpected program errors, freezes or other issues, consider mounting
|
||||
as a network drive instead.
|
||||
as a network share. If you mount an rclone remote using the default, fixed drive
|
||||
mode and experience unexpected program errors, freezes or other issues, consider
|
||||
mounting as a network drive instead.
|
||||
|
||||
When mounting as a fixed disk drive you can either mount to an unused drive letter,
|
||||
or to a path representing a **nonexistent** subdirectory of an **existing** parent
|
||||
directory or drive. Using the special value `*` will tell rclone to
|
||||
automatically assign the next available drive letter, starting with Z: and moving backward.
|
||||
Examples:
|
||||
automatically assign the next available drive letter, starting with Z: and moving
|
||||
backward. Examples:
|
||||
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\parent\mount
|
||||
rclone @ remote:path/to/files X:
|
||||
```sh
|
||||
rclone @ remote:path/to/files *
|
||||
rclone @ remote:path/to/files X:
|
||||
rclone @ remote:path/to/files C:\path\parent\mount
|
||||
rclone @ remote:path/to/files X:
|
||||
```
|
||||
|
||||
Option `--volname` can be used to set a custom volume name for the mounted
|
||||
file system. The default is to use the remote name and path.
|
||||
@@ -103,24 +111,28 @@ to your @ command. Mounting to a directory path is not supported in
|
||||
this mode, it is a limitation Windows imposes on junctions, so the remote must always
|
||||
be mounted to a drive letter.
|
||||
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
```sh
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
```
|
||||
|
||||
A volume name specified with `--volname` will be used to create the network share path.
|
||||
A complete UNC path, such as `\\cloud\remote`, optionally with path
|
||||
A volume name specified with `--volname` will be used to create the network share
|
||||
path. A complete UNC path, such as `\\cloud\remote`, optionally with path
|
||||
`\\cloud\remote\madeup\path`, will be used as is. Any other
|
||||
string will be used as the share part, after a default prefix `\\server\`.
|
||||
If no volume name is specified then `\\server\share` will be used.
|
||||
You must make sure the volume name is unique when you are mounting more than one drive,
|
||||
or else the mount command will fail. The share name will treated as the volume label for
|
||||
the mapped drive, shown in Windows Explorer etc, while the complete
|
||||
You must make sure the volume name is unique when you are mounting more than one
|
||||
drive, or else the mount command will fail. The share name will treated as the
|
||||
volume label for the mapped drive, shown in Windows Explorer etc, while the complete
|
||||
`\\server\share` will be reported as the remote UNC path by
|
||||
`net use` etc, just like a normal network drive mapping.
|
||||
|
||||
If you specify a full network share UNC path with `--volname`, this will implicitly
|
||||
set the `--network-mode` option, so the following two examples have same result:
|
||||
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
rclone @ remote:path/to/files X: --volname \\server\share
|
||||
```sh
|
||||
rclone @ remote:path/to/files X: --network-mode
|
||||
rclone @ remote:path/to/files X: --volname \\server\share
|
||||
```
|
||||
|
||||
You may also specify the network share UNC path as the mountpoint itself. Then rclone
|
||||
will automatically assign a drive letter, same as with `*` and use that as
|
||||
@@ -128,15 +140,16 @@ mountpoint, and instead use the UNC path specified as the volume name, as if it
|
||||
specified with the `--volname` option. This will also implicitly set
|
||||
the `--network-mode` option. This means the following two examples have same result:
|
||||
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
rclone @ remote:path/to/files * --volname \\cloud\remote
|
||||
```sh
|
||||
rclone @ remote:path/to/files \\cloud\remote
|
||||
rclone @ remote:path/to/files * --volname \\cloud\remote
|
||||
```
|
||||
|
||||
There is yet another way to enable network mode, and to set the share path,
|
||||
and that is to pass the "native" libfuse/WinFsp option directly:
|
||||
`--fuse-flag --VolumePrefix=\server\share`. Note that the path
|
||||
must be with just a single backslash prefix in this case.
|
||||
|
||||
|
||||
*Note:* In previous versions of rclone this was the only supported method.
|
||||
|
||||
[Read more about drive mapping](https://en.wikipedia.org/wiki/Drive_mapping)
|
||||
@@ -149,11 +162,11 @@ The FUSE emulation layer on Windows must convert between the POSIX-based
|
||||
permission model used in FUSE, and the permission model used in Windows,
|
||||
based on access-control lists (ACL).
|
||||
|
||||
The mounted filesystem will normally get three entries in its access-control list (ACL),
|
||||
representing permissions for the POSIX permission scopes: Owner, group and others.
|
||||
By default, the owner and group will be taken from the current user, and the built-in
|
||||
group "Everyone" will be used to represent others. The user/group can be customized
|
||||
with FUSE options "UserName" and "GroupName",
|
||||
The mounted filesystem will normally get three entries in its access-control list
|
||||
(ACL), representing permissions for the POSIX permission scopes: Owner, group and
|
||||
others. By default, the owner and group will be taken from the current user, and
|
||||
the built-in group "Everyone" will be used to represent others. The user/group can
|
||||
be customized with FUSE options "UserName" and "GroupName",
|
||||
e.g. `-o UserName=user123 -o GroupName="Authenticated Users"`.
|
||||
The permissions on each entry will be set according to [options](#options)
|
||||
`--dir-perms` and `--file-perms`, which takes a value in traditional Unix
|
||||
@@ -253,58 +266,63 @@ does not suffer from the same limitations.
|
||||
|
||||
### Mounting on macOS
|
||||
|
||||
Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/), [macFUSE](https://osxfuse.github.io/)
|
||||
(also known as osxfuse) or [FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional
|
||||
FUSE driver utilizing a macOS kernel extension (kext). FUSE-T is an alternative FUSE system
|
||||
which "mounts" via an NFSv4 local server.
|
||||
Mounting on macOS can be done either via [built-in NFS server](/commands/rclone_serve_nfs/),
|
||||
[macFUSE](https://osxfuse.github.io/) (also known as osxfuse) or
|
||||
[FUSE-T](https://www.fuse-t.org/). macFUSE is a traditional FUSE driver utilizing
|
||||
a macOS kernel extension (kext). FUSE-T is an alternative FUSE system which
|
||||
"mounts" via an NFSv4 local server.
|
||||
|
||||
##### Unicode Normalization
|
||||
#### Unicode Normalization
|
||||
|
||||
It is highly recommended to keep the default of `--no-unicode-normalization=false`
|
||||
for all `mount` and `serve` commands on macOS. For details, see [vfs-case-sensitivity](https://rclone.org/commands/rclone_mount/#vfs-case-sensitivity).
|
||||
|
||||
#### NFS mount
|
||||
|
||||
This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/) command and mounts
|
||||
it to the specified mountpoint. If you run this in background mode using |--daemon|, you will need to
|
||||
send SIGTERM signal to the rclone process using |kill| command to stop the mount.
|
||||
This method spins up an NFS server using [serve nfs](/commands/rclone_serve_nfs/)
|
||||
command and mounts it to the specified mountpoint. If you run this in background
|
||||
mode using |--daemon|, you will need to send SIGTERM signal to the rclone process
|
||||
using |kill| command to stop the mount.
|
||||
|
||||
Note that `--nfs-cache-handle-limit` controls the maximum number of cached file handles stored by the `nfsmount` caching handler.
|
||||
This should not be set too low or you may experience errors when trying to access files. The default is 1000000,
|
||||
Note that `--nfs-cache-handle-limit` controls the maximum number of cached file
|
||||
handles stored by the `nfsmount` caching handler. This should not be set too low
|
||||
or you may experience errors when trying to access files. The default is 1000000,
|
||||
but consider lowering this limit if the server's system resource usage causes problems.
|
||||
|
||||
#### macFUSE Notes
|
||||
|
||||
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases) from
|
||||
the website, rclone will locate the macFUSE libraries without any further intervention.
|
||||
If however, macFUSE is installed using the [macports](https://www.macports.org/) package manager,
|
||||
the following addition steps are required.
|
||||
If installing macFUSE using [dmg packages](https://github.com/osxfuse/osxfuse/releases)
|
||||
from the website, rclone will locate the macFUSE libraries without any further intervention.
|
||||
If however, macFUSE is installed using the [macports](https://www.macports.org/)
|
||||
package manager, the following addition steps are required.
|
||||
|
||||
sudo mkdir /usr/local/lib
|
||||
cd /usr/local/lib
|
||||
sudo ln -s /opt/local/lib/libfuse.2.dylib
|
||||
```sh
|
||||
sudo mkdir /usr/local/lib
|
||||
cd /usr/local/lib
|
||||
sudo ln -s /opt/local/lib/libfuse.2.dylib
|
||||
```
|
||||
|
||||
#### FUSE-T Limitations, Caveats, and Notes
|
||||
|
||||
There are some limitations, caveats, and notes about how it works. These are current as
|
||||
of FUSE-T version 1.0.14.
|
||||
There are some limitations, caveats, and notes about how it works. These are
|
||||
current as of FUSE-T version 1.0.14.
|
||||
|
||||
##### ModTime update on read
|
||||
|
||||
As per the [FUSE-T wiki](https://github.com/macos-fuse-t/fuse-t/wiki#caveats):
|
||||
|
||||
> File access and modification times cannot be set separately as it seems to be an
|
||||
> issue with the NFS client which always modifies both. Can be reproduced with
|
||||
> File access and modification times cannot be set separately as it seems to be an
|
||||
> issue with the NFS client which always modifies both. Can be reproduced with
|
||||
> 'touch -m' and 'touch -a' commands
|
||||
|
||||
This means that viewing files with various tools, notably macOS Finder, will cause rlcone
|
||||
to update the modification time of the file. This may make rclone upload a full new copy
|
||||
of the file.
|
||||
|
||||
This means that viewing files with various tools, notably macOS Finder, will cause
|
||||
rlcone to update the modification time of the file. This may make rclone upload a
|
||||
full new copy of the file.
|
||||
|
||||
##### Read Only mounts
|
||||
|
||||
When mounting with `--read-only`, attempts to write to files will fail *silently* as
|
||||
opposed to with a clear warning as in macFUSE.
|
||||
When mounting with `--read-only`, attempts to write to files will fail *silently*
|
||||
as opposed to with a clear warning as in macFUSE.
|
||||
|
||||
### Limitations
|
||||
|
||||
@@ -405,12 +423,14 @@ helper you should symlink rclone binary to `/sbin/mount.rclone` and optionally
|
||||
rclone will detect it and translate command-line arguments appropriately.
|
||||
|
||||
Now you can run classic mounts like this:
|
||||
```
|
||||
|
||||
```sh
|
||||
mount sftp1:subdir /mnt/data -t rclone -o vfs_cache_mode=writes,sftp_key_file=/path/to/pem
|
||||
```
|
||||
|
||||
or create systemd mount units:
|
||||
```
|
||||
|
||||
```ini
|
||||
# /etc/systemd/system/mnt-data.mount
|
||||
[Unit]
|
||||
Description=Mount for /mnt/data
|
||||
@@ -422,7 +442,8 @@ Options=rw,_netdev,allow_other,args2env,vfs-cache-mode=writes,config=/etc/rclone
|
||||
```
|
||||
|
||||
optionally accompanied by systemd automount unit
|
||||
```
|
||||
|
||||
```ini
|
||||
# /etc/systemd/system/mnt-data.automount
|
||||
[Unit]
|
||||
Description=AutoMount for /mnt/data
|
||||
@@ -434,7 +455,8 @@ WantedBy=multi-user.target
|
||||
```
|
||||
|
||||
or add in `/etc/fstab` a line like
|
||||
```
|
||||
|
||||
```sh
|
||||
sftp1:subdir /mnt/data rclone rw,noauto,nofail,_netdev,x-systemd.automount,args2env,vfs_cache_mode=writes,config=/etc/rclone.conf,cache_dir=/var/cache/rclone 0 0
|
||||
```
|
||||
|
||||
|
||||
@@ -65,14 +65,18 @@ This takes the following parameters:
|
||||
|
||||
Example:
|
||||
|
||||
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
|
||||
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
|
||||
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
|
||||
` + "```sh" + `
|
||||
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint
|
||||
rclone rc mount/mount fs=mydrive: mountPoint=/home/<user>/mountPoint mountType=mount
|
||||
rclone rc mount/mount fs=TestDrive: mountPoint=/mnt/tmp vfsOpt='{"CacheMode": 2}' mountOpt='{"AllowOther": true}'
|
||||
` + "```" + `
|
||||
|
||||
The vfsOpt are as described in options/get and can be seen in the the
|
||||
"vfs" section when running and the mountOpt can be seen in the "mount" section:
|
||||
|
||||
rclone rc options/get
|
||||
` + "```sh" + `
|
||||
rclone rc options/get
|
||||
` + "```" + `
|
||||
`,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -64,7 +64,7 @@ the backend supports it. If metadata syncing is required then use the
|
||||
|--metadata| flag.
|
||||
|
||||
Note that the modification time and metadata for the root directory
|
||||
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
|
||||
will **not** be synced. See <https://github.com/rclone/rclone/issues/7652>
|
||||
for more info.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
|
||||
@@ -35,18 +35,22 @@ like the [move](/commands/rclone_move/) command.
|
||||
|
||||
So
|
||||
|
||||
rclone moveto src dst
|
||||
` + "```sh" + `
|
||||
rclone moveto src dst
|
||||
` + "```" + `
|
||||
|
||||
where src and dst are rclone paths, either remote:path or
|
||||
/path/to/local or C:\windows\path\if\on\windows.
|
||||
|
||||
This will:
|
||||
|
||||
if src is file
|
||||
move it to dst, overwriting an existing file if it exists
|
||||
if src is directory
|
||||
move it to dst, overwriting existing files if they exist
|
||||
see move command for full details
|
||||
` + "```text" + `
|
||||
if src is file
|
||||
move it to dst, overwriting an existing file if it exists
|
||||
if src is directory
|
||||
move it to dst, overwriting existing files if they exist
|
||||
see move command for full details
|
||||
` + "```" + `
|
||||
|
||||
This doesn't transfer files that are identical on src and dst, testing
|
||||
by size and modification time or MD5SUM. src will be deleted on
|
||||
|
||||
@@ -47,22 +47,26 @@ structure as it goes along.
|
||||
You can interact with the user interface using key presses,
|
||||
press '?' to toggle the help on and off. The supported keys are:
|
||||
|
||||
` + strings.Join(helpText()[1:], "\n ") + `
|
||||
` + "```text" + `
|
||||
` + strings.Join(helpText()[1:], "\n") + `
|
||||
` + "```" + `
|
||||
|
||||
Listed files/directories may be prefixed by a one-character flag,
|
||||
some of them combined with a description in brackets at end of line.
|
||||
These flags have the following meaning:
|
||||
|
||||
e means this is an empty directory, i.e. contains no files (but
|
||||
may contain empty subdirectories)
|
||||
~ means this is a directory where some of the files (possibly in
|
||||
subdirectories) have unknown size, and therefore the directory
|
||||
size may be underestimated (and average size inaccurate, as it
|
||||
is average of the files with known sizes).
|
||||
. means an error occurred while reading a subdirectory, and
|
||||
therefore the directory size may be underestimated (and average
|
||||
size inaccurate)
|
||||
! means an error occurred while reading this directory
|
||||
` + "```text" + `
|
||||
e means this is an empty directory, i.e. contains no files (but
|
||||
may contain empty subdirectories)
|
||||
~ means this is a directory where some of the files (possibly in
|
||||
subdirectories) have unknown size, and therefore the directory
|
||||
size may be underestimated (and average size inaccurate, as it
|
||||
is average of the files with known sizes).
|
||||
. means an error occurred while reading a subdirectory, and
|
||||
therefore the directory size may be underestimated (and average
|
||||
size inaccurate)
|
||||
! means an error occurred while reading this directory
|
||||
` + "```" + `
|
||||
|
||||
This an homage to the [ncdu tool](https://dev.yorhel.nl/ncdu) but for
|
||||
rclone remotes. It is missing lots of features at the moment
|
||||
@@ -73,8 +77,7 @@ UI won't respond in the meantime since the deletion is done synchronously.
|
||||
|
||||
For a non-interactive listing of the remote, see the
|
||||
[tree](/commands/rclone_tree/) command. To just get the total size of
|
||||
the remote you can also use the [size](/commands/rclone_size/) command.
|
||||
`,
|
||||
the remote you can also use the [size](/commands/rclone_size/) command.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.37",
|
||||
"groups": "Filter,Listing",
|
||||
|
||||
@@ -22,9 +22,8 @@ var commandDefinition = &cobra.Command{
|
||||
Long: `In the rclone config file, human-readable passwords are
|
||||
obscured. Obscuring them is done by encrypting them and writing them
|
||||
out in base64. This is **not** a secure way of encrypting these
|
||||
passwords as rclone can decrypt them - it is to prevent "eyedropping"
|
||||
- namely someone seeing a password in the rclone config file by
|
||||
accident.
|
||||
passwords as rclone can decrypt them - it is to prevent "eyedropping" -
|
||||
namely someone seeing a password in the rclone config file by accident.
|
||||
|
||||
Many equally important things (like access tokens) are not obscured in
|
||||
the config file. However it is very hard to shoulder surf a 64
|
||||
@@ -34,7 +33,9 @@ This command can also accept a password through STDIN instead of an
|
||||
argument by passing a hyphen as an argument. This will use the first
|
||||
line of STDIN as the password not including the trailing newline.
|
||||
|
||||
echo "secretpassword" | rclone obscure -
|
||||
` + "```sh" + `
|
||||
echo "secretpassword" | rclone obscure -
|
||||
` + "```" + `
|
||||
|
||||
If there is no data on STDIN to read, rclone obscure will default to
|
||||
obfuscating the hyphen itself.
|
||||
|
||||
@@ -24,12 +24,12 @@ include/exclude filters - everything will be removed. Use the
|
||||
delete files. To delete empty directories only, use command
|
||||
[rmdir](/commands/rclone_rmdir/) or [rmdirs](/commands/rclone_rmdirs/).
|
||||
|
||||
The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag. However, some backends will
|
||||
implement this command directly, in which case ` + "`--checkers`" + ` will be ignored.
|
||||
The concurrency of this operation is controlled by the ` + "`--checkers`" + ` global flag.
|
||||
However, some backends will implement this command directly, in which
|
||||
case ` + "`--checkers`" + ` will be ignored.
|
||||
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.
|
||||
`,
|
||||
` + "`--dry-run` or the `--interactive`/`-i`" + ` flag.`,
|
||||
Annotations: map[string]string{
|
||||
"groups": "Important",
|
||||
},
|
||||
|
||||
35
cmd/rc/rc.go
@@ -53,8 +53,8 @@ var commandDefinition = &cobra.Command{
|
||||
Short: `Run a command against a running rclone.`,
|
||||
Long: strings.ReplaceAll(`This runs a command against a running rclone. Use the |--url| flag to
|
||||
specify an non default URL to connect on. This can be either a
|
||||
":port" which is taken to mean "http://localhost:port" or a
|
||||
"host:port" which is taken to mean "http://host:port"
|
||||
":port" which is taken to mean <http://localhost:port> or a
|
||||
"host:port" which is taken to mean <http://host:port>.
|
||||
|
||||
A username and password can be passed in with |--user| and |--pass|.
|
||||
|
||||
@@ -63,10 +63,12 @@ Note that |--rc-addr|, |--rc-user|, |--rc-pass| will be read also for
|
||||
|
||||
The |--unix-socket| flag can be used to connect over a unix socket like this
|
||||
|
||||
# start server on /tmp/my.socket
|
||||
rclone rcd --rc-addr unix:///tmp/my.socket
|
||||
# Connect to it
|
||||
rclone rc --unix-socket /tmp/my.socket core/stats
|
||||
|||sh
|
||||
# start server on /tmp/my.socket
|
||||
rclone rcd --rc-addr unix:///tmp/my.socket
|
||||
# Connect to it
|
||||
rclone rc --unix-socket /tmp/my.socket core/stats
|
||||
|||
|
||||
|
||||
Arguments should be passed in as parameter=value.
|
||||
|
||||
@@ -81,29 +83,38 @@ options in the form |-o key=value| or |-o key|. It can be repeated as
|
||||
many times as required. This is useful for rc commands which take the
|
||||
"opt" parameter which by convention is a dictionary of strings.
|
||||
|
||||
-o key=value -o key2
|
||||
|||text
|
||||
-o key=value -o key2
|
||||
|||
|
||||
|
||||
Will place this in the "opt" value
|
||||
|
||||
{"key":"value", "key2","")
|
||||
|
||||
|||json
|
||||
{"key":"value", "key2","")
|
||||
|||
|
||||
|
||||
The |-a|/|--arg| option can be used to set strings in the "arg" value. It
|
||||
can be repeated as many times as required. This is useful for rc
|
||||
commands which take the "arg" parameter which by convention is a list
|
||||
of strings.
|
||||
|
||||
-a value -a value2
|
||||
|||text
|
||||
-a value -a value2
|
||||
|||
|
||||
|
||||
Will place this in the "arg" value
|
||||
|
||||
["value", "value2"]
|
||||
|||json
|
||||
["value", "value2"]
|
||||
|||
|
||||
|
||||
Use |--loopback| to connect to the rclone instance running |rclone rc|.
|
||||
This is very useful for testing commands without having to run an
|
||||
rclone rc server, e.g.:
|
||||
|
||||
rclone rc --loopback operations/about fs=/
|
||||
|||sh
|
||||
rclone rc --loopback operations/about fs=/
|
||||
|||
|
||||
|
||||
Use |rclone rc| to see a list of all possible commands.`, "|", "`"),
|
||||
Annotations: map[string]string{
|
||||
|
||||
@@ -28,8 +28,10 @@ var commandDefinition = &cobra.Command{
|
||||
Short: `Copies standard input to file on remote.`,
|
||||
Long: `Reads from standard input (stdin) and copies it to a single remote file.
|
||||
|
||||
echo "hello world" | rclone rcat remote:path/to/file
|
||||
ffmpeg - | rclone rcat remote:path/to/file
|
||||
` + "```sh" + `
|
||||
echo "hello world" | rclone rcat remote:path/to/file
|
||||
ffmpeg - | rclone rcat remote:path/to/file
|
||||
` + "```" + `
|
||||
|
||||
If the remote file already exists, it will be overwritten.
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package rcd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/fs"
|
||||
@@ -31,7 +32,7 @@ the browser when rclone is run.
|
||||
|
||||
See the [rc documentation](/rc/) for more info on the rc flags.
|
||||
|
||||
` + libhttp.Help(rcflags.FlagPrefix) + libhttp.TemplateHelp(rcflags.FlagPrefix) + libhttp.AuthHelp(rcflags.FlagPrefix),
|
||||
` + strings.TrimSpace(libhttp.Help(rcflags.FlagPrefix)+libhttp.TemplateHelp(rcflags.FlagPrefix)+libhttp.AuthHelp(rcflags.FlagPrefix)),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.45",
|
||||
"groups": "RC",
|
||||
|
||||
@@ -21,8 +21,7 @@ has any objects in it, not even empty subdirectories. Use
|
||||
command [rmdirs](/commands/rclone_rmdirs/) (or [delete](/commands/rclone_delete/)
|
||||
with option ` + "`--rmdirs`" + `) to do that.
|
||||
|
||||
To delete a path and any objects in it, use [purge](/commands/rclone_purge/) command.
|
||||
`,
|
||||
To delete a path and any objects in it, use [purge](/commands/rclone_purge/) command.`,
|
||||
Annotations: map[string]string{
|
||||
"groups": "Important",
|
||||
},
|
||||
|
||||
@@ -38,8 +38,7 @@ This will delete ` + "`--checkers`" + ` directories concurrently so
|
||||
if you have thousands of empty directories consider increasing this number.
|
||||
|
||||
To delete a path and any objects in it, use the [purge](/commands/rclone_purge/)
|
||||
command.
|
||||
`,
|
||||
command.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.35",
|
||||
"groups": "Important",
|
||||
|
||||
@@ -65,7 +65,7 @@ var cmdSelfUpdate = &cobra.Command{
|
||||
Use: "selfupdate",
|
||||
Aliases: []string{"self-update"},
|
||||
Short: `Update the rclone binary.`,
|
||||
Long: selfUpdateHelp,
|
||||
Long: strings.TrimSpace(selfUpdateHelp),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.55",
|
||||
},
|
||||
|
||||
@@ -43,5 +43,5 @@ command will rename the old executable to 'rclone.old.exe' upon success.
|
||||
|
||||
Please note that this command was not available before rclone version 1.55.
|
||||
If it fails for you with the message `unknown command "selfupdate"` then
|
||||
you will need to update manually following the install instructions located
|
||||
at https://rclone.org/install/
|
||||
you will need to update manually following the
|
||||
[install documentation](https://rclone.org/install/).
|
||||
|
||||
@@ -123,7 +123,7 @@ default "rclone (hostname)".
|
||||
Use ` + "`--log-trace` in conjunction with `-vv`" + ` to enable additional debug
|
||||
logging of all UPNP traffic.
|
||||
|
||||
` + vfs.Help(),
|
||||
` + strings.TrimSpace(vfs.Help()),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.46",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -59,7 +59,7 @@ func init() {
|
||||
var Command = &cobra.Command{
|
||||
Use: "docker",
|
||||
Short: `Serve any remote on docker's volume plugin API.`,
|
||||
Long: help() + vfs.Help(),
|
||||
Long: help() + strings.TrimSpace(vfs.Help()),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.56",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -8,7 +8,8 @@ docker daemon and runs the corresponding code when necessary.
|
||||
Docker plugins can run as a managed plugin under control of the docker daemon
|
||||
or as an independent native service. For testing, you can just run it directly
|
||||
from the command line, for example:
|
||||
```
|
||||
|
||||
```sh
|
||||
sudo rclone serve docker --base-dir /tmp/rclone-volumes --socket-addr localhost:8787 -vv
|
||||
```
|
||||
|
||||
|
||||
@@ -76,7 +76,6 @@ func NewDriver(ctx context.Context, root string, mntOpt *mountlib.Options, vfsOp
|
||||
// start mount monitoring
|
||||
drv.hupChan = make(chan os.Signal, 1)
|
||||
drv.monChan = make(chan bool, 1)
|
||||
mountlib.NotifyOnSigHup(drv.hupChan)
|
||||
go drv.monitor()
|
||||
|
||||
// unmount all volumes on exit
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"os/user"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -140,7 +141,7 @@ By default this will serve files without needing a login.
|
||||
|
||||
You can set a single username and password with the --user and --pass flags.
|
||||
|
||||
` + vfs.Help() + proxy.Help,
|
||||
` + strings.TrimSpace(vfs.Help()+proxy.Help),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.44",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -110,7 +110,7 @@ The server will log errors. Use ` + "`-v`" + ` to see access logs.
|
||||
` + "`--bwlimit`" + ` will be respected for file transfers. Use ` + "`--stats`" + ` to
|
||||
control the stats printing.
|
||||
|
||||
` + libhttp.Help(flagPrefix) + libhttp.TemplateHelp(flagPrefix) + libhttp.AuthHelp(flagPrefix) + vfs.Help() + proxy.Help,
|
||||
` + strings.TrimSpace(libhttp.Help(flagPrefix)+libhttp.TemplateHelp(flagPrefix)+libhttp.AuthHelp(flagPrefix)+vfs.Help()+proxy.Help),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -125,7 +125,7 @@ var Command = &cobra.Command{
|
||||
Use: "nfs remote:path",
|
||||
Short: `Serve the remote as an NFS mount`,
|
||||
Long: strings.ReplaceAll(`Create an NFS server that serves the given remote over the network.
|
||||
|
||||
|
||||
This implements an NFSv3 server to serve any rclone remote via NFS.
|
||||
|
||||
The primary purpose for this command is to enable the [mount
|
||||
@@ -179,12 +179,16 @@ cache.
|
||||
|
||||
To serve NFS over the network use following command:
|
||||
|
||||
rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full
|
||||
|||sh
|
||||
rclone serve nfs remote: --addr 0.0.0.0:$PORT --vfs-cache-mode=full
|
||||
|||
|
||||
|
||||
This specifies a port that can be used in the mount command. To mount
|
||||
the server under Linux/macOS, use the following command:
|
||||
|
||||
mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint
|
||||
|
||||
|||sh
|
||||
mount -t nfs -o port=$PORT,mountport=$PORT,tcp $HOSTNAME:/ path/to/mountpoint
|
||||
|||
|
||||
|
||||
Where |$PORT| is the same port number used in the |serve nfs| command
|
||||
and |$HOSTNAME| is the network address of the machine that |serve nfs|
|
||||
@@ -198,7 +202,7 @@ is desired.
|
||||
|
||||
This command is only available on Unix platforms.
|
||||
|
||||
`, "|", "`") + vfs.Help(),
|
||||
`, "|", "`") + strings.TrimSpace(vfs.Help()),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.65",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -46,41 +46,43 @@ options - it is the job of the proxy program to make a complete
|
||||
config.
|
||||
|
||||
This config generated must have this extra parameter
|
||||
|
||||
- |_root| - root to use for the backend
|
||||
|
||||
And it may have this parameter
|
||||
|
||||
- |_obscure| - comma separated strings for parameters to obscure
|
||||
|
||||
If password authentication was used by the client, input to the proxy
|
||||
process (on STDIN) would look similar to this:
|
||||
|
||||
|||
|
||||
|||json
|
||||
{
|
||||
"user": "me",
|
||||
"pass": "mypassword"
|
||||
"user": "me",
|
||||
"pass": "mypassword"
|
||||
}
|
||||
|||
|
||||
|
||||
If public-key authentication was used by the client, input to the
|
||||
proxy process (on STDIN) would look similar to this:
|
||||
|
||||
|||
|
||||
|||json
|
||||
{
|
||||
"user": "me",
|
||||
"public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
|
||||
"user": "me",
|
||||
"public_key": "AAAAB3NzaC1yc2EAAAADAQABAAABAQDuwESFdAe14hVS6omeyX7edc...JQdf"
|
||||
}
|
||||
|||
|
||||
|
||||
And as an example return this on STDOUT
|
||||
|
||||
|||
|
||||
|||json
|
||||
{
|
||||
"type": "sftp",
|
||||
"_root": "",
|
||||
"_obscure": "pass",
|
||||
"user": "me",
|
||||
"pass": "mypassword",
|
||||
"host": "sftp.example.com"
|
||||
"type": "sftp",
|
||||
"_root": "",
|
||||
"_obscure": "pass",
|
||||
"user": "me",
|
||||
"pass": "mypassword",
|
||||
"host": "sftp.example.com"
|
||||
}
|
||||
|||
|
||||
|
||||
@@ -102,7 +104,7 @@ password or public-key is changed the cache will need to expire (which takes 5 m
|
||||
before it takes effect.
|
||||
|
||||
This can be used to build general purpose proxies to any kind of
|
||||
backend that rclone supports.
|
||||
backend that rclone supports.
|
||||
|
||||
`, "|", "`")
|
||||
|
||||
|
||||
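The proxy contract described above (a JSON auth request on STDIN, a backend config returned on STDOUT, with the special `_root` and `_obscure` keys) can be exercised with a very small program. A hedged sketch in Go — the JSON field names follow the documentation above, while everything else (the fixed sftp host, decoding a single object) is an assumption for illustration:

```go
package main

import (
	"encoding/json"
	"log"
	"os"
)

// authRequest mirrors the JSON rclone sends to the proxy program.
type authRequest struct {
	User      string `json:"user"`
	Pass      string `json:"pass,omitempty"`
	PublicKey string `json:"public_key,omitempty"`
}

func main() {
	var req authRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decode auth request: %v", err)
	}

	// Build the backend config for this user. _root and _obscure are the
	// special keys described above; the sftp host is a made-up example.
	resp := map[string]string{
		"type":     "sftp",
		"_root":    "",
		"_obscure": "pass",
		"user":     req.User,
		"pass":     req.Pass,
		"host":     "sftp.example.com",
	}
	if err := json.NewEncoder(os.Stdout).Encode(resp); err != nil {
		log.Fatalf("encode config: %v", err)
	}
}
```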
@@ -108,7 +108,7 @@ The server will log errors. Use -v to see access logs.
|
||||
` + "`--bwlimit`" + ` will be respected for file transfers.
|
||||
Use ` + "`--stats`" + ` to control the stats printing.
|
||||
|
||||
### Setting up rclone for use by restic ###
|
||||
### Setting up rclone for use by restic
|
||||
|
||||
First [set up a remote for your chosen cloud provider](/docs/#configure).
|
||||
|
||||
@@ -119,7 +119,9 @@ following instructions.
|
||||
|
||||
Now start the rclone restic server
|
||||
|
||||
rclone serve restic -v remote:backup
|
||||
` + "```sh" + `
|
||||
rclone serve restic -v remote:backup
|
||||
` + "```" + `
|
||||
|
||||
Where you can replace "backup" in the above by whatever path in the
|
||||
remote you wish to use.
|
||||
@@ -133,7 +135,7 @@ Adding ` + "`--cache-objects=false`" + ` will cause rclone to stop caching objec
|
||||
returned from the List call. Caching is normally desirable as it speeds
|
||||
up downloading objects, saves transactions and uses very little memory.
|
||||
|
||||
### Setting up restic to use rclone ###
|
||||
### Setting up restic to use rclone
|
||||
|
||||
Now you can [follow the restic
|
||||
instructions](http://restic.readthedocs.io/en/latest/030_preparing_a_new_repo.html#rest-server)
|
||||
@@ -147,38 +149,43 @@ the URL for the REST server.
|
||||
|
||||
For example:
|
||||
|
||||
$ export RESTIC_REPOSITORY=rest:http://localhost:8080/
|
||||
$ export RESTIC_PASSWORD=yourpassword
|
||||
$ restic init
|
||||
created restic backend 8b1a4b56ae at rest:http://localhost:8080/
|
||||
` + "```sh" + `
|
||||
$ export RESTIC_REPOSITORY=rest:http://localhost:8080/
|
||||
$ export RESTIC_PASSWORD=yourpassword
|
||||
$ restic init
|
||||
created restic backend 8b1a4b56ae at rest:http://localhost:8080/
|
||||
|
||||
Please note that knowledge of your password is required to access
|
||||
the repository. Losing your password means that your data is
|
||||
irrecoverably lost.
|
||||
$ restic backup /path/to/files/to/backup
|
||||
scan [/path/to/files/to/backup]
|
||||
scanned 189 directories, 312 files in 0:00
|
||||
[0:00] 100.00% 38.128 MiB / 38.128 MiB 501 / 501 items 0 errors ETA 0:00
|
||||
duration: 0:00
|
||||
snapshot 45c8fdd8 saved
|
||||
Please note that knowledge of your password is required to access
|
||||
the repository. Losing your password means that your data is
|
||||
irrecoverably lost.
|
||||
$ restic backup /path/to/files/to/backup
|
||||
scan [/path/to/files/to/backup]
|
||||
scanned 189 directories, 312 files in 0:00
|
||||
[0:00] 100.00% 38.128 MiB / 38.128 MiB 501 / 501 items 0 errors ETA 0:00
|
||||
duration: 0:00
|
||||
snapshot 45c8fdd8 saved
|
||||
|
||||
#### Multiple repositories ####
|
||||
` + "```" + `
|
||||
|
||||
#### Multiple repositories
|
||||
|
||||
Note that you can use the endpoint to host multiple repositories. Do
|
||||
this by adding a directory name or path after the URL. Note that
|
||||
these **must** end with /. Eg
|
||||
|
||||
$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user1repo/
|
||||
# backup user1 stuff
|
||||
$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/
|
||||
# backup user2 stuff
|
||||
` + "```sh" + `
|
||||
$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user1repo/
|
||||
# backup user1 stuff
|
||||
$ export RESTIC_REPOSITORY=rest:http://localhost:8080/user2repo/
|
||||
# backup user2 stuff
|
||||
` + "```" + `
|
||||
|
||||
#### Private repositories ####
|
||||
#### Private repositories
|
||||
|
||||
The ` + "`--private-repos`" + ` flag can be used to limit users to repositories starting
with a path of ` + "`/<username>/`" + `.

` + libhttp.Help(flagPrefix) + libhttp.AuthHelp(flagPrefix),
|
||||
` + strings.TrimSpace(libhttp.Help(flagPrefix)+libhttp.AuthHelp(flagPrefix)),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.40",
|
||||
},
|
||||
|
||||
@@ -105,7 +105,7 @@ var Command = &cobra.Command{
|
||||
},
|
||||
Use: "s3 remote:path",
|
||||
Short: `Serve remote:path over s3.`,
|
||||
Long: help() + httplib.AuthHelp(flagPrefix) + httplib.Help(flagPrefix) + vfs.Help(),
|
||||
Long: help() + strings.TrimSpace(httplib.AuthHelp(flagPrefix)+httplib.Help(flagPrefix)+vfs.Help()),
|
||||
RunE: func(command *cobra.Command, args []string) error {
|
||||
var f fs.Fs
|
||||
if proxy.Opt.AuthProxy == "" {
|
||||
|
||||
@@ -33,20 +33,20 @@ cause problems for S3 clients which rely on the Etag being the MD5.
|
||||
For a simple set up, to serve `remote:path` over s3, run the server
|
||||
like this:
|
||||
|
||||
```
|
||||
```sh
|
||||
rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY remote:path
|
||||
```
|
||||
|
||||
For example, to use a simple folder in the filesystem, run the server
|
||||
with a command like this:
|
||||
|
||||
```
|
||||
```sh
|
||||
rclone serve s3 --auth-key ACCESS_KEY_ID,SECRET_ACCESS_KEY local:/path/to/folder
|
||||
```
|
||||
|
||||
The `rclone.conf` for the server could look like this:
|
||||
|
||||
```
|
||||
```ini
|
||||
[local]
|
||||
type = local
|
||||
```
|
||||
@@ -59,7 +59,7 @@ will be visible as a warning in the logs. But it will run nonetheless.
|
||||
This will be compatible with an rclone (client) remote configuration which
|
||||
is defined like this:
|
||||
|
||||
```
|
||||
```ini
|
||||
[serves3]
|
||||
type = s3
|
||||
provider = Rclone
|
||||
@@ -116,20 +116,20 @@ metadata which will be set as the modification time of the file.
|
||||
`serve s3` currently supports the following operations.
|
||||
|
||||
- Bucket
|
||||
- `ListBuckets`
|
||||
- `CreateBucket`
|
||||
- `DeleteBucket`
|
||||
- `ListBuckets`
|
||||
- `CreateBucket`
|
||||
- `DeleteBucket`
|
||||
- Object
|
||||
- `HeadObject`
|
||||
- `ListObjects`
|
||||
- `GetObject`
|
||||
- `PutObject`
|
||||
- `DeleteObject`
|
||||
- `DeleteObjects`
|
||||
- `CreateMultipartUpload`
|
||||
- `CompleteMultipartUpload`
|
||||
- `AbortMultipartUpload`
|
||||
- `CopyObject`
|
||||
- `UploadPart`
|
||||
- `HeadObject`
|
||||
- `ListObjects`
|
||||
- `GetObject`
|
||||
- `PutObject`
|
||||
- `DeleteObject`
|
||||
- `DeleteObjects`
|
||||
- `CreateMultipartUpload`
|
||||
- `CompleteMultipartUpload`
|
||||
- `AbortMultipartUpload`
|
||||
- `CopyObject`
|
||||
- `UploadPart`
|
||||
|
||||
Other operations will return error `Unimplemented`.
|
||||
|
||||
@@ -19,10 +19,11 @@ var Command = &cobra.Command{
|
||||
Long: `Serve a remote over a given protocol. Requires the use of a
|
||||
subcommand to specify the protocol, e.g.
|
||||
|
||||
rclone serve http remote:
|
||||
` + "```sh" + `
|
||||
rclone serve http remote:
|
||||
` + "```" + `
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
`,
|
||||
Each subcommand has its own options which you can see in their help.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
},
|
||||
|
||||
@@ -6,6 +6,7 @@ package sftp
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/rclone/rclone/cmd"
|
||||
"github.com/rclone/rclone/cmd/serve"
|
||||
@@ -146,11 +147,13 @@ reachable externally then supply ` + "`--addr :2022`" + ` for example.
|
||||
This also supports being run with socket activation, in which case it will
|
||||
listen on the first passed FD.
|
||||
It can be configured with .socket and .service unit files as described in
|
||||
https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html
|
||||
<https://www.freedesktop.org/software/systemd/man/latest/systemd.socket.html>.
|
||||
|
||||
Socket activation can be tested ad-hoc with the ` + "`systemd-socket-activate`" + ` command:

systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
|
||||
` + "```sh" + `
|
||||
systemd-socket-activate -l 2222 -- rclone serve sftp :local:vfs/
|
||||
` + "```" + `
|
||||
|
||||
This will socket-activate rclone on the first connection to port 2222 over TCP.
|
||||
|
||||
@@ -160,7 +163,9 @@ sftp backend, but it may not be with other SFTP clients.
|
||||
If ` + "`--stdio`" + ` is specified, rclone will serve SFTP over stdio, which can
|
||||
be used with sshd via ~/.ssh/authorized_keys, for example:
|
||||
|
||||
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
|
||||
` + "```text" + `
|
||||
restrict,command="rclone serve sftp --stdio ./photos" ssh-rsa ...
|
||||
` + "```" + `
|
||||
|
||||
On the client you need to set ` + "`--transfers 1`" + ` when using ` + "`--stdio`" + `.
|
||||
Otherwise multiple instances of the rclone server are started by OpenSSH
|
||||
@@ -173,7 +178,7 @@ used. Omitting "restrict" and using ` + "`--sftp-path-override`" + ` to enable
|
||||
checksumming is possible but less secure and you could use the SFTP server
|
||||
provided by OpenSSH in this case.
|
||||
|
||||
` + vfs.Help() + proxy.Help,
|
||||
` + strings.TrimSpace(vfs.Help()+proxy.Help),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.48",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -107,7 +107,7 @@ browser, or you can make a remote of type WebDAV to read and write it.
|
||||
|
||||
### WebDAV options
|
||||
|
||||
#### --etag-hash
|
||||
#### --etag-hash
|
||||
|
||||
This controls the ETag header. Without this flag the ETag will be
|
||||
based on the ModTime and Size of the object.
|
||||
@@ -119,44 +119,58 @@ to see the full list.
|
||||
|
||||
### Access WebDAV on Windows
|
||||
|
||||
WebDAV shared folder can be mapped as a drive on Windows, however the default settings prevent it.
|
||||
Windows will fail to connect to the server using insecure Basic authentication.
|
||||
It will not even display any login dialog. Windows requires SSL / HTTPS connection to be used with Basic.
|
||||
If you try to connect via Add Network Location Wizard you will get the following error:
|
||||
WebDAV shared folder can be mapped as a drive on Windows, however the default
|
||||
settings prevent it. Windows will fail to connect to the server using insecure
|
||||
Basic authentication. It will not even display any login dialog. Windows
|
||||
requires SSL / HTTPS connection to be used with Basic. If you try to connect
|
||||
via Add Network Location Wizard you will get the following error:
|
||||
"The folder you entered does not appear to be valid. Please choose another".
|
||||
However, you still can connect if you set the following registry key on a client machine:
|
||||
HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services\WebClient\Parameters\BasicAuthLevel to 2.
|
||||
The BasicAuthLevel can be set to the following values:
|
||||
0 - Basic authentication disabled
|
||||
1 - Basic authentication enabled for SSL connections only
|
||||
2 - Basic authentication enabled for SSL connections and for non-SSL connections
|
||||
However, you still can connect if you set the following registry key on a
|
||||
client machine:
|
||||
` + "`HKEY_LOCAL_MACHINE\\SYSTEM\\CurrentControlSet\\Services\\WebClient\\Parameters\\BasicAuthLevel`" + `
|
||||
to 2. The BasicAuthLevel can be set to the following values:
|
||||
|
||||
` + "```text" + `
|
||||
0 - Basic authentication disabled
|
||||
1 - Basic authentication enabled for SSL connections only
|
||||
2 - Basic authentication enabled for SSL connections and for non-SSL connections
|
||||
` + "```" + `
|
||||
|
||||
If required, increase the FileSizeLimitInBytes to a higher value.
|
||||
Navigate to the Services interface, then restart the WebClient service.
|
||||
|
||||
### Access Office applications on WebDAV
|
||||
|
||||
Navigate to following registry HKEY_CURRENT_USER\Software\Microsoft\Office\[14.0/15.0/16.0]\Common\Internet
|
||||
Navigate to following registry
|
||||
` + "`HKEY_CURRENT_USER\\Software\\Microsoft\\Office\\[14.0/15.0/16.0]\\Common\\Internet`" + `
|
||||
Create a new DWORD BasicAuthLevel with value 2.
|
||||
0 - Basic authentication disabled
|
||||
1 - Basic authentication enabled for SSL connections only
|
||||
2 - Basic authentication enabled for SSL and for non-SSL connections
|
||||
|
||||
https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint
|
||||
` + "```text" + `
|
||||
0 - Basic authentication disabled
|
||||
1 - Basic authentication enabled for SSL connections only
|
||||
2 - Basic authentication enabled for SSL and for non-SSL connections
|
||||
` + "```" + `
|
||||
|
||||
<https://learn.microsoft.com/en-us/office/troubleshoot/powerpoint/office-opens-blank-from-sharepoint>
|
||||
|
||||
### Serving over a unix socket
|
||||
|
||||
You can serve the webdav on a unix socket like this:
|
||||
|
||||
rclone serve webdav --addr unix:///tmp/my.socket remote:path
|
||||
` + "```sh" + `
|
||||
rclone serve webdav --addr unix:///tmp/my.socket remote:path
|
||||
` + "```" + `
|
||||
|
||||
and connect to it like this using rclone and the webdav backend:
|
||||
|
||||
rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav:
|
||||
` + "```sh" + `
|
||||
rclone --webdav-unix-socket /tmp/my.socket --webdav-url http://localhost lsf :webdav:
|
||||
` + "```" + `
|
||||
|
||||
Note that there is no authentication on http protocol - this is expected to be
|
||||
done by the permissions on the socket.
|
||||
|
||||
` + libhttp.Help(flagPrefix) + libhttp.TemplateHelp(flagPrefix) + libhttp.AuthHelp(flagPrefix) + vfs.Help() + proxy.Help,
|
||||
` + strings.TrimSpace(libhttp.Help(flagPrefix)+libhttp.TemplateHelp(flagPrefix)+libhttp.AuthHelp(flagPrefix)+vfs.Help()+proxy.Help),
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
"groups": "Filter",
|
||||
|
||||
@@ -29,16 +29,21 @@ inaccessible.true
|
||||
|
||||
You can use it to tier single object
|
||||
|
||||
rclone settier Cool remote:path/file
|
||||
` + "```sh" + `
|
||||
rclone settier Cool remote:path/file
|
||||
` + "```" + `
|
||||
|
||||
Or use rclone filters to set tier on only specific files
|
||||
|
||||
rclone --include "*.txt" settier Hot remote:path/dir
|
||||
` + "```sh" + `
|
||||
rclone --include "*.txt" settier Hot remote:path/dir
|
||||
` + "```" + `
|
||||
|
||||
Or just provide remote directory and all files in directory will be tiered
|
||||
|
||||
rclone settier tier remote:path/dir
|
||||
`,
|
||||
` + "```sh" + `
|
||||
rclone settier tier remote:path/dir
|
||||
` + "```",
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.44",
|
||||
},
|
||||
|
||||
@@ -38,8 +38,7 @@ when there is data to read (if not, the hyphen will be treated literally,
|
||||
as a relative path).
|
||||
|
||||
This command can also hash data received on STDIN, if not passing
|
||||
a remote:path.
|
||||
`,
|
||||
a remote:path.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.27",
|
||||
"groups": "Filter,Listing",
|
||||
|
||||
@@ -41,8 +41,7 @@ Some backends do not always provide file sizes, see for example
|
||||
[Google Docs](/drive/#limitations-of-google-docs).
|
||||
Rclone will then show a notice in the log indicating how many such
|
||||
files were encountered, and count them in as empty files in the output
|
||||
of the size command.
|
||||
`,
|
||||
of the size command.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.23",
|
||||
"groups": "Filter,Listing",
|
||||
|
||||
@@ -42,7 +42,9 @@ want to delete files from destination, use the
|
||||
**Important**: Since this can cause data loss, test first with the
|
||||
|--dry-run| or the |--interactive|/|i| flag.
|
||||
|
||||
rclone sync --interactive SOURCE remote:DESTINATION
|
||||
|||sh
|
||||
rclone sync --interactive SOURCE remote:DESTINATION
|
||||
|||
|
||||
|
||||
Files in the destination won't be deleted if there were any errors at any
|
||||
point. Duplicate objects (files with the same name, on those providers that
|
||||
@@ -59,7 +61,7 @@ If dest:path doesn't exist, it is created and the source:path contents
|
||||
go there.
|
||||
|
||||
It is not possible to sync overlapping remotes. However, you may exclude
|
||||
the destination from the sync with a filter rule or by putting an
|
||||
the destination from the sync with a filter rule or by putting an
|
||||
exclude-if-present file inside the destination directory and sync to a
|
||||
destination that is inside the source directory.
|
||||
|
||||
@@ -68,13 +70,15 @@ the backend supports it. If metadata syncing is required then use the
|
||||
|--metadata| flag.
|
||||
|
||||
Note that the modification time and metadata for the root directory
|
||||
will **not** be synced. See https://github.com/rclone/rclone/issues/7652
|
||||
will **not** be synced. See <https://github.com/rclone/rclone/issues/7652>
|
||||
for more info.
|
||||
|
||||
**Note**: Use the |-P|/|--progress| flag to view real-time transfer statistics
|
||||
|
||||
**Note**: Use the |rclone dedupe| command to deal with "Duplicate object/directory found in source/destination - ignoring" errors.
|
||||
See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372) for more info.
|
||||
**Note**: Use the |rclone dedupe| command to deal with "Duplicate
|
||||
object/directory found in source/destination - ignoring" errors.
|
||||
See [this forum post](https://forum.rclone.org/t/sync-not-clearing-duplicates/14372)
|
||||
for more info.
|
||||
|
||||
`, "|", "`") + operationsflags.Help(),
|
||||
Annotations: map[string]string{
|
||||
|
||||
@@ -26,8 +26,7 @@ var commandDefinition = &cobra.Command{
|
||||
in filenames in the remote:path specified.
|
||||
|
||||
The data doesn't contain any identifying information but is useful for
|
||||
the rclone developers when developing filename compression.
|
||||
`,
|
||||
the rclone developers when developing filename compression.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.55",
|
||||
},
|
||||
|
||||
@@ -68,8 +68,7 @@ paths passed in and how long they can be. It can take some time. It will
|
||||
write test files into the remote:path passed in. It outputs a bit of go
|
||||
code for each one.
|
||||
|
||||
**NB** this can create undeletable files and other hazards - use with care
|
||||
`,
|
||||
**NB** this can create undeletable files and other hazards - use with care!`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.55",
|
||||
},
|
||||
|
||||
@@ -18,13 +18,14 @@ var Command = &cobra.Command{
|
||||
|
||||
Select which test command you want with the subcommand, eg
|
||||
|
||||
rclone test memory remote:
|
||||
` + "```sh" + `
|
||||
rclone test memory remote:
|
||||
` + "```" + `
|
||||
|
||||
Each subcommand has its own options which you can see in their help.
|
||||
|
||||
**NB** Be careful running these commands, they may do strange things
|
||||
so reading their documentation first is recommended.
|
||||
`,
|
||||
so reading their documentation first is recommended.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.55",
|
||||
},
|
||||
|
||||
@@ -61,8 +61,7 @@ time instead of the current time. Times may be specified as one of:
|
||||
- 'YYYY-MM-DDTHH:MM:SS.SSS' - e.g. 2006-01-02T15:04:05.123456789
|
||||
|
||||
Note that value of ` + "`--timestamp`" + ` is in UTC. If you want local time
|
||||
then add the ` + "`--localtime`" + ` flag.
|
||||
`,
|
||||
then add the ` + "`--localtime`" + ` flag.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.39",
|
||||
"groups": "Filter,Listing,Important",
|
||||
|
||||
@@ -73,16 +73,18 @@ var commandDefinition = &cobra.Command{
|
||||
|
||||
For example
|
||||
|
||||
$ rclone tree remote:path
|
||||
/
|
||||
├── file1
|
||||
├── file2
|
||||
├── file3
|
||||
└── subdir
|
||||
├── file4
|
||||
└── file5
|
||||
` + "```text" + `
|
||||
$ rclone tree remote:path
|
||||
/
|
||||
├── file1
|
||||
├── file2
|
||||
├── file3
|
||||
└── subdir
|
||||
├── file4
|
||||
└── file5
|
||||
|
||||
1 directories, 5 files
|
||||
1 directories, 5 files
|
||||
` + "```" + `
|
||||
|
||||
You can use any of the filtering options with the tree command (e.g.
|
||||
` + "`--include` and `--exclude`" + `). You can also use ` + "`--fast-list`" + `.
@@ -93,8 +95,7 @@ sizes with ` + "`--size`" + `. Note that not all of them have
|
||||
short options as they conflict with rclone's short options.
|
||||
|
||||
For a more interactive navigation of the remote see the
|
||||
[ncdu](/commands/rclone_ncdu/) command.
|
||||
`,
|
||||
[ncdu](/commands/rclone_ncdu/) command.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.38",
|
||||
"groups": "Filter,Listing",
|
||||
|
||||
@@ -42,15 +42,17 @@ build tags and the type of executable (static or dynamic).
|
||||
|
||||
For example:
|
||||
|
||||
$ rclone version
|
||||
rclone v1.55.0
|
||||
- os/version: ubuntu 18.04 (64 bit)
|
||||
- os/kernel: 4.15.0-136-generic (x86_64)
|
||||
- os/type: linux
|
||||
- os/arch: amd64
|
||||
- go/version: go1.16
|
||||
- go/linking: static
|
||||
- go/tags: none
|
||||
` + "```sh" + `
|
||||
$ rclone version
|
||||
rclone v1.55.0
|
||||
- os/version: ubuntu 18.04 (64 bit)
|
||||
- os/kernel: 4.15.0-136-generic (x86_64)
|
||||
- os/type: linux
|
||||
- os/arch: amd64
|
||||
- go/version: go1.16
|
||||
- go/linking: static
|
||||
- go/tags: none
|
||||
` + "```" + `
|
||||
|
||||
Note: before rclone version 1.55 the os/type and os/arch lines were merged,
|
||||
and the "go/version" line was tagged as "go version".
|
||||
@@ -58,24 +60,27 @@ Note: before rclone version 1.55 the os/type and os/arch lines were merged,
|
||||
If you supply the --check flag, then it will do an online check to
|
||||
compare your version with the latest release and the latest beta.
|
||||
|
||||
$ rclone version --check
|
||||
yours: 1.42.0.6
|
||||
latest: 1.42 (released 2018-06-16)
|
||||
beta: 1.42.0.5 (released 2018-06-17)
|
||||
` + "```sh" + `
|
||||
$ rclone version --check
|
||||
yours: 1.42.0.6
|
||||
latest: 1.42 (released 2018-06-16)
|
||||
beta: 1.42.0.5 (released 2018-06-17)
|
||||
` + "```" + `
|
||||
|
||||
Or
|
||||
|
||||
$ rclone version --check
|
||||
yours: 1.41
|
||||
latest: 1.42 (released 2018-06-16)
|
||||
upgrade: https://downloads.rclone.org/v1.42
|
||||
beta: 1.42.0.5 (released 2018-06-17)
|
||||
upgrade: https://beta.rclone.org/v1.42-005-g56e1e820
|
||||
` + "```sh" + `
|
||||
$ rclone version --check
|
||||
yours: 1.41
|
||||
latest: 1.42 (released 2018-06-16)
|
||||
upgrade: https://downloads.rclone.org/v1.42
|
||||
beta: 1.42.0.5 (released 2018-06-17)
|
||||
upgrade: https://beta.rclone.org/v1.42-005-g56e1e820
|
||||
` + "```" + `
|
||||
|
||||
If you supply the --deps flag then rclone will print a list of all the
|
||||
packages it depends on and their versions along with some other
|
||||
information about the build.
|
||||
`,
|
||||
information about the build.`,
|
||||
Annotations: map[string]string{
|
||||
"versionIntroduced": "v1.33",
|
||||
},
|
||||
|
||||
@@ -85,11 +85,11 @@ Rclone helps you:
|
||||
## Features {#features}
|
||||
|
||||
- Transfers
|
||||
- MD5, SHA1 hashes are checked at all times for file integrity
|
||||
- Timestamps are preserved on files
|
||||
- Operations can be restarted at any time
|
||||
- Can be to and from network, e.g. two different cloud providers
|
||||
- Can use multi-threaded downloads to local disk
|
||||
- MD5, SHA1 hashes are checked at all times for file integrity
|
||||
- Timestamps are preserved on files
|
||||
- Operations can be restarted at any time
|
||||
- Can be to and from network, e.g. two different cloud providers
|
||||
- Can use multi-threaded downloads to local disk
|
||||
- [Copy](/commands/rclone_copy/) new or changed files to cloud storage
|
||||
- [Sync](/commands/rclone_sync/) (one way) to make a directory identical
|
||||
- [Bisync](/bisync/) (two way) to keep two directories in sync bidirectionally
|
||||
@@ -216,10 +216,9 @@ These backends adapt or modify other storage providers:
|
||||
{{< provider name="Hasher: Hash files" home="/hasher/" config="/hasher/" >}}
|
||||
{{< provider name="Union: Join multiple remotes to work together" home="/union/" config="/union/" >}}
|
||||
|
||||
|
||||
## Links
|
||||
|
||||
* {{< icon "fa fa-home" >}} [Home page](https://rclone.org/)
|
||||
* {{< icon "fab fa-github" >}} [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
|
||||
* {{< icon "fa fa-comments" >}} [Rclone Forum](https://forum.rclone.org)
|
||||
* {{< icon "fas fa-cloud-download-alt" >}}[Downloads](/downloads/)
|
||||
- {{< icon "fa fa-home" >}} [Home page](https://rclone.org/)
|
||||
- {{< icon "fab fa-github" >}} [GitHub project page for source and bug tracker](https://github.com/rclone/rclone)
|
||||
- {{< icon "fa fa-comments" >}} [Rclone Forum](https://forum.rclone.org)
|
||||
- {{< icon "fas fa-cloud-download-alt" >}}[Downloads](/downloads/)
|
||||
|
||||
@@ -8,7 +8,7 @@ versionIntroduced: "v1.40"
|
||||
|
||||
The `alias` remote provides a new name for another remote.
|
||||
|
||||
Paths may be as deep as required or a local path,
|
||||
Paths may be as deep as required or a local path,
|
||||
e.g. `remote:directory/subdirectory` or `/directory/subdirectory`.
|
||||
|
||||
During the initial setup with `rclone config` you will specify the target
|
||||
@@ -24,9 +24,9 @@ Invoking `rclone mkdir backup:../desktop` is exactly the same as invoking
|
||||
The empty path is not allowed as a remote. To alias the current directory
|
||||
use `.` instead.
|
||||
|
||||
The target remote can also be a [connection string](/docs/#connection-strings).
|
||||
The target remote can also be a [connection string](/docs/#connection-strings).
|
||||
This can be used to modify the config of a remote for different uses, e.g.
|
||||
the alias `myDriveTrash` with the target remote `myDrive,trashed_only:`
|
||||
the alias `myDriveTrash` with the target remote `myDrive,trashed_only:`
|
||||
can be used to only show the trashed files in `myDrive`.
|
||||
|
||||
## Configuration
|
||||
@@ -34,11 +34,13 @@ can be used to only show the trashed files in `myDrive`.
|
||||
Here is an example of how to make an alias called `remote` for local folder.
|
||||
First run:
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
@@ -85,15 +87,21 @@ Once configured you can then use `rclone` like this,
|
||||
|
||||
List directories in top level in `/mnt/storage/backup`
|
||||
|
||||
rclone lsd remote:
|
||||
```sh
|
||||
rclone lsd remote:
|
||||
```
|
||||
|
||||
List all the files in `/mnt/storage/backup`
|
||||
|
||||
rclone ls remote:
|
||||
```sh
|
||||
rclone ls remote:
|
||||
```
|
||||
|
||||
Copy another local directory to the alias directory called source
|
||||
|
||||
rclone copy /home/source remote:source
|
||||
```sh
|
||||
rclone copy /home/source remote:source
|
||||
```
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/alias/alias.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
@@ -15,11 +15,13 @@ command.) You may put subdirectories in too, e.g.
|
||||
Here is an example of making a Microsoft Azure Blob Storage
|
||||
configuration. For a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
@@ -55,20 +57,28 @@ y/e/d> y
|
||||
|
||||
See all containers
|
||||
|
||||
rclone lsd remote:
|
||||
```sh
|
||||
rclone lsd remote:
|
||||
```
|
||||
|
||||
Make a new container
|
||||
|
||||
rclone mkdir remote:container
|
||||
```sh
|
||||
rclone mkdir remote:container
|
||||
```
|
||||
|
||||
List the contents of a container
|
||||
|
||||
rclone ls remote:container
|
||||
```sh
|
||||
rclone ls remote:container
|
||||
```
|
||||
|
||||
Sync `/home/local/directory` to the remote container, deleting any excess
|
||||
files in the container.
|
||||
|
||||
rclone sync --interactive /home/local/directory remote:container
|
||||
```sh
|
||||
rclone sync --interactive /home/local/directory remote:container
|
||||
```
|
||||
|
||||
### --fast-list
|
||||
|
||||
@@ -147,26 +157,35 @@ user with a password, depending on which environment variable are set.
|
||||
It reads configuration from these variables, in the following order:
|
||||
|
||||
1. Service principal with client secret
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its
|
||||
"directory" ID.
|
||||
- `AZURE_CLIENT_ID`: the service principal's client ID
|
||||
- `AZURE_CLIENT_SECRET`: one of the service principal's client secrets
|
||||
2. Service principal with certificate
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its
|
||||
"directory" ID.
|
||||
- `AZURE_CLIENT_ID`: the service principal's client ID
|
||||
- `AZURE_CLIENT_CERTIFICATE_PATH`: path to a PEM or PKCS12 certificate file including the private key.
|
||||
- `AZURE_CLIENT_CERTIFICATE_PASSWORD`: (optional) password for the certificate file.
|
||||
- `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN`: (optional) Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to "true" or "1", authentication requests include the x5c header.
|
||||
- `AZURE_CLIENT_CERTIFICATE_PATH`: path to a PEM or PKCS12 certificate file
|
||||
including the private key.
|
||||
- `AZURE_CLIENT_CERTIFICATE_PASSWORD`: (optional) password for the
|
||||
certificate file.
|
||||
- `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN`: (optional) Specifies whether an
|
||||
authentication request will include an x5c header to support subject
|
||||
name / issuer based authentication. When set to "true" or "1",
|
||||
authentication requests include the x5c header.
|
||||
3. User with username and password
|
||||
- `AZURE_TENANT_ID`: (optional) tenant to authenticate in. Defaults to "organizations".
|
||||
- `AZURE_CLIENT_ID`: client ID of the application the user will authenticate to
|
||||
- `AZURE_CLIENT_ID`: client ID of the application the user will authenticate
|
||||
to
|
||||
- `AZURE_USERNAME`: a username (usually an email address)
|
||||
- `AZURE_PASSWORD`: the user's password
|
||||
4. Workload Identity
|
||||
- `AZURE_TENANT_ID`: Tenant to authenticate in.
|
||||
- `AZURE_CLIENT_ID`: Client ID of the application the user will authenticate to.
|
||||
- `AZURE_FEDERATED_TOKEN_FILE`: Path to projected service account token file.
|
||||
- `AZURE_AUTHORITY_HOST`: Authority of an Azure Active Directory endpoint (default: login.microsoftonline.com).
|
||||
|
||||
- `AZURE_TENANT_ID`: Tenant to authenticate in
|
||||
- `AZURE_CLIENT_ID`: Client ID of the application the user will authenticate
|
||||
to
|
||||
- `AZURE_FEDERATED_TOKEN_FILE`: Path to projected service account token file
|
||||
- `AZURE_AUTHORITY_HOST`: Authority of an Azure Active Directory endpoint
|
||||
(default: login.microsoftonline.com).
|
||||
|
||||
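The `env_auth` lookup order above closely mirrors what the Azure SDK's default credential chain reads from the environment. A minimal, hedged Go sketch using the `azidentity` package (the storage scope string is an assumption for illustration and is not taken from this diff):

```go
package main

import (
	"context"
	"log"

	"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
	"github.com/Azure/azure-sdk-for-go/sdk/azidentity"
)

func main() {
	// DefaultAzureCredential tries the environment variables listed above
	// (service principal, certificate, username/password, workload identity)
	// before falling back to managed identity and the az CLI.
	cred, err := azidentity.NewDefaultAzureCredential(nil)
	if err != nil {
		log.Fatalf("create credential: %v", err)
	}

	token, err := cred.GetToken(context.Background(), policy.TokenRequestOptions{
		Scopes: []string{"https://storage.azure.com/.default"},
	})
	if err != nil {
		log.Fatalf("get token: %v", err)
	}
	log.Printf("token acquired, expires %s", token.ExpiresOn)
}
```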
##### Env Auth: 2. Managed Service Identity Credentials
|
||||
|
||||
@@ -193,19 +212,27 @@ Credentials created with the `az` tool can be picked up using `env_auth`.
|
||||
|
||||
For example if you were to login with a service principal like this:
|
||||
|
||||
az login --service-principal -u XXX -p XXX --tenant XXX
|
||||
```sh
|
||||
az login --service-principal -u XXX -p XXX --tenant XXX
|
||||
```
|
||||
|
||||
Then you could access rclone resources like this:
|
||||
|
||||
rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER
|
||||
```sh
|
||||
rclone lsf :azureblob,env_auth,account=ACCOUNT:CONTAINER
|
||||
```
|
||||
|
||||
Or
|
||||
|
||||
rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
|
||||
```sh
|
||||
rclone lsf --azureblob-env-auth --azureblob-account=ACCOUNT :azureblob:CONTAINER
|
||||
```
|
||||
|
||||
Which is analogous to using the `az` tool:
|
||||
|
||||
az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login
|
||||
```sh
|
||||
az storage blob list --container-name CONTAINER --account-name ACCOUNT --auth-mode login
|
||||
```
|
||||
|
||||
#### Account and Shared Key
|
||||
|
||||
@@ -226,18 +253,24 @@ explorer in the Azure portal.
|
||||
If you use a container level SAS URL, rclone operations are permitted
|
||||
only on a particular container, e.g.
|
||||
|
||||
rclone ls azureblob:container
|
||||
```sh
|
||||
rclone ls azureblob:container
|
||||
```
|
||||
|
||||
You can also list the single container from the root. This will only
|
||||
show the container specified by the SAS URL.
|
||||
|
||||
$ rclone lsd azureblob:
|
||||
container/
|
||||
```sh
|
||||
$ rclone lsd azureblob:
|
||||
container/
|
||||
```
|
||||
|
||||
Note that you can't see or access any other containers - this will
|
||||
fail
|
||||
|
||||
rclone ls azureblob:othercontainer
|
||||
```sh
|
||||
rclone ls azureblob:othercontainer
|
||||
```
|
||||
|
||||
Container level SAS URLs are useful for temporarily allowing third
|
||||
parties access to a single container or putting credentials into an
|
||||
@@ -245,7 +278,8 @@ untrusted environment such as a CI build server.
|
||||
|
||||
#### Service principal with client secret
|
||||
|
||||
If these variables are set, rclone will authenticate with a service principal with a client secret.
|
||||
If these variables are set, rclone will authenticate with a service principal
|
||||
with a client secret.
|
||||
|
||||
- `tenant`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `client_id`: the service principal's client ID
|
||||
@@ -256,13 +290,18 @@ The credentials can also be placed in a file using the
|
||||
|
||||
#### Service principal with certificate
|
||||
|
||||
If these variables are set, rclone will authenticate with a service principal with certificate.
|
||||
If these variables are set, rclone will authenticate with a service principal
|
||||
with certificate.
|
||||
|
||||
- `tenant`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `client_id`: the service principal's client ID
|
||||
- `client_certificate_path`: path to a PEM or PKCS12 certificate file including the private key.
|
||||
- `client_certificate_path`: path to a PEM or PKCS12 certificate file including
|
||||
the private key.
|
||||
- `client_certificate_password`: (optional) password for the certificate file.
|
||||
- `client_send_certificate_chain`: (optional) Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to "true" or "1", authentication requests include the x5c header.
|
||||
- `client_send_certificate_chain`: (optional) Specifies whether an
|
||||
authentication request will include an x5c header to support subject name /
|
||||
issuer based authentication. When set to "true" or "1", authentication
|
||||
requests include the x5c header.
|
||||
|
||||
**NB** `client_certificate_password` must be obscured - see [rclone obscure](/commands/rclone_obscure/).
|
||||
|
||||
@@ -297,15 +336,18 @@ be explicitly specified using exactly one of the `msi_object_id`,
|
||||
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
|
||||
set, this is equivalent to using `env_auth`.

#### Federated Identity Credentials
#### Federated Identity Credentials

If these variables are set, rclone will authenticate with federated identity.

- `tenant_id`: tenant_id to authenticate in storage
|
||||
- `client_id`: client ID of the application the user will authenticate to storage
|
||||
- `msi_client_id`: managed identity client ID of the application the user will authenticate to
|
||||
- `msi_client_id`: managed identity client ID of the application the user will
|
||||
authenticate to
|
||||
|
||||
By default "api://AzureADTokenExchange" is used as scope for token retrieval over MSI. This token is then exchanged for actual storage token using 'tenant_id' and 'client_id'.
|
||||
By default "api://AzureADTokenExchange" is used as scope for token retrieval
|
||||
over MSI. This token is then exchanged for actual storage token using
|
||||
'tenant_id' and 'client_id'.
|
||||
|
||||
#### Azure CLI tool `az` {#use_az}
|
||||
|
||||
@@ -322,7 +364,9 @@ Don't set `env_auth` at the same time.
|
||||
If you want to access resources with public anonymous access then set
|
||||
`account` only. You can do this without making an rclone config:
|
||||
|
||||
rclone lsf :azureblob,account=ACCOUNT:CONTAINER
|
||||
```sh
|
||||
rclone lsf :azureblob,account=ACCOUNT:CONTAINER
|
||||
```
|
||||
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/azureblob/azureblob.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
@@ -14,11 +14,13 @@ e.g. `remote:path/to/dir`.
|
||||
Here is an example of making a Microsoft Azure Files Storage
|
||||
configuration. For a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
@@ -88,20 +90,28 @@ Once configured you can use rclone.
|
||||
|
||||
See all files in the top level:
|
||||
|
||||
rclone lsf remote:
|
||||
```sh
|
||||
rclone lsf remote:
|
||||
```
|
||||
|
||||
Make a new directory in the root:
|
||||
|
||||
rclone mkdir remote:dir
|
||||
```sh
|
||||
rclone mkdir remote:dir
|
||||
```
|
||||
|
||||
Recursively List the contents:
|
||||
|
||||
rclone ls remote:
|
||||
```sh
|
||||
rclone ls remote:
|
||||
```
|
||||
|
||||
Sync `/home/local/directory` to the remote directory, deleting any
|
||||
excess files in the directory.
|
||||
|
||||
rclone sync --interactive /home/local/directory remote:dir
|
||||
```sh
|
||||
rclone sync --interactive /home/local/directory remote:dir
|
||||
```
|
||||
|
||||
### Modified time
|
||||
|
||||
@@ -173,26 +183,35 @@ user with a password, depending on which environment variable are set.
|
||||
It reads configuration from these variables, in the following order:
|
||||
|
||||
1. Service principal with client secret
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its
|
||||
"directory" ID.
|
||||
- `AZURE_CLIENT_ID`: the service principal's client ID
|
||||
- `AZURE_CLIENT_SECRET`: one of the service principal's client secrets
|
||||
2. Service principal with certificate
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `AZURE_TENANT_ID`: ID of the service principal's tenant. Also called its
|
||||
"directory" ID.
|
||||
- `AZURE_CLIENT_ID`: the service principal's client ID
|
||||
- `AZURE_CLIENT_CERTIFICATE_PATH`: path to a PEM or PKCS12 certificate file including the private key.
|
||||
- `AZURE_CLIENT_CERTIFICATE_PASSWORD`: (optional) password for the certificate file.
|
||||
- `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN`: (optional) Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to "true" or "1", authentication requests include the x5c header.
|
||||
- `AZURE_CLIENT_CERTIFICATE_PATH`: path to a PEM or PKCS12 certificate file
|
||||
including the private key.
|
||||
- `AZURE_CLIENT_CERTIFICATE_PASSWORD`: (optional) password for the
|
||||
certificate file.
|
||||
- `AZURE_CLIENT_SEND_CERTIFICATE_CHAIN`: (optional) Specifies whether an
|
||||
authentication request will include an x5c header to support subject
|
||||
name / issuer based authentication. When set to "true" or "1",
|
||||
authentication requests include the x5c header.
|
||||
3. User with username and password
|
||||
- `AZURE_TENANT_ID`: (optional) tenant to authenticate in. Defaults to "organizations".
|
||||
- `AZURE_CLIENT_ID`: client ID of the application the user will authenticate to
|
||||
- `AZURE_CLIENT_ID`: client ID of the application the user will authenticate
|
||||
to
|
||||
- `AZURE_USERNAME`: a username (usually an email address)
|
||||
- `AZURE_PASSWORD`: the user's password
|
||||
4. Workload Identity
|
||||
- `AZURE_TENANT_ID`: Tenant to authenticate in.
|
||||
- `AZURE_CLIENT_ID`: Client ID of the application the user will authenticate to.
|
||||
- `AZURE_FEDERATED_TOKEN_FILE`: Path to projected service account token file.
|
||||
- `AZURE_AUTHORITY_HOST`: Authority of an Azure Active Directory endpoint (default: login.microsoftonline.com).
|
||||
|
||||
- `AZURE_TENANT_ID`: Tenant to authenticate in
|
||||
- `AZURE_CLIENT_ID`: Client ID of the application the user will authenticate
|
||||
to
|
||||
- `AZURE_FEDERATED_TOKEN_FILE`: Path to projected service account token file
|
||||
- `AZURE_AUTHORITY_HOST`: Authority of an Azure Active Directory endpoint
|
||||
(default: login.microsoftonline.com).
|
||||
|
||||
##### Env Auth: 2. Managed Service Identity Credentials
|
||||
|
||||
@@ -219,15 +238,21 @@ Credentials created with the `az` tool can be picked up using `env_auth`.
|
||||
|
||||
For example if you were to login with a service principal like this:
|
||||
|
||||
az login --service-principal -u XXX -p XXX --tenant XXX
|
||||
```sh
|
||||
az login --service-principal -u XXX -p XXX --tenant XXX
|
||||
```
|
||||
|
||||
Then you could access rclone resources like this:
|
||||
|
||||
rclone lsf :azurefiles,env_auth,account=ACCOUNT:
|
||||
```sh
|
||||
rclone lsf :azurefiles,env_auth,account=ACCOUNT:
|
||||
```
|
||||
|
||||
Or
|
||||
|
||||
rclone lsf --azurefiles-env-auth --azurefiles-account=ACCOUNT :azurefiles:
|
||||
```sh
|
||||
rclone lsf --azurefiles-env-auth --azurefiles-account=ACCOUNT :azurefiles:
|
||||
```
|
||||
|
||||
#### Account and Shared Key
|
||||
|
||||
@@ -244,7 +269,8 @@ To use it leave `account`, `key` and "sas_url" blank and fill in `connection_str
|
||||
|
||||
#### Service principal with client secret
|
||||
|
||||
If these variables are set, rclone will authenticate with a service principal with a client secret.
|
||||
If these variables are set, rclone will authenticate with a service principal
|
||||
with a client secret.
|
||||
|
||||
- `tenant`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `client_id`: the service principal's client ID
|
||||
@@ -255,13 +281,18 @@ The credentials can also be placed in a file using the
|
||||
|
||||
#### Service principal with certificate
|
||||
|
||||
If these variables are set, rclone will authenticate with a service principal with certificate.
|
||||
If these variables are set, rclone will authenticate with a service principal
|
||||
with certificate.
|
||||
|
||||
- `tenant`: ID of the service principal's tenant. Also called its "directory" ID.
|
||||
- `client_id`: the service principal's client ID
|
||||
- `client_certificate_path`: path to a PEM or PKCS12 certificate file including the private key.
|
||||
- `client_certificate_path`: path to a PEM or PKCS12 certificate file including
|
||||
the private key.
|
||||
- `client_certificate_password`: (optional) password for the certificate file.
|
||||
- `client_send_certificate_chain`: (optional) Specifies whether an authentication request will include an x5c header to support subject name / issuer based authentication. When set to "true" or "1", authentication requests include the x5c header.
|
||||
- `client_send_certificate_chain`: (optional) Specifies whether an authentication
|
||||
request will include an x5c header to support subject name / issuer based
|
||||
authentication. When set to "true" or "1", authentication requests include
|
||||
the x5c header.
|
||||
|
||||
**NB** `client_certificate_password` must be obscured - see [rclone obscure](/commands/rclone_obscure/).
|
||||
|
||||
@@ -296,17 +327,21 @@ be explicitly specified using exactly one of the `msi_object_id`,
|
||||
If none of `msi_object_id`, `msi_client_id`, or `msi_mi_res_id` is
|
||||
set, this is equivalent to using `env_auth`.

#### Federated Identity Credentials
#### Federated Identity Credentials

If these variables are set, rclone will authenticate with federated identity.

- `tenant_id`: tenant_id to authenticate in storage
|
||||
- `client_id`: client ID of the application the user will authenticate to storage
|
||||
- `msi_client_id`: managed identity client ID of the application the user will authenticate to
|
||||
- `msi_client_id`: managed identity client ID of the application the user will
|
||||
authenticate to
|
||||
|
||||
By default "api://AzureADTokenExchange" is used as scope for token retrieval
|
||||
over MSI. This token is then exchanged for actual storage token using 'tenant_id'
|
||||
and 'client_id'.
|
||||
|
||||
By default "api://AzureADTokenExchange" is used as scope for token retrieval over MSI. This token is then exchanged for actual storage token using 'tenant_id' and 'client_id'.
|
||||
|
||||
#### Azure CLI tool `az` {#use_az}
|
||||
|
||||
Set to use the [Azure CLI tool `az`](https://learn.microsoft.com/en-us/cli/azure/)
|
||||
as the sole means of authentication.
|
||||
Setting this can be useful if you wish to use the `az` CLI on a host with
|
||||
|
||||
@@ -15,7 +15,9 @@ command.) You may put subdirectories in too, e.g. `remote:bucket/path/to/dir`.
|
||||
|
||||
Here is an example of making a b2 configuration. First run
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process. To authenticate
|
||||
you will either need your Account ID (a short hex number) and Master
|
||||
@@ -23,8 +25,8 @@ Application Key (a long hex number) OR an Application Key, which is the
|
||||
recommended method. See below for further details on generating and using
|
||||
an Application Key.
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
```text
|
||||
No remotes found, make a new one\?
|
||||
n) New remote
|
||||
q) Quit config
|
||||
n/q> n
|
||||
@@ -60,20 +62,29 @@ This remote is called `remote` and can now be used like this
|
||||
|
||||
See all buckets
|
||||
|
||||
rclone lsd remote:
|
||||
```sh
|
||||
rclone lsd remote:
|
||||
```
|
||||
|
||||
Create a new bucket
|
||||
|
||||
rclone mkdir remote:bucket
|
||||
```sh
|
||||
rclone mkdir remote:bucket
|
||||
```
|
||||
|
||||
List the contents of a bucket
|
||||
|
||||
rclone ls remote:bucket
|
||||
```sh
|
||||
rclone ls remote:bucket
|
||||
```
|
||||
|
||||
|
||||
Sync `/home/local/directory` to the remote bucket, deleting any
|
||||
excess files in the bucket.
|
||||
|
||||
rclone sync --interactive /home/local/directory remote:bucket
|
||||
```sh
|
||||
rclone sync --interactive /home/local/directory remote:bucket
|
||||
```
|
||||
|
||||
### Application Keys
|
||||
|
||||
@@ -219,7 +230,7 @@ version followed by a `cleanup` of the old versions.
|
||||
|
||||
Show current version and all the versions with `--b2-versions` flag.
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone -q ls b2:cleanup-test
|
||||
9 one.txt
|
||||
|
||||
@@ -232,7 +243,7 @@ $ rclone -q --b2-versions ls b2:cleanup-test
|
||||
|
||||
Retrieve an old version
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone -q --b2-versions copy b2:cleanup-test/one-v2016-07-04-141003-000.txt /tmp
|
||||
|
||||
$ ls -l /tmp/one-v2016-07-04-141003-000.txt
|
||||
@@ -241,7 +252,7 @@ $ ls -l /tmp/one-v2016-07-04-141003-000.txt
|
||||
|
||||
Clean up all the old versions and show that they've gone.
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone -q cleanup b2:cleanup-test
|
||||
|
||||
$ rclone -q ls b2:cleanup-test
|
||||
@@ -256,11 +267,13 @@ $ rclone -q --b2-versions ls b2:cleanup-test
|
||||
When using `--b2-versions` flag rclone is relying on the file name
|
||||
to work out whether the objects are versions or not. Versions' names
|
||||
are created by inserting timestamp between file name and its extension.
|
||||
```
|
||||
|
||||
```sh
|
||||
9 file.txt
|
||||
8 file-v2023-07-17-161032-000.txt
|
||||
16 file-v2023-06-15-141003-000.txt
|
||||
```
|
||||
|
||||
If there are real files present with the same names as versions, then
|
||||
behaviour of `--b2-versions` can be unpredictable.
|
||||
|
||||
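Since `--b2-versions` reconstructs version names purely from the timestamp embedded in the file name (as in the listing above), the mapping can be illustrated in a few lines of Go. The format string below is an assumption matching the `-v2023-07-17-161032-000` pattern shown, not code taken from rclone:

```go
package main

import (
	"fmt"
	"path"
	"strings"
	"time"
)

// versionName inserts a timestamp between the base name and the extension,
// mirroring the pattern shown above: file-v2023-07-17-161032-000.txt
func versionName(name string, t time.Time) string {
	ext := path.Ext(name)
	base := strings.TrimSuffix(name, ext)
	return fmt.Sprintf("%s-v%s-000%s", base, t.Format("2006-01-02-150405"), ext)
}

func main() {
	t := time.Date(2023, 7, 17, 16, 10, 32, 0, time.UTC)
	fmt.Println(versionName("file.txt", t)) // file-v2023-07-17-161032-000.txt
}
```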
@@ -270,7 +283,7 @@ It is useful to know how many requests are sent to the server in different scena
|
||||
|
||||
All copy commands send the following 4 requests:
|
||||
|
||||
```
|
||||
```text
|
||||
/b2api/v1/b2_authorize_account
|
||||
/b2api/v1/b2_create_bucket
|
||||
/b2api/v1/b2_list_buckets
|
||||
@@ -287,7 +300,7 @@ require any files to be uploaded, no more requests will be sent.
|
||||
Uploading files that do not require chunking, will send 2 requests per
|
||||
file upload:
|
||||
|
||||
```
|
||||
```text
|
||||
/b2api/v1/b2_get_upload_url
|
||||
/b2api/v1/b2_upload_file/
|
||||
```
|
||||
@@ -295,7 +308,7 @@ file upload:
|
||||
Uploading files requiring chunking, will send 2 requests (one each to
|
||||
start and finish the upload) and another 2 requests for each chunk:
|
||||
|
||||
```
|
||||
```text
|
||||
/b2api/v1/b2_start_large_file
|
||||
/b2api/v1/b2_get_upload_part_url
|
||||
/b2api/v1/b2_upload_part/
|
||||
@@ -309,14 +322,14 @@ rclone will show and act on older versions of files. For example
|
||||
|
||||
Listing without `--b2-versions`
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone -q ls b2:cleanup-test
|
||||
9 one.txt
|
||||
```
|
||||
|
||||
And with
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone -q --b2-versions ls b2:cleanup-test
|
||||
9 one.txt
|
||||
8 one-v2016-07-04-141032-000.txt
|
||||
@@ -336,7 +349,7 @@ permitted, so you can't upload files or delete them.
|
||||
Rclone supports generating file share links for private B2 buckets.
|
||||
They can either be for a file for example:
|
||||
|
||||
```
|
||||
```sh
|
||||
./rclone link B2:bucket/path/to/file.txt
|
||||
https://f002.backblazeb2.com/file/bucket/path/to/file.txt?Authorization=xxxxxxxx
|
||||
|
||||
@@ -344,7 +357,7 @@ https://f002.backblazeb2.com/file/bucket/path/to/file.txt?Authorization=xxxxxxxx
|
||||
|
||||
or if run on a directory you will get:
|
||||
|
||||
```
|
||||
```sh
|
||||
./rclone link B2:bucket/path
|
||||
https://f002.backblazeb2.com/file/bucket/path?Authorization=xxxxxxxx
|
||||
```
|
||||
@@ -352,7 +365,7 @@ https://f002.backblazeb2.com/file/bucket/path?Authorization=xxxxxxxx
|
||||
you can then use the authorization token (the part of the url from the
|
||||
`?Authorization=` on) on any file path under that directory. For example:
|
||||
|
||||
```
|
||||
```text
|
||||
https://f002.backblazeb2.com/file/bucket/path/to/file1?Authorization=xxxxxxxx
|
||||
https://f002.backblazeb2.com/file/bucket/path/file2?Authorization=xxxxxxxx
|
||||
https://f002.backblazeb2.com/file/bucket/path/folder/file3?Authorization=xxxxxxxx
|
||||
|
||||
@@ -31,7 +31,7 @@ section) before using, or data loss can result. Questions can be asked in the
|
||||
|
||||
For example, your first command might look like this:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone bisync remote1:path1 remote2:path2 --create-empty-src-dirs --compare size,modtime,checksum --slow-hash-sync-only --resilient -MvP --drive-skip-gdocs --fix-case --resync --dry-run
|
||||
```
|
||||
|
||||
@@ -40,7 +40,7 @@ After that, remove `--resync` as well.
|
||||
|
||||
Here is a typical run log (with timestamps removed for clarity):
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone bisync /testdir/path1/ /testdir/path2/ --verbose
|
||||
INFO : Synching Path1 "/testdir/path1/" with Path2 "/testdir/path2/"
|
||||
INFO : Path1 checking for diffs
|
||||
@@ -86,7 +86,7 @@ INFO : Bisync successful
|
||||
|
||||
## Command line syntax
|
||||
|
||||
```bash
|
||||
```sh
|
||||
$ rclone bisync --help
|
||||
Usage:
|
||||
rclone bisync remote1:path1 remote2:path2 [flags]
|
||||
@@ -169,7 +169,7 @@ be copied to Path1, and the process will then copy the Path1 tree to Path2.
|
||||
The `--resync` sequence is roughly equivalent to the following
|
||||
(but see [`--resync-mode`](#resync-mode) for other options):
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone copy Path2 Path1 --ignore-existing [--create-empty-src-dirs]
|
||||
rclone copy Path1 Path2 [--create-empty-src-dirs]
|
||||
```
|
||||
@@ -225,7 +225,7 @@ Shutdown](#graceful-shutdown) mode, when needed) for a very robust
|
||||
almost any interruption it might encounter. Consider adding something like the
|
||||
following:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
--resilient --recover --max-lock 2m --conflict-resolve newer
|
||||
```
|
||||
|
||||
@@ -353,13 +353,13 @@ simultaneously (or just `modtime` AND `checksum`).
|
||||
being `size`, `modtime`, and `checksum`. For example, if you want to compare
|
||||
size and checksum, but not modtime, you would do:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
--compare size,checksum
|
||||
```
|
||||
|
||||
Or if you want to compare all three:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
--compare size,modtime,checksum
|
||||
```
|
||||
|
||||
@@ -627,7 +627,7 @@ specified (or when two identical suffixes are specified.) i.e. with
|
||||
`--conflict-loser pathname`, all of the following would produce exactly the
|
||||
same result:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
--conflict-suffix path
|
||||
--conflict-suffix path,path
|
||||
--conflict-suffix path1,path2
|
||||
@@ -642,7 +642,7 @@ changed with the [`--suffix-keep-extension`](/docs/#suffix-keep-extension) flag
|
||||
curly braces as globs. This can be helpful to track the date and/or time that
|
||||
each conflict was handled by bisync. For example:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
--conflict-suffix {DateOnly}-conflict
|
||||
// result: myfile.txt.2006-01-02-conflict1
|
||||
```
|
||||
@@ -667,7 +667,7 @@ conflicts with `..path1` and `..path2` (with two periods, and `path` instead of
|
||||
additional dots can be added by including them in the specified suffix string.
|
||||
For example, for behavior equivalent to the previous default, use:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
[--conflict-resolve none] --conflict-loser pathname --conflict-suffix .path
|
||||
```
|
||||
|
||||
@@ -707,13 +707,13 @@ For example, a possible sequence could look like this:
|
||||
|
||||
1. Normally scheduled bisync run:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone bisync Path1 Path2 -MPc --check-access --max-delete 10 --filters-file /path/to/filters.txt -v --no-cleanup --ignore-listing-checksum --disable ListR --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient
|
||||
```
|
||||
|
||||
2. Periodic independent integrity check (perhaps scheduled nightly or weekly):
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone check -MvPc Path1 Path2 --filter-from /path/to/filters.txt
|
||||
```
|
||||
|
||||
@@ -721,7 +721,7 @@ For example, a possible sequence could look like this:
|
||||
If one side is more up-to-date and you want to make the other side match it,
|
||||
you could run:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone sync Path1 Path2 --filter-from /path/to/filters.txt --create-empty-src-dirs -MPc -v
|
||||
```
|
||||
|
||||
@@ -851,7 +851,7 @@ override `--backup-dir`.
|
||||
|
||||
Example:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
rclone bisync /Users/someuser/some/local/path/Bisync gdrive:Bisync --backup-dir1 /Users/someuser/some/local/path/BackupDir --backup-dir2 gdrive:BackupDir --suffix -2023-08-26 --suffix-keep-extension --check-access --max-delete 10 --filters-file /Users/someuser/some/local/path/bisync_filters.txt --no-cleanup --ignore-listing-checksum --checkers=16 --drive-pacer-min-sleep=10ms --create-empty-src-dirs --resilient -MvP --drive-skip-gdocs --fix-case
|
||||
```
|
||||
|
||||
@@ -1383,7 +1383,7 @@ listings and thus not checked during the check access phase.
|
||||
Here are two normal runs. The first one has a newer file on the remote.
|
||||
The second has no deltas between local and remote.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
2021/05/16 00:24:38 INFO : Synching Path1 "/path/to/local/tree/" with Path2 "dropbox:/"
|
||||
2021/05/16 00:24:38 INFO : Path1 checking for diffs
|
||||
2021/05/16 00:24:38 INFO : - Path1 File is new - file.txt
|
||||
@@ -1433,7 +1433,7 @@ numerous such messages in the log.
|
||||
Since there are no final error/warning messages on line *7*, rclone has
|
||||
recovered from failure after a retry, and the overall sync was successful.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
1: 2021/05/14 00:44:12 INFO : Synching Path1 "/path/to/local/tree" with Path2 "dropbox:"
|
||||
2: 2021/05/14 00:44:12 INFO : Path1 checking for diffs
|
||||
3: 2021/05/14 00:44:12 INFO : Path2 checking for diffs
|
||||
@@ -1446,7 +1446,7 @@ recovered from failure after a retry, and the overall sync was successful.
|
||||
This log shows a *Critical failure* which requires a `--resync` to recover from.
|
||||
See the [Runtime Error Handling](#error-handling) section.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
2021/05/12 00:49:40 INFO : Google drive root '': Waiting for checks to finish
|
||||
2021/05/12 00:49:40 INFO : Google drive root '': Waiting for transfers to finish
|
||||
2021/05/12 00:49:40 INFO : Google drive root '': not deleting files as there were IO errors
|
||||
@@ -1531,7 +1531,7 @@ on Linux you can use *Cron* which is described below.
|
||||
The 1st example runs a sync every 5 minutes between a local directory
|
||||
and an OwnCloud server, with output logged to a runlog file:
|
||||
|
||||
```bash
|
||||
```sh
|
||||
# Minute (0-59)
|
||||
# Hour (0-23)
|
||||
# Day of Month (1-31)
|
||||
@@ -1548,7 +1548,7 @@ If you run `rclone bisync` as a cron job, redirect stdout/stderr to a file.
|
||||
The 2nd example runs a sync to Dropbox every hour and logs all stdout (via the `>>`)
|
||||
and stderr (via `2>&1`) to a log file.
|
||||
|
||||
```bash
|
||||
```sh
|
||||
0 * * * * /path/to/rclone bisync /path/to/local/dropbox Dropbox: --check-access --filters-file /home/user/filters.txt >> /path/to/logs/dropbox-run.log 2>&1
|
||||
```
|
||||
|
||||
@@ -1630,7 +1630,7 @@ Rerunning the test will let it pass. Consider such failures as noise.
|
||||
|
||||
### Test command syntax
|
||||
|
||||
```bash
|
||||
```sh
|
||||
usage: go test ./cmd/bisync [options...]
|
||||
|
||||
Options:
|
||||
|
||||
@@ -18,11 +18,13 @@ to use JWT authentication. `rclone config` walks you through it.
|
||||
|
||||
Here is an example of how to make a remote called `remote`. First run:
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
@@ -94,11 +96,15 @@ Once configured you can then use `rclone` like this,
|
||||
|
||||
List directories in top level of your Box
|
||||
|
||||
rclone lsd remote:
|
||||
```sh
|
||||
rclone lsd remote:
|
||||
```
|
||||
|
||||
List all the files in your Box
|
||||
|
||||
rclone ls remote:
|
||||
```sh
|
||||
rclone ls remote:
|
||||
```
|
||||
|
||||
To copy a local directory to a Box directory called backup
|
||||
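The corresponding command would be along these lines (the local path below is a placeholder):

```sh
rclone copy /home/source remote:backup
```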
|
||||
@@ -123,9 +129,9 @@ According to the [box docs](https://developer.box.com/v2.0/docs/oauth-20#section
|
||||
|
||||
This means that if you
|
||||
|
||||
* Don't use the box remote for 60 days
|
||||
* Copy the config file with a box refresh token in and use it in two places
|
||||
* Get an error on a token refresh
|
||||
- Don't use the box remote for 60 days
|
||||
- Copy the config file with a box refresh token in and use it in two places
|
||||
- Get an error on a token refresh
|
||||
|
||||
then rclone will return an error which includes the text `Invalid
|
||||
refresh token`.
|
||||
@@ -138,7 +144,7 @@ did the authentication on.
|
||||
|
||||
Here is how to do it.
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone config
|
||||
Current remotes:
|
||||
|
||||
|
||||
@@ -31,11 +31,13 @@ with `cache`.
|
||||
|
||||
Here is an example of how to make a remote called `test-cache`. First run:
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
r) Rename remote
|
||||
@@ -115,19 +117,25 @@ You can then use it like this,
|
||||
|
||||
List directories in top level of your drive
|
||||
|
||||
rclone lsd test-cache:
|
||||
```sh
|
||||
rclone lsd test-cache:
|
||||
```
|
||||
|
||||
List all the files in your drive
|
||||
|
||||
rclone ls test-cache:
|
||||
```sh
|
||||
rclone ls test-cache:
|
||||
```
|
||||
|
||||
To start a cached mount
|
||||
|
||||
rclone mount --allow-other test-cache: /var/tmp/test-cache
|
||||
```sh
|
||||
rclone mount --allow-other test-cache: /var/tmp/test-cache
|
||||
```
|
||||
|
||||
### Write Features ###
|
||||
### Write Features
|
||||
|
||||
### Offline uploading ###
|
||||
### Offline uploading
|
||||
|
||||
In an effort to make writing through cache more reliable, the backend
|
||||
now supports this feature which can be activated by specifying a
|
||||
@@ -152,7 +160,7 @@ Uploads will be stored in a queue and be processed based on the order they were
|
||||
The queue and the temporary storage is persistent across restarts but
|
||||
can be cleared on startup with the `--cache-db-purge` flag.
|
||||
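For example, a mount could be started with the queue cleared on startup, reusing the `test-cache` remote from the example above (the mount point is arbitrary):

```sh
rclone mount --allow-other --cache-db-purge test-cache: /var/tmp/test-cache
```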
|
||||
### Write Support ###
|
||||
### Write Support
|
||||
|
||||
Writes are supported through `cache`.
|
||||
One caveat is that a mounted cache remote does not add any retry or fallback
|
||||
@@ -163,9 +171,9 @@ One special case is covered with `cache-writes` which will cache the file
|
||||
data at the same time as the upload when it is enabled making it available
|
||||
from the cache store immediately once the upload is finished.
|
||||
|
||||
### Read Features ###
|
||||
### Read Features
|
||||
|
||||
#### Multiple connections ####
|
||||
#### Multiple connections
|
||||
|
||||
To counter the high latency between a local PC where rclone is running
|
||||
and cloud providers, the cache remote can split multiple requests to the
|
||||
@@ -177,7 +185,7 @@ This is similar to buffering when media files are played online. Rclone
|
||||
will stay around the current marker but always try its best to stay ahead
|
||||
and prepare the data before.
|
||||
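As a rough sketch only (the values are illustrative, not tuned recommendations), the number of parallel readers and the size of the requested chunks can be adjusted with the corresponding flags:

```sh
rclone mount --allow-other test-cache: /var/tmp/test-cache \
    --cache-workers 8 --cache-chunk-size 10M
```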
|
||||
#### Plex Integration ####
|
||||
#### Plex Integration
|
||||
|
||||
There is a direct integration with Plex which allows cache to detect during reading
|
||||
if the file is in playback or not. This helps cache to adapt how it queries
|
||||
@@ -196,9 +204,11 @@ How to enable? Run `rclone config` and add all the Plex options (endpoint, usern
|
||||
and password) in your remote and it will be automatically enabled.
|
||||
|
||||
Affected settings:
|
||||
- `cache-workers`: _Configured value_ during confirmed playback or _1_ all the other times
|
||||
|
||||
##### Certificate Validation #####
|
||||
- `cache-workers`: *Configured value* during confirmed playback or *1* at all
  other times
|
||||
|
||||
##### Certificate Validation
|
||||
|
||||
When the Plex server is configured to only accept secure connections, it is
|
||||
possible to use `.plex.direct` URLs to ensure certificate validation succeeds.
|
||||
@@ -213,60 +223,63 @@ have been replaced with dashes, e.g. `127.0.0.1` becomes `127-0-0-1`.
|
||||
|
||||
To get the `server-hash` part, the easiest way is to visit
|
||||
|
||||
https://plex.tv/api/resources?includeHttps=1&X-Plex-Token=your-plex-token
|
||||
<https://plex.tv/api/resources?includeHttps=1&X-Plex-Token=your-plex-token>
|
||||
|
||||
This page will list all the available Plex servers for your account
|
||||
with at least one `.plex.direct` link for each. Copy one URL and replace
|
||||
the IP address with the desired address. This can be used as the
|
||||
`plex_url` value.
|
||||
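Assuming the `test-cache` remote from the earlier example and a server reached at `127.0.0.1`, one way to set the value could look like this; the `<server-hash>` part is a placeholder for the hash taken from the resources page:

```sh
rclone config update test-cache plex_url "https://127-0-0-1.<server-hash>.plex.direct:32400"
```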
|
||||
### Known issues ###
|
||||
### Known issues
|
||||
|
||||
#### Mount and --dir-cache-time ####
|
||||
#### Mount and --dir-cache-time
|
||||
|
||||
--dir-cache-time controls the first layer of directory caching which works at the mount layer.
|
||||
Being an independent caching mechanism from the `cache` backend, it will manage its own entries
|
||||
based on the configured time.
|
||||
--dir-cache-time controls the first layer of directory caching which works at
|
||||
the mount layer. Being an independent caching mechanism from the `cache` backend,
|
||||
it will manage its own entries based on the configured time.
|
||||
|
||||
To avoid getting in a scenario where dir cache has obsolete data and cache would have the correct
|
||||
one, try to set `--dir-cache-time` to a lower time than `--cache-info-age`. Default values are
|
||||
already configured in this way.
|
||||
To avoid getting in a scenario where dir cache has obsolete data and cache would
|
||||
have the correct one, try to set `--dir-cache-time` to a lower time than
|
||||
`--cache-info-age`. Default values are already configured in this way.
|
||||
|
||||
#### Windows support - Experimental ####
|
||||
#### Windows support - Experimental
|
||||
|
||||
There are a couple of issues with Windows `mount` functionality that still require some investigations.
|
||||
It should be considered as experimental thus far as fixes come in for this OS.
|
||||
There are a couple of issues with Windows `mount` functionality that still
require some investigation. It should be considered experimental while fixes
for this OS come in.
|
||||
|
||||
Most of the issues seem to be related to the difference between filesystems
|
||||
on Linux flavors and Windows as cache is heavily dependent on them.
|
||||
|
||||
Any reports or feedback on how cache behaves on this OS is greatly appreciated.
|
||||
|
||||
- https://github.com/rclone/rclone/issues/1935
|
||||
- https://github.com/rclone/rclone/issues/1907
|
||||
- https://github.com/rclone/rclone/issues/1834
|
||||
|
||||
#### Risk of throttling ####
|
||||
- [Issue #1935](https://github.com/rclone/rclone/issues/1935)
|
||||
- [Issue #1907](https://github.com/rclone/rclone/issues/1907)
|
||||
- [Issue #1834](https://github.com/rclone/rclone/issues/1834)
|
||||
|
||||
#### Risk of throttling
|
||||
|
||||
Future iterations of the cache backend will make use of the pooling functionality
|
||||
of the cloud provider to synchronize and at the same time make writing through it
|
||||
more tolerant to failures.
|
||||
more tolerant to failures.
|
||||
|
||||
There are a couple of enhancements being tracked to add these, but in the
meantime there is a valid concern that the expiring cache listings can lead to
cloud provider throttles or bans due to repeated queries for very large mounts.
|
||||
|
||||
Some recommendations:
|
||||
|
||||
- don't use a very small interval for entry information (`--cache-info-age`)
|
||||
- while writes aren't yet optimised, you can still write through `cache` which gives you the advantage
|
||||
of adding the file in the cache at the same time if configured to do so.
|
||||
- while writes aren't yet optimised, you can still write through `cache` which
|
||||
gives you the advantage of adding the file in the cache at the same time if
|
||||
configured to do so.
|
||||
|
||||
Future enhancements:
|
||||
|
||||
- https://github.com/rclone/rclone/issues/1937
|
||||
- https://github.com/rclone/rclone/issues/1936
|
||||
- [Issue #1937](https://github.com/rclone/rclone/issues/1937)
|
||||
- [Issue #1936](https://github.com/rclone/rclone/issues/1936)
|
||||
|
||||
#### cache and crypt ####
|
||||
#### cache and crypt
|
||||
|
||||
One common scenario is to keep your data encrypted in the cloud provider
|
||||
using the `crypt` remote. `crypt` uses a similar technique to wrap around
|
||||
@@ -281,30 +294,36 @@ which makes it think we're downloading the full file instead of small chunks.
|
||||
Organizing the remotes in this order yields better results:
|
||||
{{<color green>}}**cloud remote** -> **cache** -> **crypt**{{</color>}}
|
||||
|
||||
#### absolute remote paths ####
|
||||
#### absolute remote paths
|
||||
|
||||
`cache` can not differentiate between relative and absolute paths for the wrapped remote.
|
||||
Any path given in the `remote` config setting and on the command line will be passed to
|
||||
the wrapped remote as is, but for storing the chunks on disk the path will be made
|
||||
relative by removing any leading `/` character.
|
||||
`cache` cannot differentiate between relative and absolute paths for the wrapped
|
||||
remote. Any path given in the `remote` config setting and on the command line will
|
||||
be passed to the wrapped remote as is, but for storing the chunks on disk the path
|
||||
will be made relative by removing any leading `/` character.
|
||||
|
||||
This behavior is irrelevant for most backend types, but there are backends where a leading `/`
|
||||
changes the effective directory, e.g. in the `sftp` backend paths starting with a `/` are
|
||||
relative to the root of the SSH server and paths without are relative to the user home directory.
|
||||
As a result `sftp:bin` and `sftp:/bin` will share the same cache folder, even if they represent
|
||||
a different directory on the SSH server.
|
||||
This behavior is irrelevant for most backend types, but there are backends where
|
||||
a leading `/` changes the effective directory, e.g. in the `sftp` backend paths
|
||||
starting with a `/` are relative to the root of the SSH server and paths without
|
||||
are relative to the user home directory. As a result `sftp:bin` and `sftp:/bin`
|
||||
will share the same cache folder, even if they represent a different directory
|
||||
on the SSH server.
|
||||
|
||||
### Cache and Remote Control (--rc) ###
|
||||
Cache supports the new `--rc` mode in rclone and can be remote controlled through the following end points:
|
||||
By default, the listener is disabled if you do not add the flag.
|
||||
### Cache and Remote Control (--rc)
|
||||
|
||||
Cache supports the new `--rc` mode in rclone and can be remote controlled
through the following endpoints. By default, the listener is disabled if
you do not add the flag.
|
||||
|
||||
### rc cache/expire
|
||||
|
||||
Purge a remote from the cache backend. Supports either a directory or a file.
|
||||
It supports both encrypted and unencrypted file names if cache is wrapped by crypt.
|
||||
|
||||
Params:
|
||||
- **remote** = path to remote **(required)**
|
||||
- **withData** = true/false to delete cached data (chunks) as well _(optional, false by default)_
|
||||
|
||||
- **remote** = path to remote **(required)**
|
||||
- **withData** = true/false to delete cached data (chunks) as
|
||||
well *(optional, false by default)*
|
||||
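For example, with the remote control listener running, a cached directory and its chunks could be purged along these lines (the path is a placeholder):

```sh
rclone rc cache/expire remote=path/to/sub/folder/ withData=true
```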
|
||||
{{< rem autogenerated options start" - DO NOT EDIT - instead edit fs.RegInfo in backend/cache/cache.go then run make backenddocs" >}}
|
||||
### Standard options
|
||||
|
||||
@@ -6,6 +6,31 @@ description: "Rclone Changelog"

# Changelog

## v1.71.1 - 2025-09-24

[See commits](https://github.com/rclone/rclone/compare/v1.71.0...v1.71.1)

- Bug Fixes
    - bisync: Fix error handling for renamed conflicts (nielash)
    - march: Fix deadlock when using --fast-list on syncs (Nick Craig-Wood)
    - operations: Fix partial name collisions for non --inplace copies (Nick Craig-Wood)
    - pacer: Fix deadlock with --max-connections (Nick Craig-Wood)
    - doc fixes (albertony, anon-pradip, Claudius Ellsel, dougal, Jean-Christophe Cura, Nick Craig-Wood, nielash)
- Mount
    - Do not log successful unmount as an error (Tilman Vogel)
- VFS
    - Fix SIGHUP killing serve instead of flushing directory caches (dougal)
- Local
    - Fix rmdir "Access is denied" on windows (nielash)
- Box
    - Fix about after change in API return (Nick Craig-Wood)
- Combine
    - Propagate SlowHash feature (skbeh)
- Drive
    - Update making your own client ID instructions (Ed Craig-Wood)
- Internet Archive
    - Fix server side copy files with spaces (Nick Craig-Wood)

## v1.71.0 - 2025-08-22

[See commits](https://github.com/rclone/rclone/compare/v1.70.0...v1.71.0)
|
||||
|
||||
@@ -26,8 +26,8 @@ then you should probably put the bucket in the remote `s3:bucket`.
|
||||
Now configure `chunker` using `rclone config`. We will call this one `overlay`
|
||||
to separate it from the `remote` itself.
|
||||
|
||||
```
|
||||
No remotes found, make a new one?
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
q) Quit config
|
||||
@@ -92,16 +92,15 @@ So if you use a remote of `/path/to/secret/files` then rclone will
|
||||
chunk stuff in that directory. If you use a remote of `name` then rclone
|
||||
will put files in a directory called `name` in the current directory.
|
||||
|
||||
|
||||
### Chunking
|
||||
|
||||
When rclone starts a file upload, chunker checks the file size. If it
|
||||
doesn't exceed the configured chunk size, chunker will just pass the file
|
||||
to the wrapped remote (however, see caveat below). If a file is large, chunker will transparently cut
|
||||
data in pieces with temporary names and stream them one by one, on the fly.
|
||||
Each data chunk will contain the specified number of bytes, except for the
|
||||
last one which may have less data. If file size is unknown in advance
|
||||
(this is called a streaming upload), chunker will internally create
|
||||
to the wrapped remote (however, see caveat below). If a file is large, chunker
|
||||
will transparently cut data in pieces with temporary names and stream them one
|
||||
by one, on the fly. Each data chunk will contain the specified number of bytes,
|
||||
except for the last one which may have less data. If file size is unknown in
|
||||
advance (this is called a streaming upload), chunker will internally create
|
||||
a temporary copy, record its size and repeat the above process.
|
||||
|
||||
When upload completes, temporary chunk files are finally renamed.
|
||||
@@ -129,14 +128,13 @@ proceed with current command.
|
||||
You can set the `--chunker-fail-hard` flag to have commands abort with
|
||||
error message in such cases.
|
||||
|
||||
**Caveat**: As it is now, chunker will always create a temporary file in the
|
||||
**Caveat**: As it is now, chunker will always create a temporary file in the
|
||||
backend and then rename it, even if the file is below the chunk threshold.
|
||||
This will result in unnecessary API calls and can severely restrict throughput
|
||||
when handling transfers primarily composed of small files on some backends (e.g. Box).
|
||||
A workaround to this issue is to use chunker only for files above the chunk threshold
|
||||
via `--min-size` and then perform a separate call without chunker on the remaining
|
||||
files.
|
||||
|
||||
when handling transfers primarily composed of small files on some backends
|
||||
(e.g. Box). A workaround to this issue is to use chunker only for files above
|
||||
the chunk threshold via `--min-size` and then perform a separate call without
|
||||
chunker on the remaining files.
|
||||
|
||||
#### Chunk names
|
||||
|
||||
@@ -165,7 +163,6 @@ non-chunked files.
|
||||
When using `norename` transactions, chunk names will additionally have a unique
|
||||
file version suffix. For example, `BIG_FILE_NAME.rclone_chunk.001_bp562k`.
|
||||
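Purely as an illustration (names and sizes are made up), the wrapped remote would show the individual chunk objects while the chunker remote presents them as a single file:

```sh
# Chunks as stored on the wrapped remote
rclone ls remote:bucket
#  104857600 BIG_FILE_NAME.rclone_chunk.001_bp562k
#   20971520 BIG_FILE_NAME.rclone_chunk.002_bp562k

# The same data seen through the chunker overlay
rclone ls overlay:
#  125829120 BIG_FILE_NAME
```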
|
||||
|
||||
### Metadata
|
||||
|
||||
Besides data chunks chunker will by default create metadata object for
|
||||
@@ -199,7 +196,6 @@ base name and show group names as virtual composite files.
|
||||
This method is more prone to missing chunk errors (especially missing
|
||||
last chunk) than format with metadata enabled.
|
||||
|
||||
|
||||
### Hashsums
|
||||
|
||||
Chunker supports hashsums only when a compatible metadata is present.
|
||||
@@ -243,7 +239,6 @@ hashsums at destination. Beware of consequences: the `sync` command will
|
||||
revert (sometimes silently) to time/size comparison if compatible hashsums
|
||||
between source and target are not found.
|
||||
|
||||
|
||||
### Modification times
|
||||
|
||||
Chunker stores modification times using the wrapped remote so support
|
||||
@@ -254,7 +249,6 @@ modification time of the metadata object on the wrapped remote.
|
||||
If file is chunked but metadata format is `none` then chunker will
|
||||
use modification time of the first data chunk.
|
||||
|
||||
|
||||
### Migrations
|
||||
|
||||
The idiomatic way to migrate to a different chunk size, hash type, transaction
|
||||
@@ -283,7 +277,6 @@ somewhere using the chunker remote and purge the original directory.
|
||||
The `copy` command will copy only active chunks while the `purge` will
|
||||
remove everything including garbage.
|
||||
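A minimal sketch of that copy-then-purge sequence, reusing the `overlay` remote from the example above (`backup:` and the paths are placeholders):

```sh
# Copies only the active chunks, skipping any garbage
rclone copy overlay:dir backup:dir
# Removes everything in the original directory, garbage included
rclone purge overlay:dir
```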
|
||||
|
||||
### Caveats and Limitations
|
||||
|
||||
Chunker requires wrapped remote to support server-side `move` (or `copy` +
|
||||
|
||||
@@ -11,11 +11,16 @@ This is a backend for the [Cloudinary](https://cloudinary.com/) platform
|
||||
## About Cloudinary
|
||||
|
||||
[Cloudinary](https://cloudinary.com/) is an image and video API platform.
|
||||
Trusted by 1.5 million developers and 10,000 enterprise and hyper-growth companies as a critical part of their tech stack to deliver visually engaging experiences.
|
||||
It is trusted by 1.5 million developers and 10,000 enterprise and hyper-growth
companies as a critical part of their tech stack to deliver visually engaging
experiences.
|
||||
|
||||
## Accounts & Pricing
|
||||
|
||||
To use this backend, you need to [create a free account](https://cloudinary.com/users/register_free) on Cloudinary. Start with a free plan with generous usage limits. Then, as your requirements grow, upgrade to a plan that best fits your needs. See [the pricing details](https://cloudinary.com/pricing).
|
||||
To use this backend, you need to [create a free account](https://cloudinary.com/users/register_free)
|
||||
on Cloudinary. Start with a free plan with generous usage limits. Then, as your
|
||||
requirements grow, upgrade to a plan that best fits your needs.
|
||||
See [the pricing details](https://cloudinary.com/pricing).
|
||||
|
||||
## Securing Your Credentials
|
||||
|
||||
@@ -25,13 +30,17 @@ Please refer to the [docs](/docs/#configuration-encryption-cheatsheet)
|
||||
|
||||
Here is an example of making a Cloudinary configuration.
|
||||
|
||||
First, create a [cloudinary.com](https://cloudinary.com/users/register_free) account and choose a plan.
|
||||
First, create a [cloudinary.com](https://cloudinary.com/users/register_free)
|
||||
account and choose a plan.
|
||||
|
||||
You will need to log in and get the `API Key` and `API Secret` for your account from the developer section.
|
||||
You will need to log in and get the `API Key` and `API Secret` for your account
|
||||
from the developer section.
|
||||
|
||||
Now run
|
||||
|
||||
`rclone config`
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
Follow the interactive setup process:
|
||||
|
||||
@@ -104,15 +113,21 @@ y/e/d> y
|
||||
|
||||
List directories in the top level of your Media Library
|
||||
|
||||
`rclone lsd cloudinary-media-library:`
|
||||
```sh
|
||||
rclone lsd cloudinary-media-library:
|
||||
```
|
||||
|
||||
Make a new directory.
|
||||
|
||||
`rclone mkdir cloudinary-media-library:directory`
|
||||
```sh
|
||||
rclone mkdir cloudinary-media-library:directory
|
||||
```
|
||||
|
||||
List the contents of a directory.
|
||||
|
||||
`rclone ls cloudinary-media-library:directory`
|
||||
```sh
|
||||
rclone ls cloudinary-media-library:directory
|
||||
```
|
||||
|
||||
### Modified time and hashes
|
||||
|
||||
|
||||
@@ -11,7 +11,7 @@ tree.
|
||||
|
||||
For example you might have a remote for images on one provider:
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone tree s3:imagesbucket
|
||||
/
|
||||
├── image1.jpg
|
||||
@@ -20,7 +20,7 @@ $ rclone tree s3:imagesbucket
|
||||
|
||||
And a remote for files on another:
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone tree drive:important/files
|
||||
/
|
||||
├── file1.txt
|
||||
@@ -30,7 +30,7 @@ $ rclone tree drive:important/files
|
||||
The `combine` backend can join these together into a synthetic
|
||||
directory structure like this:
|
||||
|
||||
```
|
||||
```sh
|
||||
$ rclone tree combined:
|
||||
/
|
||||
├── files
|
||||
@@ -44,7 +44,9 @@ $ rclone tree combined:
|
||||
You'd do this by specifying an `upstreams` parameter in the config
|
||||
like this
|
||||
|
||||
upstreams = images=s3:imagesbucket files=drive:important/files
|
||||
```text
|
||||
upstreams = images=s3:imagesbucket files=drive:important/files
|
||||
```
|
||||
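If you would rather skip the interactive walkthrough, a non-interactive sketch of the same setup could be the following (the remote name `combined` matches the tree example above):

```sh
rclone config create combined combine upstreams "images=s3:imagesbucket files=drive:important/files"
```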
|
||||
During the initial setup with `rclone config` you will specify the
|
||||
upstreams remotes as a space separated list. The upstream remotes can
|
||||
@@ -55,11 +57,13 @@ either be a local paths or other remotes.
|
||||
Here is an example of how to make a combine called `remote` for the
|
||||
example above. First run:
|
||||
|
||||
rclone config
|
||||
```sh
|
||||
rclone config
|
||||
```
|
||||
|
||||
This will guide you through an interactive setup process:
|
||||
|
||||
```
|
||||
```text
|
||||
No remotes found, make a new one?
|
||||
n) New remote
|
||||
s) Set configuration password
|
||||
@@ -103,21 +107,25 @@ the shared drives you have access to.
|
||||
Assuming your main (non shared drive) Google drive remote is called
|
||||
`drive:` you would run
|
||||
|
||||
rclone backend -o config drives drive:
|
||||
```sh
|
||||
rclone backend -o config drives drive:
|
||||
```
|
||||
|
||||
This would produce something like this:
|
||||
|
||||
[My Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||
```ini
|
||||
[My Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEF-01234567890,root_folder_id=:
|
||||
|
||||
[Test Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
[Test Drive]
|
||||
type = alias
|
||||
remote = drive,team_drive=0ABCDEFabcdefghijkl,root_folder_id=:
|
||||
|
||||
[AllDrives]
|
||||
type = combine
|
||||
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||
[AllDrives]
|
||||
type = combine
|
||||
upstreams = "My Drive=My Drive:" "Test Drive=Test Drive:"
|
||||
```
|
||||
|
||||
If you then add that config to your config file (find it with `rclone
|
||||
config file`) then you can access all the shared drives in one place
|
||||
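A quick way to confirm the combined remote works is to list its top level, for example:

```sh
rclone lsd AllDrives:
```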
|
||||
@@ -15,8 +15,6 @@ mounting them, listing them in lots of different ways.
|
||||
See the home page (https://rclone.org/) for installation, usage,
|
||||
documentation, changelog and configuration walkthroughs.
|
||||
|
||||
|
||||
|
||||
```
|
||||
rclone [flags]
|
||||
```
|
||||
@@ -1015,7 +1013,7 @@ rclone [flags]
|
||||
--use-json-log Use json log format
|
||||
--use-mmap Use mmap allocator (see docs)
|
||||
--use-server-modtime Use server modified time instead of object metadata
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.71.0")
|
||||
--user-agent string Set the user-agent to a specified string (default "rclone/v1.71.1")
|
||||
-v, --verbose count Print lots more stuff (repeat for more)
|
||||
-V, --version Print the version number
|
||||
--webdav-auth-redirect Preserve authentication on redirect
|
||||
@@ -1057,6 +1055,9 @@ rclone [flags]
|
||||
|
||||
## See Also
|
||||
|
||||
<!-- markdownlint-capture -->
|
||||
<!-- markdownlint-disable ul-style line-length -->
|
||||
|
||||
* [rclone about](/commands/rclone_about/) - Get quota information from the remote.
|
||||
* [rclone authorize](/commands/rclone_authorize/) - Remote authorization.
|
||||
* [rclone backend](/commands/rclone_backend/) - Run a backend-specific command.
|
||||
@@ -1111,3 +1112,5 @@ rclone [flags]
|
||||
* [rclone tree](/commands/rclone_tree/) - List the contents of the remote in a tree like fashion.
|
||||
* [rclone version](/commands/rclone_version/) - Show the version number.
|
||||
|
||||
|
||||
<!-- markdownlint-restore -->
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.